def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
    # YOLOv5 CoreML export
    ct_model = None
    try:
        check_requirements(('coremltools',))
        import coremltools as ct

        print(f'\n{prefix} starting export with coremltools {ct.__version__}...')
        f = file.with_suffix('.mlmodel')

        model.train()  # CoreML exports should be placed in model.train() mode
        ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
        ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255.0, bias=[0, 0, 0])])
        ct_model.save(f)

        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'\n{prefix} export failure: {e}')

    return ct_model
def test_mil_default_value_runtime(self):
    program_input_spec = [
        ct.TensorType(name="x", shape=[1], default_value=np.array([1.0]).astype(np.float32)),
        ct.TensorType(name="y", shape=[1]),
    ]
    mlmodel = ct.convert(self.basic_network, convert_to="mlprogram", inputs=program_input_spec)

    if _macos_version() < (12, 0):
        # Can only get predictions for ml program on macOS 12+
        return

    res = mlmodel.predict({"x": np.array([3.]), "y": np.array([2.])})
    assert res["output"][0] == 5.0

    res = mlmodel.predict({"y": np.array([2.])})
    assert res["output"][0] == 3.0
def test():
    torch_model = torchvision.models.mobilenet_v2(pretrained=True)
    # Set the model in evaluation mode
    # torch_model.eval()
    example_input = torch.rand(1, 3, 224, 224)  # after test, will get 'size mismatch' error message with size 256x256
    # traced_model = torch.jit.trace(torch_model, example_input)
    fastpunct = FastPunct()
    traced_model = fastpunct.model
    traced_model.save('aaa.h5')

    # Convert to Core ML using the Unified Conversion API
    model = ct.convert(
        traced_model,
        'pytorch',
        inputs=[ct.ImageType(name="input_1", shape=example_input.shape)]  # provide only if step 2 was performed
    )
    # Save model
    model.save("MobileNetV2.mlmodel")
def test_tf2keras_enumerated_shapes():
    # Test examples in https://coremltools.readme.io/docs/flexible-inputs
    import tensorflow as tf

    input_shape = (28, 28, 3)
    x = tf.keras.Input(shape=input_shape, name="input")
    C_out = 2
    kHkW = 3
    y = tf.keras.layers.Conv2D(C_out, kHkW, activation='relu',
                               input_shape=input_shape)(x)
    keras_model = tf.keras.Model(inputs=[x], outputs=[y])

    # Two enumerated input shapes accepted by the converted model
    shapes = [(1, 28, 28, 3), (1, 56, 56, 3)]
    enumerated_shapes = ct.EnumeratedShapes(shapes=shapes)
    tensor_input = ct.TensorType(name="input", shape=enumerated_shapes)
    mlmodel = ct.convert(keras_model, inputs=[tensor_input])

    # Test (1, 28, 28, 3) shape
    test_input_x = np.random.rand(*shapes[0]).astype(np.float32)
    expected_val = keras_model([test_input_x])
    if ct.utils._is_macos():
        results = mlmodel.predict({"input": test_input_x})
        np.testing.assert_allclose(results["Identity"], expected_val, rtol=1e-2)

        # Test (1, 56, 56, 3) shape (can't verify numerical parity with Keras
        # which doesn't support enumerated shape)
        test_input_x = np.random.rand(*shapes[1]).astype(np.float32)
        results = mlmodel.predict({"input": test_input_x})

        # Test with a wrong shape
        with pytest.raises(
                RuntimeError,
                match=r"not compatible with the model\'s feature"):
            test_input_x = np.random.rand(1, 29, 29, 3).astype(np.float32)
            results = mlmodel.predict({"input": test_input_x})
def test_tf2_image_enumerated_shapes():
    import tensorflow as tf

    keras_model = tf.keras.applications.MobileNetV2(
        input_shape=(None, None, 3),
        classes=1000,
        include_top=False,
    )
    input_shapes = ct.EnumeratedShapes(shapes=[(1, 192, 192, 3), (1, 224, 224, 3)])
    image_input = ct.ImageType(shape=input_shapes,
                               bias=[-1, -1, -1], scale=1 / 127)
    model = ct.convert(keras_model, inputs=[image_input])
    assert model is not None
    spec = model.get_spec()
    assert len(spec.description.input[0].type.imageType.enumeratedSizes.sizes) == 2
def test_classifier(self):
    torch_model = torch.nn.ReLU().eval()
    traced_model = torch.jit.trace(torch_model, torch.rand(3,))
    model = ct.convert(
        traced_model,
        inputs=[ct.TensorType(shape=(3,), dtype=np.float16)],
        outputs=[ct.TensorType(dtype=np.float16)],
        classifier_config=ct.ClassifierConfig(['a', 'b', 'c']),
        convert_to='mlprogram',
        minimum_deployment_target=ct.target.macOS13,
    )
    assert_input_dtype(model, expected_type_str="fp16")
    assert_ops_in_mil_program(model, ["relu", "cast", "classify"])

    spec = model.get_spec()
    input_name = spec.description.input[0].name
    out_dict = model.predict({input_name: np.array([1.0, 2.0, 3.0])})

    assert 'classLabel' in out_dict
    assert out_dict['classLabel'] == 'c'
    assert len(spec.description.output) == 2
    assert "classLabel_probs" in out_dict
    assert isinstance(out_dict["classLabel_probs"], dict)
def test_convert_tf_keras_applications_model():
    import tensorflow as tf

    tf_keras_model = tf.keras.applications.MobileNet(
        weights="imagenet", input_shape=(224, 224, 3)
    )

    # inputs / outputs are optional, we can get from tf.keras model
    # this can be extremely helpful when we want to extract sub-graphs
    input_name = tf_keras_model.inputs[0].name.split(":")[0]

    # note that the `convert()` requires tf.Graph's outputs instead of
    # tf.keras.Model's outputs, to access that, we can do the following
    output_name = tf_keras_model.outputs[0].name.split(":")[0]
    tf_graph_output_name = output_name.split("/")[-1]

    mlmodel = ct.convert(
        tf_keras_model,
        inputs=[ct.TensorType(shape=(1, 224, 224, 3))],
        outputs=[tf_graph_output_name],
    )
    mlmodel.save("./mobilenet.mlmodel")
def test_fusion_with_image_full(self):
    # Avoid circular import
    from coremltools import convert

    @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20, 30, 3))])
    def prog(x):
        x1 = mb.transpose(x=x, perm=[0, 3, 1, 2])
        x2 = mb.relu(x=x)
        x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2])
        x4 = mb.add(x=x1, y=x3)
        return mb.relu(x=x4)

    mlmodel = convert(prog,
                      inputs=[ImageType(name="x", shape=(10, 20, 30, 3), channel_first=False)],
                      source="milinternal",
                      convert_to="neuralnetwork")
    assert mlmodel is not None
    assert len(mlmodel.get_spec().neuralNetwork.layers) == 3
def test_mil_enumerated_multiarray_with_default(self):
    enumerated_shapes = tuple([(1, 3, 10, 10), (1, 3, 10, 20), (1, 3, 10, 30)])
    input_shape = [
        ct.TensorType(name="x",
                      shape=ct.EnumeratedShapes(shapes=enumerated_shapes, default=(1, 3, 10, 30)))
    ]
    mlmodel = ct.convert(self.basic_network, source="milinternal",
                         convert_to="mlprogram", inputs=input_shape)
    input_spec = mlmodel.get_spec().description.input
    assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec))
    assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
    assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", \
        "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type"))
    assert input_spec[0].type.multiArrayType.WhichOneof("ShapeFlexibility") == "enumeratedShapes", \
        "Expected enumeratedShapes in ShapeFlexibility"

    spec_default_shape = [s for s in input_spec[0].type.multiArrayType.shape]
    spec_enumerated_shapes = set()
    for enumerated in input_spec[0].type.multiArrayType.enumeratedShapes.shapes:
        spec_enumerated_shapes.add(tuple([s for s in enumerated.shape]))
    assert spec_default_shape == [1, 3, 10, 30], \
        "Expected default shape to be [1, 3, 10, 30], got {} instead".format(str(spec_default_shape))
    assert spec_enumerated_shapes == set(enumerated_shapes), "Enumerated shape mismatch"
def test_image_input_rangedim(self, convert_to):
    example_input = torch.rand(1, 3, 50, 50) * 255
    traced_model = torch.jit.trace(TestConvModule().eval(), example_input)

    input_shape = ct.Shape(shape=(1, 3,
                                  ct.RangeDim(25, 100, default=45),
                                  ct.RangeDim(25, 100, default=45)))
    model = ct.convert(traced_model,
                       inputs=[ct.ImageType(shape=input_shape)],
                       convert_to=convert_to)

    spec = model.get_spec()
    assert spec.description.input[0].type.imageType.width == 45
    assert spec.description.input[0].type.imageType.height == 45
    assert spec.description.input[0].type.imageType.imageSizeRange.widthRange.lowerBound == 25
    assert spec.description.input[0].type.imageType.imageSizeRange.widthRange.upperBound == 100
    _assert_torch_coreml_output_shapes(model, spec, traced_model, example_input, is_image_input=True)
def test_grayscale_input_image(self, rank4_grayscale_input_model):
    mlmodel = ct.convert(
        rank4_grayscale_input_model,
        inputs=[
            ct.ImageType(name="input_image",
                         shape=(1, 1, 10, 20),
                         color_layout=ct.colorlayout.GRAYSCALE)
        ],
        outputs=[ct.TensorType(name="output")],
        minimum_deployment_target=ct.target.macOS13,
    )
    sample_input = np.random.randint(low=0, high=246, size=(1, 1, 10, 20))
    img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.uint8), 'L')
    model_output = mlmodel.predict({"input_image": img_input})['output']
    reference_output = rank4_grayscale_input_model(
        torch.from_numpy(sample_input.astype(np.float32))).detach().numpy()
    np.testing.assert_allclose(reference_output, model_output, rtol=1e-2, atol=1e-2)
def convert(input_model_path, output_model_path, input_config_json, compiler_options_json):
    input_config = json.loads(input_config_json)
    compiler_options = None
    if compiler_options_json is not None:
        compiler_options = json.loads(compiler_options_json)
    try:
        inputs = get_input_list(input_config)
        classifier_config = get_classifier_config(compiler_options)
        mlmodel = ct.convert(input_model_path, inputs=inputs, classifier_config=classifier_config)
        mlmodel.save(output_model_path)
        print("MLMODEL was saved to ", output_model_path)
    except ValueError as e:
        print('ValueError:', e)
        err_str = str(e)
        print("Error (str) {}".format(e))
        for x in DISPLAY_TO_USER_ERRORS:
            if x in err_str:
                sys.exit(4)
        sys.exit(1)
def test_tf2keras_shared_range_dim(use_symbol):
    # Test examples in https://coremltools.readme.io/docs/flexible-inputs
    import tensorflow as tf

    input_dim = 3
    # None denotes seq_len dimension
    x1 = tf.keras.Input(shape=(None, input_dim), name="seq1")
    x2 = tf.keras.Input(shape=(None, input_dim), name="seq2")
    y = x1 + x2
    keras_model = tf.keras.Model(inputs=[x1, x2], outputs=[y])

    # One RangeDim shared by two inputs
    if use_symbol:
        seq_len_dim = ct.RangeDim(symbol='seq_len')
    else:
        # symbol is optional
        seq_len_dim = ct.RangeDim()
    seq1_input = ct.TensorType(name="seq1", shape=(1, seq_len_dim, input_dim))
    seq2_input = ct.TensorType(name="seq2", shape=(1, seq_len_dim, input_dim))
    mlmodel = ct.convert(keras_model, inputs=[seq1_input, seq2_input])

    batch = 1
    seq_len = 5
    test_input_x1 = np.random.rand(batch, seq_len, input_dim).astype(np.float32)
    test_input_x2 = np.random.rand(batch, seq_len, input_dim).astype(np.float32)
    expected_val = keras_model([test_input_x1, test_input_x2])
    if ct.utils._is_macos():
        results = mlmodel.predict({
            "seq1": test_input_x1,
            "seq2": test_input_x2
        })
        np.testing.assert_allclose(results["Identity"], expected_val, rtol=1e-4, atol=1e-3)
def test_prediction_with_fp16_io(self):
    torch_model = torch.nn.Linear(30, 5).eval()
    traced_model = torch.jit.trace(torch_model, torch.rand(1, 30))
    mlmodel = ct.convert(
        traced_model,
        inputs=[ct.TensorType(name="input", shape=(1, 30), dtype=np.float32)],
        outputs=[ct.TensorType(dtype=np.float32)],
        minimum_deployment_target=ct.target.macOS13,
        compute_units=ct.ComputeUnit.CPU_ONLY,
    )
    # test prediction
    sample_input = np.random.rand(1, 30).astype(np.float32) * 10
    model_output = mlmodel.predict({"input": sample_input})[mlmodel._spec.description.output[0].name]
    reference_output = traced_model(torch.from_numpy(sample_input)).detach().numpy()
    np.testing.assert_allclose(reference_output, model_output, rtol=1e-2, atol=1e-2)
def test_grayscale_input(self, rank4_input_model, rank3_input_model, rank4_grayscale_input_model):
    with pytest.raises(ValueError, match="must have rank 4"):
        mlmodel = ct.convert(
            rank3_input_model,
            inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
            minimum_deployment_target=ct.target.macOS13,
        )

    # invalid shape
    with pytest.raises(ValueError):
        mlmodel = ct.convert(
            rank4_input_model,
            inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
            minimum_deployment_target=ct.target.macOS13,
        )

    mlmodel = ct.convert(
        rank4_grayscale_input_model,
        inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
        minimum_deployment_target=ct.target.macOS13,
    )
    assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "transpose", "add", "cast"])
    assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
    assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
    assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32")
    verify_prediction(mlmodel)

    with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"):
        mlmodel = ct.convert(
            rank4_grayscale_input_model,
            inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
            minimum_deployment_target=ct.target.macOS12,
        )

    # test that grayscale_16 raises error when used with neural network
    with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"):
        mlmodel = ct.convert(
            rank4_grayscale_input_model,
            inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
        )

    mlmodel = ct.convert(
        rank4_grayscale_input_model,
        inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
        outputs=[ct.TensorType(dtype=np.float16)],
        minimum_deployment_target=ct.target.macOS13,
    )
    assert_ops_in_mil_program(mlmodel, expected_op_list=["transpose", "add"])
    assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
    assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16")
    assert_output_dtype(mlmodel, expected_type_str="fp16")
    verify_prediction(mlmodel)
def convertToCoremlSpec(torchScript, sampleInput):
    """Converts a TorchScript model to a CoreML spec."""
    try:
        print(f"Starting CoreML conversion with coremltools {ct.__version__}")
        nnSpec = ct.convert(
            torchScript,
            inputs=[
                ct.ImageType(
                    name="image",
                    shape=sampleInput.shape,
                    scale=1 / 255.0,
                    bias=[0, 0, 0],
                )
            ],
        ).get_spec()
        print("CoreML conversion success")
    except Exception as e:
        print(f"CoreML conversion failure: {e}")
        return
    return nnSpec
def test_multiarray_input_rangedim(self, convert_to):
    if convert_to == "mlprogram" and ct.utils._macos_version() < (12, 0):
        return

    example_input = torch.rand(1, 3, 50, 50) * 100
    traced_model = torch.jit.trace(TestConvModule().eval(), example_input)

    input_shape = ct.Shape(shape=(1, 3,
                                  ct.RangeDim(25, 100, default=45),
                                  ct.RangeDim(25, 100, default=45)))
    model = ct.convert(traced_model,
                       inputs=[ct.TensorType(shape=input_shape)],
                       convert_to=convert_to)

    spec = model.get_spec()
    assert list(spec.description.input[0].type.multiArrayType.shape) == [1, 3, 45, 45]
    assert spec.description.input[0].type.multiArrayType.shapeRange.sizeRanges[2].lowerBound == 25
    assert spec.description.input[0].type.multiArrayType.shapeRange.sizeRanges[2].upperBound == 100
    _assert_torch_coreml_output_shapes(model, spec, traced_model, example_input)
def test_mil_ranged_image_with_default(self):
    input_shape = [
        ct.ImageType(name="x", shape=(1, 3, 10, ct.RangeDim(10, 30, default=20)))
    ]
    mlmodel = ct.convert(self.basic_network, source="milinternal",
                         convert_to="mlprogram", inputs=input_shape)
    input_spec = mlmodel.get_spec().description.input
    assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec))
    assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
    assert input_spec[0].type.WhichOneof("Type") == "imageType", \
        "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type"))
    assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == "imageSizeRange", \
        "Expected imageSizeRange in SizeFlexibility"

    spec_H = input_spec[0].type.imageType.height
    spec_W = input_spec[0].type.imageType.width
    assert spec_H == 10 and spec_W == 20, \
        "expected [H, W] == [10, 20], got [{}, {}] instead".format(spec_H, spec_W)

    spec_H_range = [
        input_spec[0].type.imageType.imageSizeRange.heightRange.lowerBound,
        input_spec[0].type.imageType.imageSizeRange.heightRange.upperBound
    ]
    spec_W_range = [
        input_spec[0].type.imageType.imageSizeRange.widthRange.lowerBound,
        input_spec[0].type.imageType.imageSizeRange.widthRange.upperBound
    ]
    assert spec_H_range == [10, 10], "Ranged height mismatch"
    assert spec_W_range == [10, 30], "Ranged width mismatch"
def test_mil_ranged_multiarray_with_default(self):
    input_shape = [
        ct.TensorType(name="x", shape=(1, 3, 10, ct.RangeDim(10, 30, default=20)))
    ]
    mlmodel = ct.convert(self.basic_network, source="milinternal",
                         convert_to="mlprogram", inputs=input_shape)
    input_spec = mlmodel.get_spec().description.input
    assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec))
    assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
    assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", \
        "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type"))
    assert input_spec[0].type.multiArrayType.WhichOneof("ShapeFlexibility") == "shapeRange", \
        "Expected shapeRange in ShapeFlexibility"

    spec_default_shape = [s for s in input_spec[0].type.multiArrayType.shape]
    ranged_shapes = [(1, 1), (3, 3), (10, 10), (10, 30)]
    spec_ranged_shapes = []
    for range_dim in input_spec[0].type.multiArrayType.shapeRange.sizeRanges:
        spec_ranged_shapes.append(tuple([range_dim.lowerBound, range_dim.upperBound]))
    assert spec_default_shape == [1, 3, 10, 20], \
        "Expected default shape to be [1, 3, 10, 20], got {} instead".format(str(spec_default_shape))
    assert spec_ranged_shapes == ranged_shapes, "Ranged shape mismatch"
def convertCoreML(keras_model, labelbinarizer):
    # modelName for coreML model
    modelName = "rooms_coreml"
    cleaned_path = "."
    class_labels = labelbinarizer.classes_.tolist()
    mlconfig = coremltools.ClassifierConfig(class_labels)

    # load the trained convolutional neural network
    model = coremltools.convert(
        keras_model,
        input_names=["dense_input"],
        classifier_config=mlconfig,
    )
    spec = model.get_spec()
    model.author = "Rooms - https://github.com/st0nedB/rooms"
    model.license = "MIT"
    model.short_description = "This model can be used to predict in which room a device resides based on BLE beacon measurements."
    model.versionString = "Version 0.1"
    model.input_description["dense_input"] = "Vector of input RSSI"
    model.output_description["Identity"] = "Predicted Room"
    model.save(cleaned_path + "/" + modelName + ".mlmodel")
def test_can_build_keras_to_coreml_to_relay():
    """Test multiple conversion paths and importing from a saved file."""
    model = keras.models.Sequential()
    model.add(
        keras.layers.Conv2D(
            filters=6,
            kernel_size=(1, 1),
            activation="relu",
            padding="same",
            input_shape=(3, 3, 1),
            data_format="channels_first",
        )
    )

    with tempfile.TemporaryDirectory() as tmpdir:
        kmodel_fn = path.join(tmpdir, "c1mdl.h5")
        model.save(kmodel_fn)

        mdl = cm.convert(kmodel_fn)
        model_file = path.join(tmpdir, "c1.mlmodel")
        mdl.save(model_file)

        mdl = cm.models.MLModel(model_file)
        desc = mdl.get_spec().description
        iname = desc.input[0].name
        ishape = desc.input[0].type.multiArrayType.shape
        shape_dict = {}
        for i in mdl.get_spec().description.input:
            iname = i.name
            ishape = i.type.multiArrayType.shape
            shape_dict[iname] = ishape
        mod, params = relay.frontend.from_coreml(mdl, shape_dict)

        with tvm.transform.PassContext(opt_level=3):
            relay.build(mod, "llvm", params=params)
def test_mil_enumerated_image_with_default(self):
    enumerated_shapes = tuple([(1, 3, 10, 10), (1, 3, 10, 20), (1, 3, 10, 30)])
    input_shape = [
        ct.ImageType(name="x",
                     shape=ct.EnumeratedShapes(shapes=enumerated_shapes, default=(1, 3, 10, 30)))
    ]
    mlmodel = ct.convert(self.basic_network, source="milinternal",
                         convert_to="mlprogram", inputs=input_shape)
    input_spec = mlmodel.get_spec().description.input
    assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec))
    assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
    assert input_spec[0].type.WhichOneof("Type") == "imageType", \
        "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type"))
    assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == "enumeratedSizes", \
        "Expected enumeratedSizes in SizeFlexibility"

    spec_H = input_spec[0].type.imageType.height
    spec_W = input_spec[0].type.imageType.width
    assert spec_H == 10 and spec_W == 30, \
        "expected [H, W] == [10, 30], got [{}, {}] instead".format(spec_H, spec_W)

    spec_enumerated_shapes = set()
    for enumerated in input_spec[0].type.imageType.enumeratedSizes.sizes:
        spec_enumerated_shapes.add(tuple([1, 3, enumerated.height, enumerated.width]))
    assert spec_enumerated_shapes == set(enumerated_shapes), "Enumerated shape mismatch"
def test_grayscale_fp16_input_image(self, rank4_grayscale_input_model):
    mlmodel = ct.convert(
        rank4_grayscale_input_model,
        inputs=[
            ct.ImageType(name="input_image",
                         shape=(1, 1, 10, 20),
                         color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)
        ],
        outputs=[ct.TensorType(name="output")],
        minimum_deployment_target=ct.target.macOS13,
    )

    # incorrect way to do prediction
    with pytest.raises(
        TypeError,
        match="must be of type PIL.Image.Image with mode=='F'",
    ):
        sample_input = np.random.randint(low=0, high=246, size=(1, 1, 10, 20))
        img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.uint8), 'L')
        mlmodel.predict({"input_image": img_input})

    # correct way to do prediction
    sample_input = np.random.rand(1, 1, 10, 20)  # in between [0, 1]
    img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.float32), 'F')
    model_output = mlmodel.predict({"input_image": img_input})['output']
    reference_output = rank4_grayscale_input_model(
        torch.from_numpy(sample_input.astype(np.float32))).detach().numpy()
    np.testing.assert_allclose(reference_output, model_output, rtol=1e-2, atol=1e-2)
def test_rename_feature_mlprogram(self):
    torch_model = _torch.nn.ReLU().eval()
    model = coremltools.convert(
        _torch.jit.trace(torch_model, _torch.rand(3,)),
        inputs=[coremltools.TensorType(shape=(3,))],
        convert_to='mlprogram',
    )
    spec = model.get_spec()
    input_name = spec.description.input[0].name
    output_name = spec.description.output[0].name

    # rename input
    rename_feature(spec, input_name, "new_input_name")
    self.assertEqual(spec.description.input[0].name, "new_input_name")
    model = coremltools.models.MLModel(spec)
    out = model.predict({"new_input_name": np.array([1.0, 2.0, 3.0])})[output_name]
    self.assertEqual(out[0], 1.0)

    # rename output
    rename_feature(spec, output_name, "new_output_name")
    self.assertEqual(spec.description.output[0].name, "new_output_name")
    model = coremltools.models.MLModel(spec)
    out = model.predict({"new_input_name": np.array([1.0, 2.0, 3.0])})["new_output_name"]
    self.assertEqual(out[1], 2.0)
    print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
    print('ONNX export success, saved as %s' % f)
except Exception as e:
    print('ONNX export failure: %s' % e)

# CoreML export
try:
    import coremltools as ct

    print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
    # convert model from torchscript and apply pixel scaling as per detect.py
    model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
    f = opt.weights.replace('.pt', '.mlmodel')  # filename
    model.save(f)
    print('CoreML export success, saved as %s' % f)
except Exception as e:
    print('CoreML export failure: %s' % e)

# Finish
print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
def run(
        weights='./yolov5s.pt',  # weights path
        img_size=(640, 640),  # image (height, width)
        batch_size=1,  # batch size
        device='cpu',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        include=('torchscript', 'onnx', 'coreml'),  # include formats
        half=False,  # FP16 half-precision export
        inplace=False,  # set YOLOv5 Detect() inplace=True
        train=False,  # model.train() mode
        optimize=False,  # TorchScript: optimize for mobile
        dynamic=False,  # ONNX: dynamic axes
        simplify=False,  # ONNX: simplify model
        opset_version=12,  # ONNX: opset version
):
    t = time.time()
    include = [x.lower() for x in include]
    img_size *= 2 if len(img_size) == 1 else 1  # expand

    # Load PyTorch model
    device = select_device(device)
    assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0'
    model = attempt_load(weights, map_location=device)  # load FP32 model
    labels = model.names

    # Input
    gs = int(max(model.stride))  # grid size (max stride)
    img_size = [check_img_size(x, gs) for x in img_size]  # verify img_size are gs-multiples
    img = torch.zeros(batch_size, 3, *img_size).to(device)  # image size(1,3,320,192) iDetection

    # Update model
    if half:
        img, model = img.half(), model.half()  # to FP16
    model.train() if train else model.eval()  # training mode = no Detect() layer grid construction
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        if isinstance(m, Conv):  # assign export-friendly activations
            if isinstance(m.act, nn.Hardswish):
                m.act = Hardswish()
            elif isinstance(m.act, nn.SiLU):
                m.act = SiLU()
        elif isinstance(m, Detect):
            m.inplace = inplace
            m.onnx_dynamic = dynamic
            # m.forward = m.forward_export  # assign forward (optional)

    for _ in range(2):
        y = model(img)  # dry runs
    print(f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)")

    # TorchScript export -----------------------------------------------------------------------------------------------
    if 'torchscript' in include or 'coreml' in include:
        prefix = colorstr('TorchScript:')
        try:
            print(f'\n{prefix} starting export with torch {torch.__version__}...')
            f = weights.replace('.pt', '.torchscript.pt')  # filename
            ts = torch.jit.trace(model, img, strict=False)
            (optimize_for_mobile(ts) if optimize else ts).save(f)
            print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
        except Exception as e:
            print(f'{prefix} export failure: {e}')

    # ONNX export ------------------------------------------------------------------------------------------------------
    if 'onnx' in include:
        prefix = colorstr('ONNX:')
        try:
            import onnx

            print(f'{prefix} starting export with onnx {onnx.__version__}...')
            f = weights.replace('.pt', '.onnx')  # filename
            torch.onnx.export(model, img, f, verbose=False, opset_version=opset_version,
                              training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
                              do_constant_folding=not train,
                              input_names=['images'],
                              output_names=['output'],
                              dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'},  # shape(1,3,640,640)
                                            'output': {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)
                                            } if dynamic else None)

            # Checks
            model_onnx = onnx.load(f)  # load onnx model
            onnx.checker.check_model(model_onnx)  # check onnx model
            # print(onnx.helper.printable_graph(model_onnx.graph))  # print

            # Simplify
            if simplify:
                try:
                    check_requirements(['onnx-simplifier'])
                    import onnxsim

                    print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
                    model_onnx, check = onnxsim.simplify(model_onnx,
                                                         dynamic_input_shape=dynamic,
                                                         input_shapes={'images': list(img.shape)} if dynamic else None)
                    assert check, 'assert check failed'
                    onnx.save(model_onnx, f)
                except Exception as e:
                    print(f'{prefix} simplifier failure: {e}')
            print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
        except Exception as e:
            print(f'{prefix} export failure: {e}')

    # CoreML export ----------------------------------------------------------------------------------------------------
    if 'coreml' in include:
        prefix = colorstr('CoreML:')
        try:
            import coremltools as ct

            print(f'{prefix} starting export with coremltools {ct.__version__}...')
            assert train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`'
            model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
            f = weights.replace('.pt', '.mlmodel')  # filename
            model.save(f)
            print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
        except Exception as e:
            print(f'{prefix} export failure: {e}')

    # Finish
    print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.')
    import onnx

    print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
    f = opt.weights.replace('.pt', '.onnx')  # filename
    model.fuse()  # only for ONNX
    torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                      output_names=['classes', 'boxes'] if y is None else ['output'])

    # Checks
    onnx_model = onnx.load(f)  # load onnx model
    onnx.checker.check_model(onnx_model)  # check onnx model
    print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
    print('ONNX export success, saved as %s' % f)
except Exception as e:
    print('ONNX export failure: %s' % e)

# CoreML export
try:
    import coremltools as ct

    print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
    model = ct.convert(ts, inputs=[ct.ImageType(name='images', shape=img.shape)])  # convert
    f = opt.weights.replace('.pt', '.mlmodel')  # filename
    model.save(f)
    print('CoreML export success, saved as %s' % f)
except Exception as e:
    print('CoreML export failure: %s' % e)

# Finish
print('\nExport complete. Visualize with https://github.com/lutzroeder/netron.')
def test_mil_as_package(self):
    import torch

    num_tokens = 3
    embedding_size = 5

    class TestModule(torch.nn.Module):
        def __init__(self):
            super(TestModule, self).__init__()
            self.embedding = torch.nn.Embedding(num_tokens, embedding_size)

        def forward(self, x):
            return self.embedding(x)

    model = TestModule()
    model.eval()

    example_input = torch.randint(high=num_tokens, size=(2,), dtype=torch.int64)
    traced_model = torch.jit.trace(model, example_input)

    temp_package_dir = tempfile.TemporaryDirectory(suffix=".mlpackage")
    for converted_package_path in [None, temp_package_dir.name]:
        mlmodel = coremltools.convert(
            traced_model,
            package_dir=converted_package_path,
            source='pytorch',
            convert_to='mlprogram',
            compute_precision=coremltools.precision.FLOAT32,
            inputs=[
                coremltools.TensorType(
                    name="input",
                    shape=example_input.shape,
                    dtype=example_input.numpy().dtype,
                )
            ],
        )

        assert isinstance(mlmodel, MLModel)

        package_path = tempfile.mkdtemp(suffix=".mlpackage")
        mlmodel.save(package_path)

        assert ModelPackage.isValid(package_path)
        assert os.path.exists(ModelPackage(package_path).getRootModel().path())

        # Read back the saved bundle and compile
        mlmodel2 = MLModel(package_path, compute_units=ComputeUnit.CPU_ONLY)

        if utils._macos_version() >= (12, 0):
            result = mlmodel2.predict({
                "input": example_input.cpu().detach().numpy().astype(np.float32)
            })

            # Verify outputs
            expected = model(example_input)
            name = list(result.keys())[0]
            np.testing.assert_allclose(result[name], expected.cpu().detach().numpy())

        # Cleanup package
        shutil.rmtree(package_path)

        tmp_package_path = mlmodel.package_path
        assert os.path.exists(tmp_package_path)
        del mlmodel
        if converted_package_path is not None:
            # Verify we leave the provided package dir alone
            assert os.path.exists(tmp_package_path)

    temp_package_dir.cleanup()
                       help='Net file to be converted to a model checkpoint.')
argparser.add_argument('--start', type=int, default=0,
                       help='Offset to set global_step to.')
argparser.add_argument('--cfg', type=argparse.FileType('r'),
                       help='yaml configuration with training parameters')
argparser.add_argument('-e', '--ignore-errors', action='store_true',
                       help='Ignore missing and wrong sized values.')
args = argparser.parse_args()
cfg = yaml.safe_load(args.cfg.read())
print(yaml.dump(cfg, default_flow_style=False))

START_FROM = args.start

tfp = tfprocess.TFProcess(cfg)
tfp.init_net_v2()
tfp.replace_weights_v2(args.net, args.ignore_errors)
tfp.global_step.assign(START_FROM)

root_dir = os.path.join(cfg['training']['path'], cfg['name'])
if not os.path.exists(root_dir):
    os.makedirs(root_dir)
tfp.manager.save(checkpoint_number=START_FROM)
print("Wrote model to {}".format(tfp.manager.latest_checkpoint))

coreml_model = ct.convert(tfp.model, source='tensorflow')
coreml_model.save(args.net + '.mlmodel')
### tensorflow==2.3.1
import tensorflow as tf
import coremltools as ct

mlmodel = ct.convert('saved_model_age', source='tensorflow')
mlmodel.save("age_gender_recognition_62x62_float32.mlmodel")