def test_image_scaler_remover(self):  # type: () -> None
    """ImageScaler nodes should be removed from the graph and folded into
    CoreML preprocessing (scale + per-channel biases)."""
    graph_inputs = [("input", (1, 3, 50, 50))]
    graph_outputs = [("out", (1, 3, 50, 50), TensorProto.FLOAT)]
    scaler_node = helper.make_node(
        "ImageScaler",
        inputs=["input"],
        outputs=["scaler_out"],
        bias=[10, -6, 20],
        scale=3.0,
    )
    exp_node = helper.make_node("Exp", inputs=["scaler_out"], outputs=["out"])
    onnx_model = _onnx_create_model(
        [scaler_node, exp_node], graph_inputs, graph_outputs)

    graph = Graph.from_onnx(onnx_model.graph, onnx_ir_version=5)
    new_graph = graph.transformed([ImageScalerRemover()])
    # The transform drops the ImageScaler node and rewires Exp to the input.
    self.assertEqual(len(graph.nodes), 2)
    self.assertEqual(len(new_graph.nodes), 1)
    self.assertEqual(new_graph.nodes[0].inputs[0], "input")
    self.assertEqual(new_graph.nodes[0].outputs[0], "out")

    spec = convert(onnx_model).get_spec()
    scaler = spec.neuralNetwork.preprocessing[0].scaler
    self.assertEqual(scaler.channelScale, 3.0)
    self.assertEqual(scaler.blueBias, 20.0)
    self.assertEqual(scaler.greenBias, -6.0)
    self.assertEqual(scaler.redBias, 10.0)
def test_resize_node_without_scales(self):
    """A Resize node with empty scales/sizes should convert to an upsample
    layer whose scaling factors are derived from the input/output shapes."""
    in_shape = (1, 3, 192, 78)
    out_shape = (1, 3, 384, 234)
    # Empty roi/scales/sizes initializers force the converter to infer the
    # scaling factors from the shapes.
    roi = from_array(np.array([], dtype=int), name="roi")
    scales = from_array(np.empty([], dtype=int), name="scales")
    sizes = from_array(np.empty([], dtype=int), name="sizes")
    onnx_model_to_test = _onnx_create_single_node_model(
        "Resize",
        [in_shape],
        [out_shape],
        initializer=[roi, scales, sizes],
        coordinate_transformation_mode="pytorch_half_pixel",
        cubic_coeff_a=-0.5,
        mode="linear",
        nearest_mode="floor",
    )
    converted = convert(onnx_model_to_test, minimum_ios_deployment_target="13")
    layers = converted.get_spec().neuralNetwork.layers
    self.assertEqual(len(layers), 1,
                     msg="Wrong number of layers in converted network")
    layer = layers[0]
    self.assertTrue(hasattr(layer, "upsample"), msg="Wrong resize conversion")
    self.assertEqual(len(layer.upsample.scalingFactor), 2,
                     msg="Wrong number of scaling factors")
    self.assertSequenceEqual(
        layer.upsample.scalingFactor,
        (out_shape[2] // in_shape[2], out_shape[3] // in_shape[3]),
        msg="Conversion produces wrong scaling factor",
    )
def test_unsupported_op_attribute(self):  # type: () -> None
    """A Flatten with an unsupported axis attribute should be emitted as a
    custom layer when add_custom_layers is enabled."""
    model = _make_model_flatten_axis3()
    spec = convert(model, add_custom_layers=True).get_spec()
    first_layer = spec.neuralNetwork.layers[0]
    self.assertIsNotNone(first_layer.custom)
    self.assertEqual("Flatten", first_layer.custom.className)
def _test_torch_model_single_io(
    torch_model,
    torch_input_shape,
    coreml_input_shape,
    minimum_ios_deployment_target="12",
    decimal=4,
    opset_version=9,
):
    """Round-trip a single-input/single-output torch model through ONNX to
    CoreML and compare the outputs of the two runtimes.

    Args:
        torch_model: the torch module to test.
        torch_input_shape: shape of the random input fed to torch.
        coreml_input_shape: reshape target for CoreML when ND arrays are not
            supported by the deployment target.
        minimum_ios_deployment_target: passed through to ``convert``.
        decimal: decimal precision used when comparing outputs.
        opset_version: ONNX opset used for the torch export.
    """
    # Run the torch model on a random input.
    torch_input = torch.rand(*torch_input_shape)
    torch_out_raw = torch_model(torch_input)
    if isinstance(torch_out_raw, tuple):
        torch_out = torch_out_raw[0].detach().numpy()
    else:
        torch_out = torch_out_raw.detach().numpy()

    model_dir = tempfile.mkdtemp()
    if DEBUG:
        model_dir = "/tmp"
    try:
        # Export torch -> ONNX.
        onnx_file = os.path.join(model_dir, "torch_model.onnx")
        torch.onnx.export(torch_model, torch_input, onnx_file,
                          opset_version=opset_version)
        onnx_model = onnx.load(onnx_file)

        # Convert ONNX -> CoreML.
        coreml_model = convert(
            onnx_model,
            minimum_ios_deployment_target=minimum_ios_deployment_target,
        )

        output_name = [o.name for o in onnx_model.graph.output][0]
        # Graph inputs that are initializers are weights, not real inputs.
        initializer_names = {t.name for t in onnx_model.graph.initializer}
        input_name = [
            i.name for i in onnx_model.graph.input
            if i.name not in initializer_names
        ][0]
        input_numpy = torch_input.detach().numpy()
        if SupportedVersion.is_nd_array_supported(minimum_ios_deployment_target):
            input_dict = {input_name: input_numpy}  # type: ignore
        else:
            # Older targets need the input reshaped to the CoreML layout.
            input_dict = {input_name: np.reshape(input_numpy, coreml_input_shape)}  # type: ignore

        if _IS_MACOS:
            coreml_out = coreml_model.predict(input_dict, useCPUOnly=True)[output_name]
            if DEBUG:
                coreml_model.save(model_dir + "/torch_model.mlmodel")
                print("coreml_out")
                print(np.squeeze(coreml_out))
                print("torch_out")
                print(np.squeeze(torch_out))
                print("coreml out shape ", coreml_out.shape)
                print("torch out shape: ", torch_out.shape)
            # Compare torch vs CoreML outputs.
            _assert_outputs([torch_out], [coreml_out], decimal=decimal)  # type: ignore
    finally:
        # BUG FIX: the temp dir previously leaked when an assertion or any
        # exception fired before the trailing cleanup; always remove it here
        # (kept around in DEBUG mode for inspection).
        if not DEBUG and os.path.exists(model_dir):
            shutil.rmtree(model_dir)
def test_unsupported_op_attribute_provide_functions(self):  # type: () -> None
    """A user-supplied conversion function for Flatten should be applied and
    should carry the axis attribute through to the custom layer, both with
    and without add_custom_layers."""

    def convert_flatten(builder, node, graph, err):
        # Emit Flatten as a custom layer, preserving the axis attribute.
        params = NeuralNetwork_pb2.CustomLayerParams()
        params.className = node.op_type
        params.description = "Custom layer that corresponds to the ONNX op {}".format(
            node.op_type,
        )
        params.parameters["axis"].intValue = node.attrs["axis"]
        builder.add_custom(
            name=node.name,
            input_names=node.inputs,
            output_names=node.outputs,
            custom_proto_spec=params,
        )

    def test_conversion(onnx_model, add_custom_layers=False):
        # Convert and verify the resulting custom layer and its axis value.
        coreml_model = convert(
            onnx_model,
            add_custom_layers=add_custom_layers,
            custom_conversion_functions={"Flatten": convert_flatten},
        )
        layers = coreml_model.get_spec().neuralNetwork.layers
        self.assertIsNotNone(layers[0].custom)
        self.assertEqual("Flatten", layers[0].custom.className)
        self.assertEqual(3, layers[0].custom.parameters["axis"].intValue)

    onnx_model = _make_model_flatten_axis3()
    # BUG FIX: test_conversion was defined but never invoked, so the two
    # convert() calls below previously asserted nothing. Run the checks for
    # both add_custom_layers settings.
    test_conversion(onnx_model, add_custom_layers=True)
    test_conversion(onnx_model, add_custom_layers=False)
def test_unsupported_ops(self):  # type: () -> None
    """Unsupported ops (Acos, TopK) should be emitted as custom layers when
    add_custom_layers is enabled; the supported Exp in between is not."""
    model = _make_model_acos_exp_topk()
    spec = convert(model, add_custom_layers=True).get_spec()
    layers = spec.neuralNetwork.layers
    for index, expected_class in ((0, "Acos"), (2, "TopK")):
        self.assertIsNotNone(layers[index].custom)
        self.assertEqual(expected_class, layers[index].custom.className)
def test_conversion(onnx_model, add_custom_layers=False):
    """Convert with a custom Flatten handler and verify the custom layer.

    NOTE(review): relies on `convert_flatten` and `self` as free variables
    from an enclosing test method's scope, so this only works as a nested
    helper — confirm the enclosing context when moving it.
    """
    converted = convert(
        onnx_model,
        add_custom_layers=add_custom_layers,
        custom_conversion_functions={"Flatten": convert_flatten},
    )
    head = converted.get_spec().neuralNetwork.layers[0]
    self.assertIsNotNone(head.custom)
    self.assertEqual("Flatten", head.custom.className)
    self.assertEqual(3, head.custom.parameters["axis"].intValue)
def test_convert_image_output_bgr(self):  # type: () -> None
    """With is_bgr deprocessing, the image output channels should come back
    in reversed (BGR) order relative to the input array."""
    coreml_model = convert(
        self.onnx_model,
        image_input_names=self.input_names,
        image_output_names=self.output_names,
        deprocessing_args={"is_bgr": True},
    )
    if _IS_MACOS:
        prediction = coreml_model.predict({self.input_names[0]: self.img})
        # Drop any alpha channel and move channels first for comparison.
        produced = np.array(prediction[self.output_names[0]])[:, :, :3].transpose((2, 0, 1))
        expected = self.img_arr[:, :, ::-1].transpose((2, 0, 1))
        npt.assert_equal(produced, expected)
def skip_test_lstm(self):  # type: () -> None
    """Compare a single-node LSTM between the caffe2 ONNX backend and the
    CoreML conversion (test currently disabled via the skip_ prefix)."""
    x = 4
    h = 2
    seq_length = 3
    W = from_array(_random_array((4 * h, x)), name="gate_weights")
    R = from_array(_random_array((4 * h, h)), name="recursion_weights")
    B = from_array(_random_array((8 * h,)), name="biases")
    seq_lens_input = from_array(
        np.array([seq_length]).astype(np.int32), name="seq_lens_input")
    initial_h = from_array(
        np.zeros((1, 1, h)).astype(np.float32), name="initial_h")
    initial_c = from_array(
        np.zeros((1, 1, h)).astype(np.float32), name="initial_c")

    input_shape = (seq_length, 1, x)
    output_shape_all = (seq_length, 1, h)
    output_shape_last = (1, 1, h)
    onnx_model = _onnx_create_single_node_model(
        "LSTM",
        [input_shape],
        [output_shape_all, output_shape_last],
        initializer=[W, R, B, seq_lens_input, initial_h, initial_c],
        hidden_size=h,
    )
    X = np.random.rand(*input_shape).astype("float32")  # type: ignore

    # Reference outputs from the caffe2 ONNX backend.
    import caffe2.python.onnx.backend
    prepared_backend = caffe2.python.onnx.backend.prepare(onnx_model)
    out = prepared_backend.run({"input0": X})
    caffe2_out_all = out["output0"]
    caffe2_out_last = out["output1"]

    coreml_model = convert(onnx_model)
    inputdict = {
        "input0": X,
        "initial_h": np.zeros((h), dtype=np.float32),
        "initial_c": np.zeros((h), dtype=np.float32),
    }
    coreml_out_dict = coreml_model.predict(inputdict, useCPUOnly=True)
    _assert_outputs(caffe2_out_all.flatten(),
                    coreml_out_dict["output0"].flatten(), decimal=5)
    _assert_outputs(caffe2_out_last.flatten(),
                    coreml_out_dict["output1"].flatten(), decimal=5)
def test_unsupported_ops_provide_functions(self):  # type: () -> None
    """Custom conversion functions for Acos and TopK should be applied, with
    TopK's axis attribute (default -1, 0 in this model) carried through."""

    def _make_params(node):
        # Shared boilerplate for building a CustomLayerParams proto.
        params = NeuralNetwork_pb2.CustomLayerParams()
        params.className = node.op_type
        params.description = "Custom layer that corresponds to the ONNX op {}".format(
            node.op_type,
        )
        return params

    def convert_acos(builder, node, graph, err):
        builder.add_custom(
            name=node.name,
            input_names=node.inputs,
            output_names=node.outputs,
            custom_proto_spec=_make_params(node),
        )

    def convert_topk(builder, node, graph, err):
        params = _make_params(node)
        params.parameters["axis"].intValue = node.attrs.get("axis", -1)
        builder.add_custom(
            name=node.name,
            input_names=node.inputs,
            output_names=node.outputs,
            custom_proto_spec=params,
        )

    onnx_model = _make_model_acos_exp_topk()
    coreml_model = convert(
        model=onnx_model,
        add_custom_layers=True,
        custom_conversion_functions={"Acos": convert_acos, "TopK": convert_topk},
    )
    layers = coreml_model.get_spec().neuralNetwork.layers
    self.assertIsNotNone(layers[0].custom)
    self.assertIsNotNone(layers[2].custom)
    self.assertEqual("Acos", layers[0].custom.className)
    self.assertEqual("TopK", layers[2].custom.className)
    self.assertEqual(0, layers[2].custom.parameters["axis"].intValue)
def test_multiple_image_scaler(self):  # type : () -> None
    """Two ImageScaler nodes (RGB and grayscale inputs) should map to two
    separate CoreML preprocessing entries, leaving only the Concat layer."""
    inputs = [("input_color", (1, 3, 10, 10)), ("input_gray", (1, 1, 10, 10))]
    outputs = [("out", (1, 4, 10, 10), TensorProto.FLOAT)]
    color_scaler = helper.make_node(
        "ImageScaler",
        inputs=["input_color"],
        outputs=["scaler_out_1"],
        bias=[10, -6, 20],
        scale=3.0,
    )
    gray_scaler = helper.make_node(
        "ImageScaler",
        inputs=["input_gray"],
        outputs=["scaler_out_2"],
        bias=[-13],
        scale=5.0,
    )
    concat = helper.make_node(
        "Concat",
        inputs=["scaler_out_1", "scaler_out_2"],
        outputs=["out"],
        axis=1)
    onnx_model = _onnx_create_model(
        [color_scaler, gray_scaler, concat], inputs, outputs)

    spec = convert(onnx_model).get_spec()
    self.assertEqual(len(spec.neuralNetwork.layers), 1)
    preprocessing = spec.neuralNetwork.preprocessing
    self.assertEqual(len(preprocessing), 2)
    # RGB scaler: shared scale plus per-channel biases.
    self.assertEqual(preprocessing[0].scaler.channelScale, 3.0)
    self.assertEqual(preprocessing[0].scaler.blueBias, 20.0)
    self.assertEqual(preprocessing[0].scaler.greenBias, -6.0)
    self.assertEqual(preprocessing[0].scaler.redBias, 10.0)
    # Grayscale scaler: single gray bias.
    self.assertEqual(preprocessing[1].scaler.channelScale, 5.0)
    self.assertEqual(preprocessing[1].scaler.grayBias, -13.0)
def _coreml_forward_onnx_model(
    model,  # type: ModelProto
    input_dict,  # type: Dict[Text, np._ArrayLike[Any]]
    onnx_coreml_input_shape_map=None,  # type: Dict[Text, List[int,...]]
    minimum_ios_deployment_target="12",
):
    # type: (...) -> np.ndarray[Any]
    """Convert an ONNX model to CoreML and run it on ``input_dict``.

    Args:
        model: the ONNX model to convert.
        input_dict: mapping of input name to input array.
        onnx_coreml_input_shape_map: optional shape remapping forwarded to
            ``convert``; defaults to an empty mapping.
        minimum_ios_deployment_target: forwarded to both conversion and run.

    Returns:
        The CoreML model outputs, in the order of the ONNX graph outputs.
    """
    # BUG FIX: the default was a mutable ``{}`` shared across calls; use a
    # None sentinel instead (backward-compatible for all callers).
    if onnx_coreml_input_shape_map is None:
        onnx_coreml_input_shape_map = {}
    coreml_model = convert(
        model,
        onnx_coreml_input_shape_map=onnx_coreml_input_shape_map,
        minimum_ios_deployment_target=minimum_ios_deployment_target,
    )
    output_names = [o.name for o in model.graph.output]
    return _coreml_forward_model(
        coreml_model,
        input_dict,
        output_names,
        minimum_ios_deployment_target=minimum_ios_deployment_target,
    )
def test_convert_image_input_preprocess(self):  # type: () -> None
    """Per-channel biases supplied via preprocessing_args should be added to
    the (BGR-reversed) input image."""
    bias = np.array([100, 90, 80])
    coreml_model = convert(
        self.onnx_model,
        image_input_names=self.input_names,
        preprocessing_args={
            "is_bgr": True,
            "blue_bias": bias[0],
            "green_bias": bias[1],
            "red_bias": bias[2],
        },
    )
    if _IS_MACOS:
        output = coreml_model.predict(
            {self.input_names[0]: self.img})[self.output_names[0]]
        expected_output = self.img_arr[:, :, ::-1].transpose((2, 0, 1))
        # Apply the same per-channel bias the converter was asked to add.
        for channel in range(3):
            expected_output[channel] = expected_output[channel] + bias[channel]
        npt.assert_equal(output.flatten(), expected_output.flatten())
def main(model_path):
    """Convert the ONNX model at ``model_path`` to a CoreML ``.mlmodel``
    saved alongside it (same path with the extension swapped)."""
    basename = model_path.split('.onnx')[0]
    converted = convert(model_path, minimum_ios_deployment_target='13')
    converted.short_description = "ONNX Model converted with coremltools"
    converted.save(f"{basename}.mlmodel")
def test_convert_image_output(self):  # type: () -> None
    """Outputs listed in image_output_names should get imageType specs."""
    spec = convert(self.onnx_model,
                   image_output_names=self.output_names).get_spec()
    for output in spec.description.output:
        self.assertEqual(output.type.WhichOneof("Type"), "imageType")
from coremltools.converters.onnx import convert

# Source ONNX model and destination CoreML model paths.
SOURCE_ONNX = 'onnx_files/model_Linknet_resnet18_DiceLoss_best_model40.onnx'
TARGET_MLMODEL = 'mlmodel_files/model_PAN_resnet18_DiceLoss_best_model40.mlmodel'

# Load the ONNX model as a CoreML model
model = convert(
    model=SOURCE_ONNX,
    image_input_names=['my_input'],
    image_output_names=['my_output'],
    preprocessing_args={'image_scale': 1./255.},
    minimum_ios_deployment_target='13')

# Save the CoreML model
# NOTE(review): the output filename says "PAN" while the input says
# "Linknet" — confirm the mismatch is intentional.
model.save(TARGET_MLMODEL)
def convert_to_coreml(self, module, config):
    """Convert ``module`` to CoreML (iOS 13 target) and save to config.dst."""
    converted = coreml.convert(module, minimum_ios_deployment_target='13')
    converted.save(config.dst)