def test_dead_layer_partial_branch(self):
    """Verify remove_disconnected_layers prunes dead layers inside one
    branch arm while leaving the live path and the other arm untouched."""
    convergence_tolerance = 1e-8

    input_features = [("input", datatypes.Array(*(2,)))]
    output_features = [("out", None)]
    builder = neural_network.NeuralNetworkBuilder(
        input_features, output_features, disable_rank5_shape_mapping=True
    )
    # add condition to break from the loop, if convergence criterion is met
    builder.add_less_than("cond", ["input"], "cond", alpha=convergence_tolerance)
    branch_layer = builder.add_branch("branch_layer", "cond")
    # if-branch: both layers feed the final output -> all layers are live
    builder_ifbranch = neural_network.NeuralNetworkBuilder(
        nn_spec=branch_layer.branch.ifBranch
    )
    builder_ifbranch.add_activation("relu1", "RELU", "input", "relu1_out")
    builder_ifbranch.add_activation("relu2_out", "RELU", "relu1_out", "relu2_out")
    # else-branch: linear_red_1/linear_red_2 form a dead chain (their final
    # output "linear_red2_out" is never consumed), so the pass should drop them
    builder_elsebranch = neural_network.NeuralNetworkBuilder(
        nn_spec=branch_layer.branch.elseBranch
    )
    builder_elsebranch.add_activation("linear1", "LINEAR", "input", "linear1_out")
    builder_elsebranch.add_activation(
        "linear_red_1", "LINEAR", "input", "linear_red1_out"
    )
    builder_elsebranch.add_activation(
        "linear_red_2", "LINEAR", "linear_red1_out", "linear_red2_out"
    )
    builder_elsebranch.add_activation(
        "linear2", "LINEAR", "linear1_out", "relu2_out"
    )
    builder.add_squeeze("out", "relu2_out", "out", squeeze_all=True)

    mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY)

    if not _IS_MACOS:
        # Can not get predictions unless on macOS.
        return

    data = np.random.rand(2,)
    data_dict = {"input": data}
    before_pass_out = mlmodel.predict(data_dict)["out"]
    if DEBUG:
        print("\n mlmodel description before remove disconnected layers pass: \n")
        print_network_spec(builder.spec, style="coding")
    old_spec = copy.copy(builder.spec)
    remove_disconnected_layers(builder.spec)
    if DEBUG:
        print("\n mlmodel description after remove disconnected layers pass: \n")
        print_network_spec(builder.spec, style="coding")
    mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY)
    after_pass_out = mlmodel.predict(data_dict)["out"]
    # predictions must agree; if-branch layer count unchanged, else-branch
    # reduced to the two live linear layers
    np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
    np.testing.assert_equal(
        len(old_spec.neuralNetwork.layers[1].branch.ifBranch.layers),
        len(builder.spec.neuralNetwork.layers[1].branch.ifBranch.layers),
    )
    np.testing.assert_equal(
        len(builder.spec.neuralNetwork.layers[1].branch.elseBranch.layers), 2
    )
def test_dead_layer_remove_branch(self):
    """Verify remove_disconnected_layers deletes an entire branch layer
    whose outputs never reach the network output."""
    convergence_tolerance = 1e-8

    input_features = [('input', datatypes.Array(*(2,)))]
    output_features = [('out', None)]
    builder = neural_network.NeuralNetworkBuilder(input_features, output_features,
                                                  disable_rank5_shape_mapping=True)
    # add condition to break from the loop, if convergence criterion is met
    builder.add_less_than('cond', ['input'], 'cond', alpha=convergence_tolerance)
    branch_layer = builder.add_branch('branch_layer', 'cond')
    builder_ifbranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.ifBranch)
    builder_ifbranch.add_activation('relu1', 'RELU', 'input', 'relu1_out')
    builder_ifbranch.add_activation('relu2_out', 'RELU', 'relu1_out', 'relu2_out')
    builder_elsebranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.elseBranch)
    builder_elsebranch.add_activation('linear1', 'LINEAR', 'input', 'linear1_out')
    builder_elsebranch.add_activation('linear2', 'LINEAR', 'linear1_out', 'relu2_out')
    # the final squeeze reads 'input' directly, so nothing produced by the
    # branch layer is ever consumed -> the whole branch is dead
    builder.add_squeeze('out', 'input', 'out', squeeze_all=True)

    mlmodel = MLModel(builder.spec)
    # NOTE(review): predict() is called without a macOS guard here, unlike
    # test_dead_layer_partial_branch — confirm this suite only runs on macOS.
    data = np.random.rand(2,)
    data_dict = {'input': data}
    before_pass_out = mlmodel.predict(data_dict)['out']
    if DEBUG:
        print('\n mlmodel description before remove disconnected layers pass: \n')
        print_network_spec(builder.spec, style='coding')
    remove_disconnected_layers(builder.spec)
    if DEBUG:
        print('\n mlmodel description after remove disconnected layers pass: \n')
        print_network_spec(builder.spec, style='coding')
    mlmodel = MLModel(builder.spec)
    after_pass_out = mlmodel.predict(data_dict)['out']
    # only the squeeze layer should survive the pass
    np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=4)
    np.testing.assert_equal(len(builder.spec.neuralNetwork.layers), 1)
def test_future_version(self):
    """A spec with an unsupported future version loads but cannot predict."""
    self.spec.specificationVersion = 10000
    future_model = MLModel(self.spec)
    # the model object itself is created fine; only predict() should fail
    # because the engine doesn't support this model version
    self.assertIsNotNone(future_model)
    with self.assertRaises(Exception):
        future_model.predict(1)
    self.spec.specificationVersion = 1
def test_pipeline_rename(self):
    """Renaming a pipeline's input must not change its predictions."""
    # Convert
    scikit_spec = converter.convert(self.scikit_model).get_spec()
    model = MLModel(scikit_spec)
    sample_data = self.scikit_data.data[0]

    # Rename
    rename_feature(scikit_spec, 'input', 'renamed_input')
    renamed_model = MLModel(scikit_spec)

    # Check the predictions match under the new input name.
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(
        model.predict({'input': sample_data}),
        renamed_model.predict({'renamed_input': sample_data}))
def test_future_version(self):
    """A saved spec with an unsupported future specificationVersion must
    still load, but predict() must raise a descriptive error."""
    self.spec.specificationVersion = 10000
    # tempfile.mktemp is deprecated and race-prone (name generated before
    # creation); create the file atomically and reuse its name instead.
    with tempfile.NamedTemporaryFile(suffix='.mlmodel', delete=False) as f:
        filename = f.name
    save_spec(self.spec, filename, auto_set_specification_version=False)
    model = MLModel(filename)
    # this model should exist, but throw an exception when we try to use
    # predict because the engine doesn't support this model version
    self.assertIsNotNone(model)
    with self.assertRaises(Exception):
        try:
            model.predict({})
        except Exception as e:
            assert 'Core ML model specification version' in str(e)
            raise
    self.spec.specificationVersion = 1
def test_rename_output(self):
    """rename_feature on an output must expose predictions under the new name."""
    rename_feature(
        self.spec, 'output', 'renamed_output',
        rename_inputs=False, rename_outputs=True)
    model = MLModel(self.spec)
    preds = model.predict({'feature_1': 1.0, 'feature_2': 1.0})
    self.assertIsNotNone(preds)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(preds['renamed_output'], 3.1)
    # restore the original output name so later tests see a clean spec
    rename_feature(
        self.spec, 'renamed_output', 'output',
        rename_inputs=False, rename_outputs=True)
def test_linear_quant_inner_product_3bit(self):
    """Inner product with 3-bit linearly quantized weights: dequantized
    weight = quant_scale * code + quant_bias, per output channel."""
    # raw 3-bit codes 0..5 packed row-major into a (2, 3) weight matrix
    W = np.reshape(np.arange(6), (2, 3)).astype(np.uint8)
    input_features = [("data", datatypes.Array(3))]
    output_features = [("probs", None)]
    builder = NeuralNetworkBuilder(input_features, output_features)
    builder.add_inner_product(
        name="ip1",
        W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 3).tobytes(),
        b=None,
        input_channels=3,
        output_channels=2,
        has_bias=False,
        input_name="data",
        output_name="probs",
        quantization_type="linear",
        nbits=3,
        quant_scale=[11.0, 2.0],   # per-output-channel scale
        quant_bias=[-2.0, 10.0],   # per-output-channel bias
    )
    mlmodel = MLModel(builder.spec)
    data = np.array([1.0, 3.0, 5.0])
    data_dict = {"data": data}
    probs = mlmodel.predict(data_dict)["probs"]
    # dot(data, dequantized weight rows) -> [125, 170]
    expected_out = np.array([125, 170])
    self.assertTrue(np.allclose(probs.flatten(), expected_out.flatten()))
def test_predict_api(self):
    """Save the model as an .mlpackage and predict with every compute unit."""
    mlmodel = MLModel(self.spec)
    pkg_dir = tempfile.TemporaryDirectory(suffix=".mlpackage")
    pkg_dir.cleanup()
    mlmodel.save(pkg_dir.name)
    if utils._macos_version() < (12, 0):
        # just check if we can load it
        reloaded = MLModel(pkg_dir.name)
    else:
        for cu in coremltools.ComputeUnit:
            # CPU_AND_NE is only available from macOS 13 on
            if (cu == coremltools.ComputeUnit.CPU_AND_NE
                    and utils._macos_version() < (13, 0)):
                continue
            reloaded = MLModel(pkg_dir.name, compute_units=cu)
            outputs = reloaded.predict({
                "feature_1": 1.0,
                "feature_2": 1.0
            })
            assert outputs is not None
            assert outputs["output"] == 3.1
            assert reloaded.compute_unit == cu
    # cleanup
    _remove_path(pkg_dir.name)
def test_linear_quant_batchedmatmul_5bit(self):
    """Batched matmul with 5-bit linearly quantized weights, per-column
    scale/bias: W_dequant = quant_scale * code + quant_bias."""
    # 5-bit codes (0..31) for a (2, 3) weight matrix
    W = np.zeros((2, 3), dtype=np.uint8)
    W[0, :] = [31, 20, 11]
    W[1, :] = [1, 0, 8]
    quant_scale = np.reshape(np.array([10.0, 2.0, 3.0]), (1, 3))
    quant_bias = np.reshape(np.array([-2.0, -10.0, 6.0]), (1, 3))
    # reference dequantized weights used to compute the expected output
    W_unquantized = np.broadcast_to(quant_scale, (2, 3)) * W + np.broadcast_to(quant_bias, (2, 3))
    bias = np.array([1.0, 2.0, 3.0])
    input_features = [('data', datatypes.Array(2, 2))]
    output_features = [('out', None)]
    builder = NeuralNetworkBuilder(input_features, output_features,
                                   disable_rank5_shape_mapping=True)
    builder.add_batched_mat_mul(name='batched_matmul',
                                input_names=['data'],
                                output_name='out',
                                weight_matrix_rows=2,
                                weight_matrix_columns=3,
                                W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 5).tobytes(),
                                bias=bias,
                                is_quantized_weight=True,
                                quantization_type='linear',
                                nbits=5,
                                quant_scale=quant_scale.flatten(),
                                quant_bias=quant_bias.flatten())
    mlmodel = MLModel(builder.spec)
    data = np.zeros((2, 2), dtype=np.float32)
    data[0, :] = [5, 6]
    data[1, :] = [10, 12]
    data_dict = {'data': data}
    out = mlmodel.predict(data_dict, useCPUOnly=True)['out']
    # expected: data @ W_dequant + bias
    expected_out = np.matmul(data, W_unquantized) + bias
    self.assertTrue(out.shape == expected_out.shape)
    self.assertTrue(np.allclose(out.flatten(), expected_out.flatten()))
def CoreMLEmit(original_framework, architecture_name, architecture_path, weight_path, image_path):
    """Emit a Core ML model from IR (architecture + weights), run it on the
    image at image_path, and return the squeezed output array."""
    from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter
    from coremltools.models import MLModel

    original_framework = checkfrozen(original_framework)

    # per-architecture image preprocessing constants:
    # scale, b, g, r, BGRTranspose
    prep_for_coreml = {
        'inception_v3': [0.00784313771874, -1.0, -1.0, -1.0, False],
        'vgg16': [1.0, -103.939002991, -116.778999329, -123.680000305, True],
        'resnet50': [1.0, -103.939002991, -116.778999329, -123.680000305, True],
        'mobilenet': [
            0.0170000009239, -1.76698005199, -1.98526000977, -2.10256004333, True
        ],
        'tinyyolo': [0.00392156885937, 0, 0, 0, False]
    }

    # IR to Model
    # converted_file = original_framework + '_coreml_' + architecture_name + "_converted"
    # converted_file = converted_file.replace('.', '_')

    # image: preprocess with the framework/architecture-specific function
    func = TestKit.preprocess_func[original_framework][architecture_name]
    img = func(image_path)
    prep_list = prep_for_coreml[architecture_name]

    emitter = CoreMLEmitter(architecture_path, weight_path)
    # NOTE(review): image_input_names is passed the image *path*, not an
    # input feature name — confirm gen_model really expects a path here.
    model, input_name, output_name = emitter.gen_model(
        input_names=None,
        output_names=None,
        image_input_names=image_path,
        is_bgr=prep_list[4],
        red_bias=prep_list[3],
        green_bias=prep_list[2],
        blue_bias=prep_list[1],
        gray_bias=0.0,
        image_scale=prep_list[0],
        class_labels=None,
        predicted_feature_name=None,
        predicted_probabilities_output='')

    # presumably (name, shape) pairs; keep only the first entry's name
    input_name = str(input_name[0][0])
    output_name = str(output_name[0][0])

    # load model
    model = MLModel(model)

    # inference
    coreml_input = {input_name: img}
    coreml_output = model.predict(coreml_input)
    prob = coreml_output[output_name]
    prob = np.array(prob).squeeze()
    return prob
def test_lut_quant_inner_product_1bit(self):
    """Inner product with 1-bit LUT-quantized weights: each bit selects
    quant_lut[0] (=5.0) or quant_lut[1] (=-3.0)."""
    W = np.zeros((2, 3), dtype=np.uint8)
    W[0, :] = [0, 1, 1]
    W[1, :] = [1, 0, 0]
    input_features = [("data", datatypes.Array(3))]
    output_features = [("probs", None)]
    builder = NeuralNetworkBuilder(input_features, output_features)
    builder.add_inner_product(
        name="ip1",
        W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 1).tobytes(),
        b=None,
        input_channels=3,
        output_channels=2,
        has_bias=False,
        input_name="data",
        output_name="probs",
        quantization_type="lut",
        nbits=1,
        quant_lut=[5.0, -3.0],
    )
    mlmodel = MLModel(builder.spec)
    data = np.array([1.0, 3.0, 5.0])
    data_dict = {"data": data}
    probs = mlmodel.predict(data_dict)["probs"]
    # row 0: [5,-3,-3].[1,3,5] = -19; row 1: [-3,5,5].[1,3,5] = 37
    expected_out = np.array([-19, 37])
    self.assertTrue(np.allclose(probs.flatten(), expected_out.flatten()))
def test_predict_api(self):
    """Round-trip save/load of an .mlpackage, predicting under every compute unit."""
    mlmodel = MLModel(self.spec)
    pkg = tempfile.TemporaryDirectory(suffix=".mlpackage")
    pkg.cleanup()
    mlmodel.save(pkg.name)
    if utils._macos_version() < (12, 0):
        # just check if we can load it
        reloaded = MLModel(pkg.name)
    else:
        for cu in coremltools.ComputeUnit:
            reloaded = MLModel(pkg.name, compute_units=cu)
            result = reloaded.predict({
                "feature_1": 1.0,
                "feature_2": 1.0
            })
            self.assertIsNotNone(result)
            self.assertEqual(result["output"], 3.1)
            self.assertEqual(reloaded.compute_unit, cu)
    # cleanup
    MLModelTest._remove_path(pkg.name)
def inference(cls, architecture, model_path, image_path):
    """Preprocess the image, run the Core ML model at model_path, and
    return the squeezed output array (None if the sanity check fails)."""
    # removed unused `input_data` local and unused `infer_shapes` import
    import numpy as np

    if not cls.sanity_check(architecture):
        return None

    func = TestKit.preprocess_func['coreml'][architecture]
    img = func(image_path)

    # load model
    model = MLModel(model_path)
    spec = model.get_spec()

    # TODO: Multiple inputs
    input_name = spec.description.input[0].name
    # TODO: Multiple outputs
    output_name = spec.description.output[0].name

    # inference
    coreml_output = model.predict({input_name: img})
    # NOTE(review): .values() assumes the output is a dict (class
    # probabilities) — confirm against the models this runs on.
    prob = coreml_output[output_name].values()
    return np.array(prob).squeeze()
def test_nn_classifier_util_file(self):
    """make_nn_classifier with class labels read from a text file must turn
    a plain NN spec into a neuralNetworkClassifier that emits a label."""
    input_features = [("data", datatypes.Array(3))]
    output_features = [("out", datatypes.Array(3))]
    builder = NeuralNetworkBuilder(
        input_features, output_features, disable_rank5_shape_mapping=True
    )
    # identity activation: output probabilities equal the input values
    builder.add_activation("linear", "LINEAR", "data", "out")
    spec = builder.spec
    mlmodel = MLModel(spec)
    class_labels = ["a", "b", "c"]
    # labels are written one per line, as make_nn_classifier expects
    with tempfile.NamedTemporaryFile(mode="w", suffix=".txt") as f:
        f.write("\n".join(class_labels))
        f.flush()
        mlmodel = make_nn_classifier(
            mlmodel,
            class_labels=f.name,
            predicted_feature_name="out_confidence",
            predicted_probabilities_output="out",
        )
    out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])}, useCPUOnly=True)
    # argmax of [4, 5.5, 6] is index 2 -> label "c"
    self.assertEqual(out_dict["out_confidence"], "c")
    self.assertEqual(
        mlmodel.get_spec().WhichOneof("Type"), "neuralNetworkClassifier"
    )
def test_lut_quant_embedding_nd_2bit(self):
    """embedding_nd with 2-bit LUT-quantized weights: each 2-bit code in W
    indexes quant_lut; bias is added to every looked-up vector."""
    embed_size = 2
    vocab_size = 3
    # 2-bit codes per (embedding row, vocab column)
    W = np.zeros((embed_size, vocab_size), dtype=np.uint8)
    W[:, 0] = [1, 0]
    W[:, 1] = [0, 1]
    W[:, 2] = [3, 2]
    bias = np.array([1.0, 2.0])
    quant_lut = np.array([34.0, 12.0, -6.0, 6.0])
    input_features = [('data', datatypes.Array(4, 1))]
    output_features = [('out', None)]
    builder = NeuralNetworkBuilder(input_features, output_features,
                                   disable_rank5_shape_mapping=True)
    builder.add_embedding_nd(name='embedding_nd',
                             input_name='data',
                             output_name='out',
                             vocab_size=vocab_size,
                             embedding_size=embed_size,
                             W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 2).tobytes(),
                             b=bias,
                             is_quantized_weight=True,
                             quantization_type='lut',
                             nbits=2,
                             quant_lut=quant_lut)
    mlmodel = MLModel(builder.spec)
    # token ids to embed: [2, 2, 1, 0]
    data = np.reshape(np.array([2.0, 2.0, 1.0, 0.0]), (4, 1))
    data_dict = {'data': data}
    out = mlmodel.predict(data_dict, useCPUOnly=True)['out']
    # expected: LUT lookup of W's column for each token id, plus bias
    expected_out = np.zeros((4, embed_size), dtype=np.float32)
    expected_out[0, :] = [quant_lut[W[0, 2]], quant_lut[W[1, 2]]] + bias
    expected_out[1, :] = [quant_lut[W[0, 2]], quant_lut[W[1, 2]]] + bias
    expected_out[2, :] = [quant_lut[W[0, 1]], quant_lut[W[1, 1]]] + bias
    expected_out[3, :] = [quant_lut[W[0, 0]], quant_lut[W[1, 0]]] + bias
    self.assertTrue(out.shape == expected_out.shape)
    self.assertTrue(np.allclose(out.flatten(), expected_out.flatten()))
def test_multiarray_to_image_input_util_HWC_format(self):
    """make_image_input with image_format="NHWC": the model applies scale
    then per-channel bias to the PIL input, i.e. out = scale*x + bias."""
    H, W, C = 1, 1, 3
    input_features = [("data", datatypes.Array(H, W, C))]
    output_features = [("out", datatypes.Array(H, W, C))]
    builder = NeuralNetworkBuilder(
        input_features, output_features, disable_rank5_shape_mapping=True
    )
    # identity layer: output equals the (preprocessed) image input
    builder.add_activation("linear", "LINEAR", "data", "out")
    spec = builder.spec
    mlmodel = MLModel(spec)
    mlmodel = make_image_input(
        mlmodel,
        "data",
        red_bias=-5,
        green_bias=-6,
        blue_bias=-2.5,
        scale=10.0,
        image_format="NHWC",
    )
    x = np.array([4, 2, 5], dtype=np.uint8)
    x = np.reshape(x, (H, W, C))
    pil_img = PIL.Image.fromarray(x)
    y = mlmodel.predict({"data": pil_img}, useCPUOnly=True)["out"]
    self.assertEqual(y.shape, (H, W, C))
    # 10 * [4, 2, 5] + [-5, -6, -2.5] = [35, 14, 47.5]
    np.testing.assert_almost_equal(y.flatten(), [35.0, 14.0, 47.5])
def test_rename_input(self):
    """Rename an input, save to an .mlpackage, reload, and predict
    under the new feature name."""
    utils.rename_feature(self.spec, "feature_1", "renamed_feature",
                         rename_inputs=True)
    model = MLModel(self.spec)
    pkg = tempfile.TemporaryDirectory(suffix=".mlpackage")
    pkg.cleanup()
    model.save(pkg.name)
    reloaded = MLModel(pkg.name)
    if utils._macos_version() >= (12, 0):
        result = reloaded.predict({
            "renamed_feature": 1.0,
            "feature_2": 1.0
        })
        assert result is not None
        assert result["output"] == 3.1
    # reset the spec for next run
    utils.rename_feature(self.spec, "renamed_feature", "feature_1",
                         rename_inputs=True)
    # cleanup
    _remove_path(pkg.name)
def test_rename_output_bad(self):
    """Renaming a nonexistent output is a no-op: the model still predicts
    under the original output name."""
    rename_feature(
        self.spec, "blah", "bad_name", rename_inputs=False, rename_outputs=True
    )
    model = MLModel(self.spec)
    result = model.predict({"feature_1": 1.0, "feature_2": 1.0})
    self.assertIsNotNone(result)
    self.assertEqual(result["output"], 3.1)
def test_rename_input(self):
    """rename_feature on an input must carry predictions through the new name."""
    rename_feature(self.spec, 'feature_1', 'renamed_feature', rename_inputs=True)
    model = MLModel(self.spec)
    preds = model.predict({'renamed_feature': 1.0, 'feature_2': 1.0})
    self.assertIsNotNone(preds)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(preds['output'], 3.1)
    # reset the spec for next run
    rename_feature(self.spec, 'renamed_feature', 'feature_1', rename_inputs=True)
def test_rename_input(self):
    """Renaming an input feature and predicting under the new name."""
    rename_feature(self.spec, "feature_1", "renamed_feature", rename_inputs=True)
    renamed = MLModel(self.spec)
    result = renamed.predict({"renamed_feature": 1.0, "feature_2": 1.0})
    self.assertIsNotNone(result)
    self.assertEqual(result["output"], 3.1)
    # undo the rename so subsequent tests see the original spec
    rename_feature(self.spec, "renamed_feature", "feature_1", rename_inputs=True)
def test_pipeline_rename(self):
    """Renaming the pipeline's input must leave predictions unchanged."""
    # Convert
    scikit_spec = converter.convert(self.scikit_model).get_spec()
    baseline = MLModel(scikit_spec)
    sample_data = self.scikit_data.data[0]

    # Rename
    rename_feature(scikit_spec, "input", "renamed_input")
    renamed = MLModel(scikit_spec)

    # Check the predictions (prediction requires macOS >= 10.13)
    if _is_macos() and _macos_version() >= (10, 13):
        out_dict = baseline.predict({"input": sample_data})
        out_dict_renamed = renamed.predict({"renamed_input": sample_data})
        self.assertAlmostEqual(list(out_dict.keys()), list(out_dict_renamed.keys()))
        self.assertAlmostEqual(
            list(out_dict.values()), list(out_dict_renamed.values())
        )
def main():
    """Read square footages from input.csv, predict prices with the Core ML
    pricing model, and write sqft,price rows to output.csv."""
    model = MLModel('pricing.mlmodel')
    # 'infile'/'outfile' avoid shadowing the built-in input()
    with open('input.csv') as infile:
        csv_reader = csv.reader(infile, delimiter=',')
        with open('output.csv', mode='w') as outfile:
            csv_writer = csv.writer(outfile, delimiter=',')
            for row in csv_reader:
                sqft = int(row[0])
                price = int(model.predict({'sqft': sqft})['price'])
                csv_writer.writerow([sqft, price])
    print('Saved output.csv')
def _test_builder(self, builder, input_shape, expected_layer_num=None):
    """Run remove_redundant_transposes on builder's spec and check that
    outputs are unchanged and the layer count shrank (or equals
    expected_layer_num when given)."""
    data = np.random.rand(*input_shape)

    # Mlmodel before
    mlmodel = MLModel(builder.spec)
    output_before = mlmodel.predict({"data": data})["out"]
    num_layers_before = len(builder.spec.neuralNetwork.layers)

    remove_redundant_transposes(builder.spec)

    layers = builder.spec.neuralNetwork.layers
    # PEP 8: compare to None with 'is', not '=='
    if expected_layer_num is None:
        self.assertTrue(len(layers) < num_layers_before)
    else:
        self.assertEqual(len(layers), expected_layer_num)

    # Mlmodel after
    mlmodel = MLModel(builder.spec)
    output_after = mlmodel.predict({"data": data})["out"]
    np.testing.assert_almost_equal(output_before, output_after, decimal=3)
def main():
    """CLI entry point: stylize one image with a Core ML style-transfer model."""
    arg_parser = argparse.ArgumentParser(description='Stylize image using CoreML')
    arg_parser.add_argument('-input', required=True, help='Path to input image')
    arg_parser.add_argument('-output', required=True, help='Output path')
    arg_parser.add_argument('-model', required=True, help='CoreML model path')
    opts = arg_parser.parse_args()

    source_image = Image.open(opts.input)
    model = MLModel(opts.model)
    result = model.predict({'inputImage': source_image})['outputImage']
    result = result.convert('RGB')
    result.save(opts.output)
def test_linear_quant_embedding_7bit(self):
    """Embedding layer with 7-bit linearly quantized weights, per-row
    scale/bias: W_dequant = quant_scale * code + quant_bias, then + bias."""
    embed_size = 2
    vocab_size = 3
    # 7-bit codes (0..127) per (embedding row, vocab column)
    W = np.zeros((embed_size, vocab_size), dtype=np.uint8)
    W[:, 0] = [100, 127]
    W[:, 1] = [20, 40]
    W[:, 2] = [90, 1]
    quant_scale = np.reshape(np.array([10.0, 2.0]), (2, 1))
    quant_bias = np.reshape(np.array([-2.0, -10.0]), (2, 1))
    W_unquantized = np.broadcast_to(quant_scale, (2, 3)) * W + np.broadcast_to(
        quant_bias, (2, 3)
    )
    bias = np.reshape(np.array([1.0, 2.0]), (2, 1))
    # fold the additive bias into the reference table for easy comparison
    W_unquantized = W_unquantized + np.broadcast_to(bias, (2, 3))
    input_features = [("data", datatypes.Array(4, 1, 1, 1))]
    output_features = [("out", None)]
    builder = NeuralNetworkBuilder(
        input_features, output_features, disable_rank5_shape_mapping=True
    )
    builder.add_embedding(
        name="embed",
        W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 7).tobytes(),
        b=bias,
        input_dim=vocab_size,
        output_channels=embed_size,
        has_bias=True,
        input_name="data",
        output_name="out",
        is_quantized_weight=True,
        quantization_type="linear",
        nbits=7,
        quant_scale=quant_scale,
        quant_bias=quant_bias,
    )
    mlmodel = MLModel(builder.spec)
    # token ids [2, 2, 1, 0], rank-4 input as the embedding layer requires
    data = np.reshape(np.array([2.0, 2.0, 1.0, 0.0]), (4, 1, 1, 1))
    data_dict = {"data": data}
    out = mlmodel.predict(data_dict, useCPUOnly=True)["out"]
    self.assertTrue(out.shape == (4, embed_size, 1, 1))
    # expected: the dequantized column of W for each token id
    expected_out = np.zeros((4, embed_size), dtype=np.float32)
    expected_out[0, :] = W_unquantized[:, 2].flatten()
    expected_out[1, :] = W_unquantized[:, 2].flatten()
    expected_out[2, :] = W_unquantized[:, 1].flatten()
    expected_out[3, :] = W_unquantized[:, 0].flatten()
    self.assertTrue(np.allclose(out.flatten(), expected_out.flatten()))
def test_rename_input_bad(self):
    """Renaming a nonexistent input is a no-op; the saved package still
    loads and predicts under the original feature names."""
    rename_feature(self.spec, "blah", "bad_name", rename_inputs=True)
    model = MLModel(self.spec)
    pkg = tempfile.TemporaryDirectory(suffix=".mlpackage")
    pkg.cleanup()
    model.save(pkg.name)
    reloaded = MLModel(pkg.name)
    if utils._macos_version() >= (12, 0):
        result = reloaded.predict({"feature_1": 1.0, "feature_2": 1.0})
        self.assertIsNotNone(result)
        self.assertEqual(result["output"], 3.1)
    # cleanup
    MLModelTest._remove_path(pkg.name)
def test_undefined_shape_single_output(self):
    """An inner-product net whose output shape is left undefined (None)
    should still build and predict correctly."""
    W = np.ones((3, 3))
    builder = NeuralNetworkBuilder(
        [('data', datatypes.Array(3))],
        [('probs', None)],
    )
    builder.add_inner_product(
        name='ip1',
        W=W,
        b=None,
        input_channels=3,
        output_channels=3,
        has_bias=False,
        input_name='data',
        output_name='probs',
    )
    mlmodel = MLModel(builder.spec)
    probs = mlmodel.predict({'data': np.ones((3,))})['probs']
    # each output channel = dot(ones(3), ones(3)) = 3
    self.assertTrue(np.allclose(probs, np.ones(3) * 3))
def test_save(self):
    """model.save() must be callable repeatedly, with each saved package
    loading and predicting correctly."""
    model = MLModel(self.spec)
    # Verify "save" can be called twice and the saved
    # model can be loaded successfully each time
    for _attempt in range(2):
        pkg = tempfile.TemporaryDirectory(suffix=".mlpackage")
        pkg.cleanup()
        model.save(pkg.name)
        reloaded = MLModel(pkg.name)
        if utils._macos_version() >= (12, 0):
            outputs = reloaded.predict({"feature_1": 1.0, "feature_2": 1.0})
            assert outputs is not None
            assert outputs["output"] == 3.1
        _remove_path(pkg.name)
def inference(cls, architecture, model_path, image_path):
    """Load the image at the model's expected size, run the Core ML model,
    and return the squeezed output array (None if the sanity check fails)."""
    # removed unused `input_data` local and unused `infer_shapes` import
    from PIL import Image
    import numpy as np
    import inspect

    if not cls.sanity_check(architecture):
        return None

    # Recover the resize dimension from the source text of the preprocess
    # function — it is embedded in the call's argument list after 'path,'.
    func = TestKit.preprocess_func['coreml'][architecture]
    funcstr = inspect.getsource(func)
    if len(funcstr.split(',')) == 3:
        size = int(funcstr.split('path,')[1].split(')')[0])
    else:
        size = int(funcstr.split('path,')[1].split(',')[0])

    img = Image.open(image_path)
    img = img.resize((size, size))

    # load model
    model = MLModel(model_path)
    spec = model.get_spec()

    # TODO: Multiple inputs
    input_name = spec.description.input[0].name
    # TODO: Multiple outputs
    output_name = spec.description.output[0].name

    # inference
    coreml_output = model.predict({input_name: img})
    prob = coreml_output[output_name]
    # classifier outputs are dicts of label -> probability
    if isinstance(prob, dict):
        prob = prob.values()
    return np.array(prob).squeeze()
def test_rename_image_input(self):
    """rename_feature must work on an image-typed input: after renaming,
    prediction with a PIL image under the new name still succeeds."""
    input_features = [("data", datatypes.Array(3, 1, 1))]
    output_features = [("out", datatypes.Array(3, 1, 1))]
    builder = NeuralNetworkBuilder(
        input_features, output_features, disable_rank5_shape_mapping=True
    )
    # identity activation: output equals the (scaled) image input
    builder.add_activation("linear", "LINEAR", "data", "out")
    spec = builder.spec
    # make an image input (NCHW layout, pixel values scaled by 2)
    mlmodel = make_image_input(MLModel(spec), "data", image_format="NCHW", scale=2.0)
    # rename the input
    spec = mlmodel.get_spec()
    rename_feature(spec, "data", "new_input_name")
    mlmodel = MLModel(spec)
    # test: scale=2.0 and identity layer -> out = 2 * pixel values
    x = np.array([4, 5, 6], dtype=np.uint8).reshape(1, 1, 3)
    pil_img = PIL.Image.fromarray(x)
    out = mlmodel.predict({"new_input_name": pil_img}, useCPUOnly=True)['out']
    np.testing.assert_equal(out, np.array([8.0, 10.0, 12.0]).reshape(3, 1, 1))