def _evaluate_and_test_model_meanpreprocessing(self, n):

        failed_tests_load = []
        failed_tests_conversion = []
        failed_tests_evaluation = []

        extract_tarfile('{}nets/{}.gz'.format(nets_path, FOLDER_NAME), '{}nets/'.format(nets_path))

        path_prototxt = '{}nets/{}/{}/image{}.prototxt'.format(nets_path, FOLDER_NAME, str(n), str(n))
        path_caffemodel = '{}nets/{}/{}/image{}.caffemodel'.format(nets_path, FOLDER_NAME, str(n), str(n))
        path_mlmodel = '{}nets/{}/{}/image{}.mlmodel'.format(nets_path, FOLDER_NAME, str(n), str(n))
        if n == 1:
            path_binaryproto = '{}nets/{}/1/mean_binary_proto1.binaryproto'.format(nets_path, FOLDER_NAME)
        else:
            path_binaryproto = dict()
            for i in range(n):
                path_binaryproto["data{}".format(str(i+1))] = '{}nets/{}/{}/mean_binary_proto{}.binaryproto'.format(nets_path, FOLDER_NAME, str(n), str(i+1))

        image_input_names = []
        for i in range(n):
            image_input_names.append("data{}".format(str(i+1)))

        #convert it
        try:
            model = caffe_converter.convert((path_caffemodel, path_prototxt, path_binaryproto), image_input_names = image_input_names)
            model.save(path_mlmodel)
        except RuntimeError as e:
            print(e)
            failed_tests_conversion.append('image mean preprocessing: conversion failure')

        #load it (compile it)
        load_result = load_mlmodel(path_mlmodel)
        if load_result is False:
            failed_tests_load.append('image mean preprocessing: load failure')


        #load Caffe's input and output
        with open('{}nets/{}/{}/input.json'.format(nets_path, FOLDER_NAME, str(n))) as data_file:
            input_data_dict = json.load(data_file)
        with open('{}nets/{}/{}/output.json'.format(nets_path, FOLDER_NAME, str(n))) as data_file:
            output_data_dict = json.load(data_file)

        output_data = np.array(output_data_dict["output_data"])

        coreml_input_dict = dict()

        for i in range(n):
            input_data = np.array(input_data_dict["input_data{}".format(str(i+1))]).astype(np.uint8)
            img = PIL.Image.fromarray(np.transpose(input_data[0,:,:,:],[1,2,0]))
            coreml_input_dict["data{}".format(str(i+1))] = img

        #load and evaluate mlmodel
        mlmodel = coremltools.models.MLModel(path_mlmodel)
        if macos_version() >= (10, 13):
            coreml_out = mlmodel.predict(coreml_input_dict)['output']

            caffe_preds = output_data.flatten()
            coreml_preds = coreml_out.flatten()
            if len(caffe_preds) != len(coreml_preds):
                failed_tests_evaluation.append('image mean preprocessing: evaluation failure (output size mismatch)')
            else:
                max_relative_error = compare_models(caffe_preds, coreml_preds)
                if max_relative_error > 0.001:
                    failed_tests_evaluation.append('image mean preprocessing: evaluation failure (max relative error {})'.format(max_relative_error))

            self.assertEqual(failed_tests_conversion,[])
            self.assertEqual(failed_tests_load,[])
            self.assertEqual(failed_tests_evaluation,[])
        shutil.rmtree('{}nets/{}'.format(nets_path, FOLDER_NAME))
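
# compare_models() is called by the evaluation code above (and by the Keras image-bias
# test further down) but is not part of this excerpt. Below is a minimal sketch of what
# it is assumed to compute: the maximum relative error between two flattened prediction
# arrays. The real helper may differ, for example in how it guards against division by zero.
def compare_models(reference_preds, test_preds, eps=1e-10):
    import numpy as np
    reference_preds = np.asarray(reference_preds, dtype=np.float64).flatten()
    test_preds = np.asarray(test_preds, dtype=np.float64).flatten()
    # Element-wise relative error, guarded against division by zero.
    denom = np.maximum(np.abs(reference_preds), eps)
    return float(np.max(np.abs(test_preds - reference_preds) / denom))
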
import numpy as np

import coremltools
from coremltools.models import datatypes, MLModel
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models.neural_network.quantization_utils import \
    _convert_array_to_nbit_quantized_bytes, quantize_weights
from coremltools.models.utils import macos_version, is_macos

MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_14_MACOS_VERSION = (10, 14)
LAYERS_10_15_MACOS_VERSION = (10, 15)


@unittest.skipIf(not is_macos()
                 or macos_version() < LAYERS_10_15_MACOS_VERSION,
                 'Only supported on macOS 10.15+')
class ControlFlowCorrectnessTest(unittest.TestCase):
    @classmethod
    def setup_class(cls):
        pass

    def runTest(self):
        pass

    def _test_model(self, model, input_dict, output_ref, delta=1e-2):
        preds = model.predict(input_dict)
        for name in output_ref:
            ref_val = output_ref[name]
            val = preds[name]
            self.assertTrue(np.allclose(val, ref_val, rtol=delta))
def get_coreml_predictions_depthwise(X, params, w):
    coreml_preds = []
    eval = True

    try:
        input_dim = X.shape[2:]
        output_dim = (
            1, 1, 1
        )  #some random dimensions here: we are going to remove this information later
        input_features = [('data', datatypes.Array(*input_dim))]
        output_features = [('output', datatypes.Array(*output_dim))]
        builder = neural_network.NeuralNetworkBuilder(input_features,
                                                      output_features)

        # translate weights: (Kh, Kw, kernel_channels, output_channels) == (Kh, Kw, Cin/g, Cout) == (Kh, Kw, 1, channel_multiplier * Cin)
        w_e = np.reshape(w, (params["kernel_size"], params["kernel_size"],
                             params["multiplier"] * params["C"], 1))
        w_e = np.transpose(w_e, [0, 1, 3, 2])

        if params["padding"] == 'SAME':
            pad_mode = 'same'
        else:
            pad_mode = 'valid'

        builder.add_convolution('conv',
                                kernel_channels=1,
                                output_channels=params["multiplier"] *
                                params["C"],
                                height=params["kernel_size"],
                                width=params["kernel_size"],
                                stride_height=params["stride"],
                                stride_width=params["stride"],
                                border_mode=pad_mode,
                                groups=params["C"],
                                W=w_e,
                                b=None,
                                has_bias=0,
                                is_deconv=0,
                                output_shape=None,
                                input_name='data',
                                output_name='output')

        #Remove output shape by deleting and adding an output
        del builder.spec.description.output[-1]
        output = builder.spec.description.output.add()
        output.name = 'output'
        output.type.multiArrayType.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value(
            'DOUBLE')
        #save the model
        model_dir = tempfile.mkdtemp()
        model_path = os.path.join(model_dir, 'test_layer.mlmodel')
        coremltools.utils.save_spec(builder.spec, model_path)
        # prepare input and get predictions
        coreml_model = coremltools.models.MLModel(model_path)
        coreml_input = {'data': X}
        if macos_version() >= (10, 13):
            coreml_preds = coreml_model.predict(coreml_input)['output']
        else:
            coreml_preds = None
        if os.path.exists(model_dir):
            shutil.rmtree(model_dir)
    except RuntimeError as e:
        print(e)
        eval = False

    return coreml_preds, eval
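
# A hedged usage sketch for get_coreml_predictions_depthwise() above. The parameter
# values and shapes are illustrative assumptions; numpy (as np), coremltools,
# datatypes and neural_network from coremltools.models, tempfile, os, shutil and
# macos_version are assumed to be imported as in the rest of this snippet's module.
def _example_depthwise_run():
    params = {"C": 4, "multiplier": 1, "kernel_size": 3,
              "stride": 1, "padding": "SAME"}
    # Weights in the TF depthwise layout (Kh, Kw, Cin, channel_multiplier).
    w = np.random.rand(params["kernel_size"], params["kernel_size"],
                       params["C"], params["multiplier"])
    # Input in the (1, 1, C, H, W) layout used by the old multiarray predict path.
    X = np.random.rand(1, 1, params["C"], 8, 8)
    preds, ok = get_coreml_predictions_depthwise(X, params, w)
    print('conversion/evaluation ok:', ok)
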
    def test_keras_2_image_bias(self):

        #define Keras model and get prediction
        input_shape1 = (100,60,3)
        input_shape2 = (23,45,3)

        data1 = Input(shape=input_shape1)
        data2 = Input(shape=input_shape2)
        a_pool = GlobalMaxPooling2D()(data1)
        b_pool = GlobalMaxPooling2D()(data2)
        output = keras.layers.add([a_pool, b_pool])
        model = Model(inputs=[data1, data2], outputs=output)

        data1 = np.ones(input_shape1)
        data2 = np.ones(input_shape2)
        keras_input1 = np.ones(input_shape1)
        keras_input2 = np.ones(input_shape2)

        data1[:,:,0] = 100.0
        data1[:,:,1] = 79.0
        data1[:,:,2] = 194.0

        data2[:,:,0] = 130.0
        data2[:,:,1] = 91.0
        data2[:,:,2] = 11.0


        red_bias1 = -88.0
        green_bias1 = -2
        blue_bias1 = -40

        red_bias2 = -100.0
        green_bias2 = -29
        blue_bias2 = -15

        # Reproduce Core ML's image preprocessing on the Keras side: per-channel bias,
        # with the channels of data1 fed in BGR order (is_bgr=True below).
        keras_input1[:,:,0] = data1[:,:,2] + blue_bias1
        keras_input1[:,:,1] = data1[:,:,1] + green_bias1
        keras_input1[:,:,2] = data1[:,:,0] + red_bias1

        keras_input2[:,:,0] = data2[:,:,0] + red_bias2
        keras_input2[:,:,1] = data2[:,:,1] + green_bias2
        keras_input2[:,:,2] = data2[:,:,2] + blue_bias2

        keras_preds = model.predict([np.expand_dims(keras_input1, axis = 0), np.expand_dims(keras_input2, axis = 0)])
        keras_preds = keras_preds.flatten()

        #convert to coreml and get predictions
        model_dir = tempfile.mkdtemp()
        model_path = os.path.join(model_dir, 'keras.mlmodel')
        from coremltools.converters import keras as keras_converter
        coreml_model = keras_converter.convert(model, input_names = ['data1', 'data2'], output_names = ['output'],
                                                image_input_names = ['data1', 'data2'],
                                                red_bias = {'data1': red_bias1, 'data2': red_bias2},
                                                green_bias = {'data1': green_bias1, 'data2': green_bias2},
                                                blue_bias = {'data1': blue_bias1, 'data2': blue_bias2},
                                                is_bgr = {'data1': True, 'data2': False})
        #coreml_model.save(model_path)
        #coreml_model = coremltools.models.MLModel(model_path)

        if macos_version() >= (10, 13):
            coreml_input_dict = dict()
            coreml_input_dict["data1"] = PIL.Image.fromarray(data1.astype(np.uint8))
            coreml_input_dict["data2"] = PIL.Image.fromarray(data2.astype(np.uint8))
            coreml_preds = coreml_model.predict(coreml_input_dict)['output'].flatten()

            #compare
            self.assertEqual(len(keras_preds), len(coreml_preds))
            max_relative_error = compare_models(keras_preds, coreml_preds)
            self.assertAlmostEqual(max(max_relative_error, .001), .001, delta=1e-6)

        if os.path.exists(model_dir):
            shutil.rmtree(model_dir)
    def CoreMLEmit(original_framework, architecture_name, architecture_path, weight_path, image_path):


        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter
        from coremltools.models import MLModel
        import coremltools
        from PIL import Image


        original_framework = checkfrozen(original_framework)


        def prep_for_coreml(prename, BGRTranspose):
            # The list is in RGB order.
            if prename == 'Standard':
                return 0.00784313725490196, -1, -1, -1
            elif prename == 'ZeroCenter':
                return 1, -123.68, -116.779, -103.939
            elif prename == 'Identity':
                return 1, 1, 1, 1
            else:
                raise ValueError('Unknown preprocessing name: {}'.format(prename))


        # IR to Model
        # converted_file = original_framework + '_coreml_' + architecture_name + "_converted"
        # converted_file = converted_file.replace('.', '_')

        func = TestKit.preprocess_func[original_framework][architecture_name]

        import inspect
        funcstr = inspect.getsource(func)

        coreml_pre = funcstr.split('(')[0].split('.')[-1]

        n_args = len(funcstr.split(','))
        if n_args == 3:
            BGRTranspose = False
            size = int(funcstr.split('path,')[1].split(')')[0])
            prep_list = prep_for_coreml(coreml_pre, BGRTranspose)
        elif n_args == 4:
            BGRTranspose = funcstr.split(',')[-2].split(')')[0].strip() == str(True)
            size = int(funcstr.split('path,')[1].split(',')[0])
            prep_list = prep_for_coreml(coreml_pre, BGRTranspose)
        elif n_args == 11:
            BGRTranspose = funcstr.split(',')[-2].split(')')[0].strip() == str(True)
            size = int(funcstr.split('path,')[1].split(',')[0])
            prep_list = (float(funcstr.split(',')[2]),
                         float(funcstr.split(',')[3].split('[')[-1]),
                         float(funcstr.split(',')[4]),
                         float(funcstr.split(',')[5].split(']')[0]))
        else:
            raise ValueError('Unexpected preprocess function signature: {}'.format(funcstr))

        emitter = CoreMLEmitter(architecture_path, weight_path)
        model, input_name, output_name = emitter.gen_model(
                input_names=None,
                output_names=None,
                image_input_names=image_path,
                is_bgr=BGRTranspose,
                red_bias=prep_list[1],
                green_bias=prep_list[2],
                blue_bias=prep_list[3],
                gray_bias=0.0,
                image_scale=prep_list[0],
                class_labels=None,
                predicted_feature_name=None,
                predicted_probabilities_output=''
            )

        input_name = str(input_name[0][0])
        output_name = str(output_name[0][0])

        # load model
        model = MLModel(model)


        # save model
        # coremltools.utils.save_spec(model.get_spec(), converted_file)

        from coremltools.models.utils import macos_version

        if macos_version() < (10, 13):
            return None
        else:

            from PIL import Image as pil_image
            img = pil_image.open(image_path)
            img = img.resize((size, size))

            # inference

            coreml_input = {input_name: img}
            coreml_output = model.predict(coreml_input)
            prob = coreml_output[output_name]
            prob = np.array(prob).squeeze()

            return prob
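
# The (scale, red_bias, green_bias, blue_bias) tuples returned by prep_for_coreml()
# above map onto Core ML's image preprocessing, which scales each pixel and then adds
# a per-channel bias (out = scale * pixel + bias). A small hedged numpy sketch of the
# equivalent arithmetic for the presets used above:
def _simulate_coreml_image_preprocessing(rgb_pixels, scale, red_bias, green_bias, blue_bias):
    import numpy as np
    # rgb_pixels: float array of shape (H, W, 3) in RGB channel order.
    biases = np.array([red_bias, green_bias, blue_bias], dtype=np.float64)
    return scale * np.asarray(rgb_pixels, dtype=np.float64) + biases

# 'Standard' (scale 2/255, bias -1) maps [0, 255] to roughly [-1, 1]:
#     _simulate_coreml_image_preprocessing(img, 0.00784313725490196, -1, -1, -1)
# 'ZeroCenter' subtracts the ImageNet per-channel means:
#     _simulate_coreml_image_preprocessing(img, 1, -123.68, -116.779, -103.939)
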
    def _test_evaluation(self, allow_slow):
        """
        Test that the same predictions are made
        """

        # Generate some smallish (some kernels take too long on anything else) random data
        x, y = [], []
        for _ in range(50):
            cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)
            x.append([cur_x1, cur_x2])
            y.append(1 + 2 * cur_x1 + 3 * cur_x2)

        input_names = ['x1', 'x2']
        df = pd.DataFrame(x, columns=input_names)

        # Parameters to test
        kernel_parameters = [{}, {
            'kernel': 'rbf',
            'gamma': 1.2
        }, {
            'kernel': 'linear'
        }, {
            'kernel': 'poly'
        }, {
            'kernel': 'poly',
            'degree': 2
        }, {
            'kernel': 'poly',
            'gamma': 0.75
        }, {
            'kernel': 'poly',
            'degree': 0,
            'gamma': 0.9,
            'coef0': 2
        }, {
            'kernel': 'sigmoid'
        }, {
            'kernel': 'sigmoid',
            'gamma': 1.3
        }, {
            'kernel': 'sigmoid',
            'coef0': 0.8
        }, {
            'kernel': 'sigmoid',
            'coef0': 0.8,
            'gamma': 0.5
        }]
        non_kernel_parameters = [{}, {
            'C': 1
        }, {
            'C': 1.5,
            'shrinking': True
        }, {
            'C': 0.5,
            'shrinking': False,
            'nu': 0.9
        }]

        # Test
        for param1 in non_kernel_parameters:
            for param2 in kernel_parameters:
                cur_params = param1.copy()
                cur_params.update(param2)

                cur_model = NuSVR(**cur_params)
                cur_model.fit(x, y)
                df['prediction'] = cur_model.predict(x)

                spec = scikit_converter.convert(cur_model, input_names,
                                                'target')

                if macos_version() >= (10, 13):
                    metrics = evaluate_regressor(spec, df)
                    self.assertAlmostEqual(metrics['max_error'], 0)

                if not allow_slow:
                    break

            if not allow_slow:
                break
class OneHotEncoderScikitTest(unittest.TestCase):
    """
    Unit test class for testing scikit-learn converter.
    """
    @classmethod
    def setUpClass(cls):
        """
        Set up the unit test by loading the dataset and training a model.
        """
        scikit_data = [[0], [1], [2], [4], [3], [2], [4], [5], [6], [7]]
        scikit_data_multiple_cols = [[0, 1], [1, 0], [2, 2], [3, 3], [4, 4]]
        scikit_model = OneHotEncoder()
        scikit_model.fit(scikit_data)

        # Save the data and the model
        cls.scikit_data = np.asarray(scikit_data, dtype='d')
        cls.scikit_data_multiple_cols = np.asarray(scikit_data_multiple_cols,
                                                   dtype='d')
        cls.scikit_model = scikit_model

    @unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
                         'Only supported on macOS 10.13+')
    def test_conversion_one_column(self):
        # Fit a single OHE
        scikit_model = OneHotEncoder()
        scikit_model.fit(self.scikit_data)
        spec = sklearn.convert(scikit_model, 'single_feature',
                               'out').get_spec()

        test_data = [{'single_feature': row} for row in self.scikit_data]
        scikit_output = [{
            'out': row
        } for row in scikit_model.transform(self.scikit_data).toarray()]
        metrics = evaluate_transformer(spec, test_data, scikit_output)

        self.assertIsNotNone(spec)
        self.assertIsNotNone(spec.description)
        self.assertEqual(metrics['num_errors'], 0)

    @unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
                         'Only supported on macOS 10.13+')
    def test_conversion_many_columns(self):
        scikit_model = OneHotEncoder()
        scikit_model.fit(self.scikit_data_multiple_cols)
        spec = sklearn.convert(scikit_model, ['feature_1', 'feature_2'],
                               'out').get_spec()

        test_data = [{
            'feature_1': row[0],
            'feature_2': row[1]
        } for row in self.scikit_data_multiple_cols]
        scikit_output = [{
            'out': row
        } for row in scikit_model.transform(
            self.scikit_data_multiple_cols).toarray()]
        metrics = evaluate_transformer(spec, test_data, scikit_output)

        self.assertIsNotNone(spec)
        self.assertIsNotNone(spec.description)
        self.assertEqual(metrics['num_errors'], 0)

    @unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
                         'Only supported on macOS 10.13+')
    def test_conversion_one_column_of_several(self):
        scikit_model = OneHotEncoder(categorical_features=[0])
        scikit_model.fit(copy(self.scikit_data_multiple_cols))
        spec = sklearn.convert(scikit_model, ['feature_1', 'feature_2'],
                               'out').get_spec()

        test_data = [{
            'feature_1': row[0],
            'feature_2': row[1]
        } for row in self.scikit_data_multiple_cols]
        scikit_output = [{
            'out': row
        } for row in scikit_model.transform(
            self.scikit_data_multiple_cols).toarray()]
        metrics = evaluate_transformer(spec, test_data, scikit_output)

        self.assertIsNotNone(spec)
        self.assertIsNotNone(spec.description)
        self.assertEqual(metrics['num_errors'], 0)

    @unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
                         'Only supported on macOS 10.13+')
    def test_boston_OHE(self):
        data = load_boston()

        for categorical_features in [[3], [8], [3, 8], [8, 3]]:

            model = OneHotEncoder(categorical_features=categorical_features,
                                  sparse=False)
            model.fit(data.data, data.target)

            # Convert the model
            spec = sklearn.convert(model, data.feature_names, 'out').get_spec()

            input_data = [
                dict(zip(data.feature_names, row)) for row in data.data
            ]
            output_data = [{"out": row} for row in model.transform(data.data)]

            result = evaluate_transformer(spec, input_data, output_data)

            assert result["num_errors"] == 0

    # This test still isn't working
    @unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
                         'Only supported on macOS 10.13+')
    def test_boston_OHE_pipeline(self):
        data = load_boston()

        for categorical_features in [[3], [8], [3, 8], [8, 3]]:

            # Put it in a pipeline so that we can test whether the output dimension
            # handling is correct.

            model = Pipeline([
                ("OHE",
                 OneHotEncoder(categorical_features=categorical_features)),
                ("Normalizer", Normalizer())
            ])

            model.fit(data.data.copy(), data.target)

            # Convert the model
            spec = sklearn.convert(model, data.feature_names, 'out').get_spec()

            input_data = [
                dict(zip(data.feature_names, row)) for row in data.data
            ]
            output_data = [{
                "out": row
            } for row in model.transform(data.data.copy())]

            result = evaluate_transformer(spec, input_data, output_data)

            assert result["num_errors"] == 0

    @unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
                         'Only supported on macOS 10.13+')
    def test_random_sparse_data(self):

        n_columns = 8
        n_categories = 20

        import numpy.random as rn
        rn.seed(0)
        categories = rn.randint(50000, size=(n_columns, n_categories))

        for dt in ['int32', 'float32', 'float64']:

            _X = np.array([[
                categories[j, rn.randint(n_categories)]
                for j in range(n_columns)
            ] for i in range(100)],
                          dtype=dt)

            # Test this data on a bunch of possible inputs.
            for sparse in (True, False):
                for categorical_features in [
                        'all', [3], [4],
                        range(2, 8),
                        range(0, 4),
                        range(0, 8)
                ]:
                    X = _X.copy()

                    # This appears to be the only type now working.
                    assert X.dtype == np.dtype(dt)

                    model = OneHotEncoder(
                        categorical_features=categorical_features,
                        sparse=sparse)
                    model.fit(X)

                    # Convert the model
                    spec = sklearn.convert(model, [('data', Array(n_columns))],
                                           'out')

                    X_out = model.transform(X)
                    if sparse:
                        X_out = X_out.todense()

                    input_data = [{'data': row} for row in X]
                    output_data = [{"out": row} for row in X_out]

                    result = evaluate_transformer(spec, input_data,
                                                  output_data)

                    assert result["num_errors"] == 0

            # Test normal data inside a pipeline
            for sparse in (True, False):
                for categorical_features in [
                        'all', [3], [4],
                        range(2, 8),
                        range(0, 4),
                        range(0, 8)
                ]:
                    X = _X.copy()

                    model = Pipeline([
                        ("OHE",
                         OneHotEncoder(
                             categorical_features=categorical_features,
                             sparse=sparse)), ("Normalizer", Normalizer())
                    ])

                    model.fit(X)

                    # Convert the model
                    spec = sklearn.convert(model, [('data', Array(n_columns))],
                                           'out').get_spec()

                    X_out = model.transform(X)
                    if sparse:
                        X_out = X_out.todense()

                    input_data = [{'data': row} for row in X]
                    output_data = [{"out": row} for row in X_out]

                    result = evaluate_transformer(spec, input_data,
                                                  output_data)

                    assert result["num_errors"] == 0

    def test_conversion_bad_inputs(self):
        # Error on converting an untrained model
        with self.assertRaises(TypeError):
            model = OneHotEncoder()
            spec = sklearn.convert(model, 'data', 'out')

        # Check that the expected class is enforced during conversion.
        with self.assertRaises(TypeError):
            from sklearn.linear_model import LinearRegression
            model = LinearRegression()
            spec = sklearn.convert(model, 'data', 'out')
    def test_random_sparse_data(self): 

        n_columns = 8
        n_categories = 20

        import numpy.random as rn
        rn.seed(0)
        categories = rn.randint(50000, size = (n_columns, n_categories) )

        for dt in ['int32', 'float32', 'float64']:

            _X = np.array( [[categories[j,rn.randint(n_categories)] 
                             for j in range(n_columns)] 
                            for i in range(100)], dtype=dt)

            # Test this data on a bunch of possible inputs. 
            for sparse in (True, False): 
                for categorical_features in ['all', [3], [4], range(2,8), range(0,4), range(0,8)]:
                    X = _X.copy()

                    # This appears to be the only type now working.
                    assert X.dtype == np.dtype(dt)

                    model = OneHotEncoder(categorical_features = categorical_features, sparse=sparse)
                    model.fit(X)

                    # Convert the model
                    spec = sklearn.convert(model, [('data', Array(n_columns))], 'out')

                    if macos_version() >= (10, 13):
                        X_out = model.transform(X)
                        if sparse:
                            X_out = X_out.todense()

                        input_data = [{'data' : row} for row in X]
                        output_data = [{"out" : row} for row in X_out]

                        result = evaluate_transformer(spec, input_data, output_data)

                        assert result["num_errors"] == 0

            # Test normal data inside a pipeline
            for sparse in (True, False): 
                for categorical_features in [ 'all', [3], [4], range(2,8), range(0,4), range(0,8)]:
                    X = _X.copy()

                    model = Pipeline([("OHE", OneHotEncoder(categorical_features = categorical_features, sparse=sparse)),
                                      ("Normalizer", Normalizer())])

                    model.fit(X)

                    # Convert the model
                    spec = sklearn.convert(model, [('data', Array(n_columns))], 'out').get_spec()
                    
                    if macos_version() >= (10, 13):
                        X_out = model.transform(X)
                        if sparse:
                            X_out = X_out.todense()

                        input_data = [{'data' : row} for row in X]
                        output_data = [{"out" : row} for row in X_out]

                        result = evaluate_transformer(spec, input_data, output_data)

                        assert result["num_errors"] == 0
class OnnxModelTest(unittest.TestCase):

    def test_linear_no_bias(self, disable_rank5_mapping=False):  # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.simple_nn = nn.Sequential(nn.Linear(256, 128, bias=False), nn.ReLU())

            def forward(self, x):
                return self.simple_nn(x)

        torch_model = Net() # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1,256), (256), disable_rank5_mapping=disable_rank5_mapping) # type: ignore

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_linear_no_bias_disable_rank5_mapping(self):
        self.test_linear_no_bias(True)

    def test_linear_bias(self):  # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.simple_nn = nn.Sequential(nn.Linear(256, 128, bias=True), nn.ReLU())

            def forward(self, x):
                return self.simple_nn(x)

        torch_model = Net() # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1,256), (256)) # type: ignore

    def test_dynamic_reshape(self):  # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv = nn.Conv2d(in_channels=3,
                                      out_channels=32,
                                      kernel_size=(3, 3),
                                      stride=1, padding=0,
                                      bias=True)

            def forward(self, x):
                x = self.conv(x)
                x = x.view(x.size()[0], -1)
                return x

        torch_model = Net() # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1, 3, 100, 100), (3, 100, 100)) # type: ignore

    def test_const_initializer1(self):  # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.ones = torch.nn.Parameter(torch.ones(1,))

            def forward(self, x):
                y = x + self.ones
                return y

        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1, 3), (3,))  # type: ignore


    def test_const_initializer2(self):  # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()

            def forward(self, x):
                y = x + torch.nn.Parameter(torch.ones(2, 3), requires_grad=False)
                return y

        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1, 2, 3), (1, 2, 3))  # type: ignore

    def test_conv2D_transpose(self): # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.convT = torch.nn.ConvTranspose2d(1, 1, kernel_size=3, stride=2, output_padding=1, padding=1, groups=1)

            def forward(self, x):
                y = self.convT(x)
                return y

        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1, 1, 2, 2), (1, 2, 2))  # type: ignore

    def test_conv2D_transpose_groups(self): # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.convT = torch.nn.ConvTranspose2d(4, 4, kernel_size=3, stride=2, output_padding=1, padding=1, groups=2)

            def forward(self, x):
                y = self.convT(x)
                return y

        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1, 4, 8, 8), (4, 8, 8))  # type: ignore

    def test_conv2D_transpose_2(self): # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.convT = torch.nn.ConvTranspose2d(1, 1, kernel_size=3, stride=3, output_padding=2, padding=1, groups=1)

            def forward(self, x):
                y = self.convT(x)
                return y

        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1, 1, 3, 3), (1, 3, 3))  # type: ignore

    def test_pow(self): # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()

            def forward(self, x):
                y = x.pow(3)
                return y

        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (3, 2, 3), (3, 2, 3))  # type: ignore

    @unittest.skip("Disable test until support for ConstantOfShape is added")
    def test_lstm(self):  # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.lstm = nn.LSTM(input_size=256,
                                hidden_size=64,
                                num_layers=1)

            def forward(self, x):
                y = self.lstm(x)
                return y

        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (3, 1, 256), (3, 1, 256))  # type: ignore

    def test_1d_conv(self):
        class Net(nn.Module):
            def __init__(self, in_channels,
                               out_channels,
                               kernel_size,
                               stride=1,
                               dilation=1,
                               groups=1,
                               bias=True):

                super(Net, self).__init__()

                self.conv = torch.nn.Conv1d(in_channels,
                                            out_channels,
                                            kernel_size=kernel_size,
                                            stride=stride,
                                            padding=0,
                                            dilation=dilation,
                                            groups=groups,
                                            bias=bias)

                self.__padding = (kernel_size - 1) * dilation

            def forward(self, x):
                result = self.conv(x)
                if self.__padding != 0:
                    return result[:, :, :-self.__padding]
                return result

        B = 1
        Cin = 5
        Cout = 11
        k = 3
        Win = 15
        torch_model = Net(Cin, Cout, k)  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1, Cin, Win), (Cin, 1, Win))  # type: ignore

    def test_conv1d_after_reshape(self):  # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv = torch.nn.Conv1d(in_channels=300,
                                            out_channels=32,
                                            kernel_size=3,
                                            stride=1,
                                            padding=0,
                                            bias=True)

            def forward(self, x):
                x = x.view(1, 300, 100)
                x = self.conv(x)
                return x

        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1, 3, 100, 100), (3, 100, 100))  # type: ignore

    def test_bachnorm_after_reshape(self):  # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv = torch.nn.Conv1d(in_channels=300,
                                            out_channels=32,
                                            kernel_size=3,
                                            stride=1,
                                            padding=0,
                                            bias=True)
                self.bachnorm = nn.BatchNorm1d(32)

            def forward(self, x):
                x = x.view(1, 300, 100)
                x = self.conv(x)
                x = self.bachnorm(x)
                return x

        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1, 3, 100, 100), (3, 100, 100))  # type: ignore

    def test_res_connect_downsampling_after_reshape(self):  # type: () -> None
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv = torch.nn.Conv1d(in_channels=300,
                                            out_channels=32,
                                            kernel_size=3,
                                            stride=1,
                                            padding=1,
                                            bias=True)
                self.downsample = torch.nn.Conv1d(in_channels=300,
                                                  out_channels=32,
                                                  kernel_size=1,
                                                  stride=1,
                                                  padding=0,
                                                  bias=True)

            def forward(self, x):
                x = x.view(1, 300, 100)
                y = self.conv(x)
                res = self.downsample(x)
                return y + res

        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1, 3, 100, 100), (3, 100, 100))  # type: ignore

    def test_fc_plus_convenet(self):  # type: () -> None
        class Net(nn.Module):
            def __init__(self, channel_size = 1, output_h = 16, output_w = 16, filter_num = 32, latent_size = 16):
                super(Net, self).__init__()
                self.channel_size = channel_size
                self.output_h = output_h
                self.output_w = output_w
                self.filter_num = filter_num
                self.latent_size = latent_size

                self.fc3 = nn.Linear(latent_size, 128)
                self.fc4 = nn.Linear(128, 256)

                self.relu = nn.ReLU()

                self.convt = nn.Sequential(
                    nn.ConvTranspose2d(256, self.filter_num * 4, 4, 1),
                    nn.BatchNorm2d(self.filter_num * 4),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(self.filter_num * 4, self.filter_num * 2, 4, 1),
                    nn.BatchNorm2d(self.filter_num * 2),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(self.filter_num * 2, self.filter_num, 4, 1),
                    nn.BatchNorm2d(self.filter_num),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(self.filter_num, self.filter_num, 4, 1),
                    nn.BatchNorm2d(self.filter_num),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(self.filter_num, 1, 4, 1),
                    nn.Sigmoid()
                )

            def forward(self, z):
                x = self.relu(self.fc3(z))
                deconv_input = self.fc4(x)
                deconv_input = deconv_input.view(-1, 256, 1, 1)
                x = self.convt(deconv_input)
                return x

        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (1, 16), (1, 1, 16))  # type: ignore
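
# CorrectnessTest, the base class of StressTest below and of TestModels further down,
# is not included in this excerpt. The calls of the form
# self._test_model(input_dict, ref_output_dict, coreml_model, cpu_only=...) suggest a
# helper roughly like this hedged sketch; the real implementation may well differ
# (for example, it may use an SNR-based comparison instead of np.allclose). It assumes
# unittest, numpy (as np), is_macos and macos_version are imported as in the
# surrounding snippets.
class CorrectnessTest(unittest.TestCase):

    def _test_model(self, input_dict, ref_output_dict, coreml_model,
                    cpu_only=False, delta=1e-2):
        # Prediction is only possible on macOS 10.13+; silently skip elsewhere.
        if not is_macos() or macos_version() < (10, 13):
            return
        preds = coreml_model.predict(input_dict, useCPUOnly=cpu_only)
        for name, ref_val in ref_output_dict.items():
            self.assertTrue(np.allclose(preds[name], ref_val, atol=delta),
                            'mismatch for output {}'.format(name))
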
class StressTest(CorrectnessTest):

    def runTest(self):
        pass

    def test_data_reorganize(self, cpu_only=False):

        def get_coreml_model_reorganize(X, params):
            eval = True
            mlmodel = None
            try:
                input_dim = X.shape[2:]
                input_features = [('data', datatypes.Array(*input_dim))]
                output_features = [('output', None)]
                builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
                builder.add_reorganize_data('reorg', 'data', 'output', mode=params["mode"],
                                            block_size=params["block_size"])
                mlmodel = MLModel(builder.spec)
            except RuntimeError as e:
                print(e)
                eval = False

            return mlmodel, eval

        def get_tf_predictions_reorganize(X, params):
            Hin = params["H"]
            Win = params["W"]
            Cin = params["C"]
            with tf.Graph().as_default(), tf.Session() as sess:
                x = tf.placeholder(tf.float32, shape=(1, Hin, Win, Cin))
                if params["mode"] == 'SPACE_TO_DEPTH':
                    y = tf.space_to_depth(x, params["block_size"])
                else:
                    y = tf.depth_to_space(x, params["block_size"])
                return sess.run(y, feed_dict={x: X})

        '''
        Define Params
        '''
        params_dict = dict( C = [1,2,8,16,15,27],
                            H = [2,4,6,8,10,15,21,16],
                            W = [2,4,6,8,10,15,21,16],
                            block_size = [2,3,4,5],
                            mode = ['SPACE_TO_DEPTH','DEPTH_TO_SPACE']
                            )
        params = [x for x in list(itertools.product(*params_dict.values()))]
        all_candidates = [dict(zip(params_dict.keys(), x)) for x in params]     
        valid_params = []               
        for pr in all_candidates:
            if pr["mode"] == 'SPACE_TO_DEPTH': 
                if pr["H"] % pr["block_size"] == 0 and pr["W"] % pr["block_size"] == 0:
                    valid_params.append(pr)  
            else:
                if pr["C"] % (pr["block_size"] ** 2) == 0:
                    valid_params.append(pr)        
        print("Total params to be tested: ", len(valid_params), "out of canditates: ", len(all_candidates))
        '''
        Test
        '''
        failed_tests_compile = []
        for i in range(len(valid_params)):
            params = valid_params[i]
            #print("=========: ", params)
            #if i % 10 == 0: print("======== Testing {}/{}".format(str(i), str(len(valid_params))))
            X = np.random.rand(1,params["C"],params["H"],params["W"])
            tf_preds = get_tf_predictions_reorganize(np.transpose(X,[0,2,3,1]), params)
            tf_preds = np.transpose(tf_preds, [0,3,1,2])
            coreml_model, eval = get_coreml_model_reorganize(np.expand_dims(X, axis=0), params)
            if eval is False:
                failed_tests_compile.append(params)
            else:
                input_dict = {'data': np.expand_dims(X, axis=0)}
                ref_output_dict = {'output': tf_preds[0, :, :, :]}
                self._test_model(input_dict, ref_output_dict, coreml_model, cpu_only=cpu_only)

        self.assertEqual(failed_tests_compile, [])

    def test_data_reorganize_cpu_only(self):
        self.test_data_reorganize(cpu_only=True)

    def test_depthwise_conv(self, cpu_only=False):

        def get_coreml_model_depthwise(X, params, w):
            eval = True
            mlmodel = None
            try:
                input_dim = X.shape[2:]
                input_features = [('data', datatypes.Array(*input_dim))]
                output_features = [('output', None)]
                builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
                # translate weights: (Kh, Kw, kernel_channels, output_channels) == (Kh, Kw, Cin/g, Cout) == (Kh, Kw, 1, channel_multiplier * Cin)
                w_e = np.reshape(w, (params["kernel_size"], params["kernel_size"], params["multiplier"] * params["C"], 1))
                w_e = np.transpose(w_e, [0, 1, 3, 2])
                if params["padding"] == 'SAME':
                    pad_mode = 'same'
                else:
                    pad_mode = 'valid'
                builder.add_convolution('conv',
                                        kernel_channels=1,
                                        output_channels=params["multiplier"] * params["C"],
                                        height=params["kernel_size"], width=params["kernel_size"],
                                        stride_height=params["stride"], stride_width=params["stride"],
                                        border_mode=pad_mode,
                                        groups=params["C"],
                                        W=w_e, b=None,
                                        has_bias=0, is_deconv=0, output_shape=None,
                                        input_name='data', output_name='output')

                mlmodel = MLModel(builder.spec)
            except RuntimeError as e:
                print(e)
                eval = False
            return mlmodel, eval

        def get_tf_predictions_depthwise(X, params, w):
            Hin = Win = params["H"]
            Cin = params["C"]
            Kh = Kw = params["kernel_size"]
            channel_multiplier = params["multiplier"]
            with tf.Graph().as_default(), tf.Session() as sess:
                x = tf.placeholder(tf.float32, shape=(1, Hin, Win, Cin))
                W = tf.constant(w, dtype=tf.float32, shape=[Kh, Kw, Cin, channel_multiplier])
                y = tf.nn.depthwise_conv2d(x, W, strides=[1, params["stride"], params["stride"], 1],
                                           padding=params["padding"])
                return sess.run(y, feed_dict={x: X})


        '''
        Define Params
        '''
        params_dict = dict( C = [1,4,7],
                           H = [11,16],
                           stride = [1,2,3],
                           kernel_size = [1,2,3,5],
                           multiplier = [1,2,3,4],
                           padding = ['SAME', 'VALID']
                           )
        params = [x for x in list(itertools.product(*params_dict.values()))]
        all_candidates = [dict(zip(params_dict.keys(), x)) for x in params]     
        valid_params = []               
        for pr in all_candidates:
            if pr["padding"] == 'VALID':
                if np.floor((pr["H"]-pr["kernel_size"])/pr["stride"]) + 1 <= 0:
                    continue
            valid_params.append(pr)       
        print("Total params to be tested: ", len(valid_params), "out of canditates: ", len(all_candidates))
        '''
        Test
        '''
        failed_tests_compile = []
        for i in range(len(valid_params)):
            params = valid_params[i]
            #print("=========: ", params)
            #if i % 10 == 0: print "======== Testing {}/{}".format(str(i), str(len(valid_params)))
            X = np.random.rand(1,params["C"],params["H"],params["H"])
            w = np.random.rand(params["kernel_size"], params["kernel_size"], params["C"], params["multiplier"])
            tf_preds = get_tf_predictions_depthwise(np.transpose(X,[0,2,3,1]), params, w)
            tf_preds = np.transpose(tf_preds, [0,3,1,2])
            coreml_model, eval = get_coreml_model_depthwise(np.expand_dims(X, axis=0), params, w)
            if eval is False:
                failed_tests_compile.append(params)
            else:
                input_dict = {'data': np.expand_dims(X, axis=0)}
                ref_output_dict = {'output': tf_preds[0, :, :, :]}
                self._test_model(input_dict, ref_output_dict, coreml_model, cpu_only=cpu_only)

        self.assertEqual(failed_tests_compile, [])

    def test_depthwise_conv_cpu_only(self):
        self.test_depthwise_conv(cpu_only=True)

    @unittest.skipUnless(
        macos_version() >= (10, 14), 'Only supported on MacOS 10.14+')
    def test_resize_bilinear(self, cpu_only=False):

        def get_coreml_model_resize_bilinear(X, params):
            eval = True
            mlmodel = None
            try:
                input_dim = X.shape[2:]
                input_features = [('data', datatypes.Array(*input_dim))]
                output_features = [('output', None)]
                builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
                if params["align_corners"]:
                    mode = 'STRICT_ALIGN_ENDPOINTS_MODE'
                else:
                    mode = 'UPSAMPLE_MODE'
                builder.add_resize_bilinear('resize', 'data', 'output', target_height=params["Hnew"], target_width=params["Wnew"],
                                            mode=mode)
                mlmodel = MLModel(builder.spec)
            except RuntimeError as e:
                print(e)
                eval = False

            return mlmodel, eval

        def get_tf_predictions_resize_bilinear(X, params):
            with tf.Graph().as_default(), tf.Session() as sess:
                x = tf.placeholder(tf.float32, shape=(params["batch"], params["H"], params["W"], params["ch"]))
                y = tf.image.resize_bilinear(x, size = [params["Hnew"], params["Wnew"]], align_corners=params["align_corners"])
                return sess.run(y, feed_dict={x: X})


        '''
        Define Params
        '''
        params_dict = dict( H = [1,3,10], #[1,2,3,10]
                            W = [1,3,10], #[1,2,3,10]
                            Hnew = [1,2,6], #[1,3,6,12,20]
                            Wnew = [1,2,6], #[1,3,6,12,20]
                            align_corners = [False, True], #[False, True]
                            ch = [1,5], #[1,5]
                            batch = [1, 3], #[1, 3]
                            )
        params = [x for x in list(itertools.product(*params_dict.values()))]
        valid_params = [dict(zip(params_dict.keys(), x)) for x in params]
        print("Total params to be tested: {}".format(len(valid_params)))
        '''
        Test
        '''
        failed_tests_compile = []
        for i in range(len(valid_params)):
            params = valid_params[i]
            # #print("=========: ", params)
            if i % 100 == 0:
                print("======================= Testing {}/{}".format(str(i), str(len(valid_params))))
            X = np.round(255 * np.random.rand(params["batch"], params["ch"], params["H"], params["W"]))
            tf_preds = get_tf_predictions_resize_bilinear(np.transpose(X, [0, 2, 3, 1]), params)
            tf_preds = np.transpose(tf_preds, [0, 3, 1, 2])
            coreml_model, eval = get_coreml_model_resize_bilinear(np.expand_dims(X, axis=0), params)
            if eval is False:
                failed_tests_compile.append(params)
            else:
                input_dict = {'data': np.expand_dims(X, axis=0)}
                ref_output_dict = {'output': np.expand_dims(tf_preds, axis=0)}
                self._test_model(input_dict, ref_output_dict, coreml_model, cpu_only=cpu_only)

        self.assertEqual(failed_tests_compile, [])

    @unittest.skipUnless(
        macos_version() >= (10, 14), 'Only supported on MacOS 10.14+')
    def test_resize_bilinear_cpu_only(self):
        self.test_resize_bilinear(cpu_only=True)

    @unittest.skipUnless(
        macos_version() >= (10, 14), 'Only supported on MacOS 10.14+')
    def test_crop_resize(self, cpu_only=False):

        def get_coreml_model_crop_resize(params):
            eval = True
            mlmodel = None
            batch, ch, n_roi = params["b_c_n"]
            H = params["H"]
            W = params["W"]
            try:
                input_features = [('data', datatypes.Array(ch,H,W))]
                input_features.append(('roi', datatypes.Array(4, 1, 1)))
                if batch != 1:
                    input_features.append(('box_ind', datatypes.Array(1, 1, 1)))
                output_features = [('output', None)]
                builder = neural_network.NeuralNetworkBuilder(input_features, output_features)

                if batch != 1:
                    builder.add_elementwise('concat', ['box_ind','roi'], 'roi_out', 'CONCAT')
                    input_names = ['data', 'roi_out']
                else:
                    input_names = ['data', 'roi']

                builder.add_crop_resize('resize', input_names, 'output',
                                        target_height=params["Hnew"], target_width=params["Wnew"],
                                        mode='ALIGN_ENDPOINTS_MODE',
                                        normalized_roi=True,
                                        box_indices_mode='CORNERS_HEIGHT_FIRST',
                                        spatial_scale=1.0)
                mlmodel = MLModel(builder.spec)
            except RuntimeError as e:
                print(e)
                eval = False

            return mlmodel, eval

        def get_tf_predictions_crop_resize(X, boxes, box_ind, params):
            batch, ch, n_roi = params["b_c_n"]
            with tf.Graph().as_default(), tf.Session() as sess:
                x = tf.placeholder(tf.float32, shape=(batch, params["H"], params["W"], ch))
                y = tf.image.crop_and_resize(x, boxes, box_ind, crop_size=[params["Hnew"], params["Wnew"]])
                return sess.run(y, feed_dict={x: X})


        '''
        Define Params
        '''
        params_dict = dict( H = [1,3,10], #[1,2,3,6,10]
                            W = [1,3,10], #[1,2,3,6,10]
                            Hnew = [1,2,3,6], #[1,2,3,6,12,20]
                            Wnew = [1,2,3,6], #[1,2,3,6,12,20]
                            b_c_n = [(1,1,1),(1,2,3),(3,2,1),(3,4,3)], #[(1,1,1),(1,2,3),(3,2,1),(3,4,3)]
                            )
        params = [x for x in list(itertools.product(*params_dict.values()))]
        valid_params = [dict(zip(params_dict.keys(), x)) for x in params]
        print("Total params to be tested: {}".format(len(valid_params)))
        '''
        Test
        '''
        failed_tests_compile = []
        for i in range(len(valid_params)):
            params = valid_params[i]
            #print("=========: ", params)
            # if i % 100 == 0:
            #     print("======================= Testing {}/{}".format(str(i), str(len(valid_params))))
            batch, ch, n_roi = params["b_c_n"]
            X = np.round(255 * np.random.rand(batch, ch, params["H"], params["W"]))
            roi = np.zeros((n_roi, 4), dtype=np.float32)
            box_ind = np.zeros((n_roi))
            if batch != 1:
                box_ind = np.random.randint(low=0, high=batch, size=(n_roi))
            for ii in range(n_roi):
                r = np.random.rand(4)
                w_start = r[0]
                h_start = r[1]
                w_end = r[2] * (1 - w_start) + w_start
                h_end = r[3] * (1 - h_start) + h_start
                roi[ii, :] = [h_start, w_start, h_end, w_end]
                roi[ii, :] = np.round(100 * roi[ii, :]) / 100
                assert roi[ii, 0] <= roi[ii, 2]
                assert roi[ii, 1] <= roi[ii, 3]


            tf_preds = get_tf_predictions_crop_resize(np.transpose(X, [0, 2, 3, 1]), roi, box_ind, params)
            tf_preds = np.transpose(tf_preds, [0, 3, 1, 2])
            coreml_model, eval = get_coreml_model_crop_resize(params)
            if eval is False:
                failed_tests_compile.append(params)
            else:
                input_dict = {'data': np.expand_dims(X, axis=0)}
                input_dict['roi'] = np.reshape(roi, (n_roi,1,4,1,1))
                if batch != 1:
                    input_dict['box_ind'] = np.reshape(box_ind.astype(np.float32), (n_roi,1,1,1,1))
                ref_output_dict = {'output': np.expand_dims(tf_preds, axis=0)}
                self._test_model(input_dict, ref_output_dict, coreml_model, cpu_only=cpu_only)

        self.assertEqual(failed_tests_compile, [])

    @unittest.skipUnless(
        macos_version() >= (10, 14), 'Only supported on MacOS 10.14+')
    def test_crop_resize_cpu_only(self):
        self.test_crop_resize(cpu_only=True)
                               output_name='out')
        # Conv -> Crop -> BN -> ReLU
        spec = builder.spec.neuralNetwork
        np.testing.assert_equal('crop', spec.layers[1].WhichOneof('layer'))
        np.testing.assert_equal('batchnorm', spec.layers[2].WhichOneof('layer'))
        np.testing.assert_equal('activation', spec.layers[3].WhichOneof('layer'))

        # transform the pattern
        transform_conv_crop(builder.spec)
        # Conv -> BN -> ReLU -> Crop
        np.testing.assert_equal('batchnorm', spec.layers[1].WhichOneof('layer'))
        np.testing.assert_equal('activation', spec.layers[2].WhichOneof('layer'))
        np.testing.assert_equal('crop', spec.layers[3].WhichOneof('layer'))


@unittest.skipIf(platform != 'darwin' or macos_version() < (10, 15), "Requires MacOS 10.15 or later")
class Redundant_Transposees_Test(unittest.TestCase):

    def _test_builder(self, builder, input_shape, expected_layer_num=None):

        data = np.random.rand(*input_shape)

        # Mlmodel before
        mlmodel = MLModel(builder.spec)
        output_before = mlmodel.predict({'data':data})['out']
        num_layers_before = len(builder.spec.neuralNetwork.layers)

        remove_redundant_transposes(builder.spec)

        layers = builder.spec.neuralNetwork.layers
        if expected_layer_num is None:
import onnx

import onnx.backend.test

from onnx_coreml._backend import CoreMLBackend, CoreMLBackendND

from coremltools.models.utils import macos_version

# Disable Rank 5 mapping for ONNX backend testing
DISABLE_RANK5_MAPPING = False

MIN_MACOS_VERSION_10_15 = (10, 15)
# If MACOS version is less than 10.15
# Then force testing on CoreML 2.0
if macos_version() < MIN_MACOS_VERSION_10_15:
    DISABLE_RANK5_MAPPING = False

# import all test cases at global scope to make them visible to python.unittest
backend_test = onnx.backend.test.BackendTest(
    CoreMLBackendND if DISABLE_RANK5_MAPPING else CoreMLBackend, __name__)

# Only include the big models tests
backend_test.include('test_resnet50')
backend_test.include('test_inception_v1')
backend_test.include('test_inception_v2')
backend_test.include('test_densenet121')
backend_test.include('test_shufflenet')
backend_test.include('test_squeezenet')

# Slow tests. Skipping for now.
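# (The standard onnx backend-test driver then exposes the selected cases to
#  unittest via globals().update(backend_test.enable_report().test_cases).)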
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

import numpy as _np
import random
import unittest

from coremltools.models.utils import evaluate_transformer,\
    macos_version, is_macos

from coremltools._deps import HAS_SKLEARN
if HAS_SKLEARN:
    from sklearn.preprocessing import Normalizer
    from coremltools.converters import sklearn as converter


@unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
                     'Only supported on macOS 10.13+')
@unittest.skipIf(not HAS_SKLEARN, 'Missing sklearn. Skipping tests.')
class NormalizerScikitTest(unittest.TestCase):
    """
    Unit test class for testing scikit-learn converter.
    """
    def test_random(self):
        # Generate some random data
        X = _np.random.random(size=(50, 3))

        for param in ('l1', 'l2', 'max'):

            cur_model = Normalizer(norm=param)

            output = cur_model.fit_transform(X)
class TestModels(CorrectnessTest):         
  
  def test_inception_v3_slim(self):
    #Download model
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception_v3_2016_08_28_frozen.pb.tar.gz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'inception_v3_2016_08_28_frozen.pb')
    
    #Convert to coreml
    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'inception_v3_2016_08_28.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['InceptionV3/Predictions/Softmax:0'],
        input_name_shape_dict = {'input:0':[1,299,299,3]},
        image_input_names = ['input:0'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0)
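    # With image_scale = 2/255 and per-channel biases of -1, the converted model
    # maps [0, 255] pixel values to [-1, 1], matching the input range used by
    # TF-slim's Inception preprocessing.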

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'InceptionV3/Predictions/Softmax:0',
        img_size = 299)

  def test_googlenet_v1_nonslim(self):
    #Download model
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'tensorflow_inception_graph.pb')
    
    #Convert to coreml
    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'googlenet_v1_nonslim.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['softmax2:0'],
        input_name_shape_dict = {'input:0':[1,224,224,3]},
        image_input_names = ['input:0'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0)

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'softmax2:0',
        img_size = 224)

  def test_googlenet_resnet_v2(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception_resnet_v2_2016_08_30_frozen.pb.tar.gz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'inception_resnet_v2_2016_08_30_frozen.pb')

    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'inception_resnet_v2_2016_08_30_frozen.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['InceptionResnetV2/Logits/Predictions:0'],
        input_name_shape_dict = {'input:0':[1,299,299,3]},
        image_input_names = ['input:0'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0)

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'InceptionResnetV2/Logits/Predictions:0',
        img_size = 299)

  def test_googlenet_v1_slim(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'inception_v1_2016_08_28_frozen.pb')

    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'inception_v1_2016_08_28_frozen.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['InceptionV1/Logits/Predictions/Softmax:0'],
        input_name_shape_dict = {'input:0':[1,224,224,3]},
        image_input_names = ['input:0'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0)

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'InceptionV1/Logits/Predictions/Softmax:0',
        img_size = 224)

  def test_googlenet_v1_slim_with_img_format(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'inception_v1_2016_08_28_frozen.pb')

    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'inception_v1_2016_08_28_frozen.mlmodel')
    with self.assertWarns(Warning):
      mlmodel = tf_converter.convert(
          tf_model_path = tf_model_path,
          mlmodel_path = mlmodel_path,
          output_feature_names = ['InceptionV1/Logits/Predictions/Softmax:0'],
          input_name_shape_dict = {'input:0':[1,224,224,3]},
          image_input_names = ['input:0'],
          red_bias = -1, 
          green_bias = -1, 
          blue_bias = -1, 
          image_scale = 2.0/255.0,
          tf_image_format='NHWC') # Should not be used by legacy converter, expect warning.

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'InceptionV1/Logits/Predictions/Softmax:0',
        img_size = 224)

  @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
  def test_googlenet_v1_slim_coreml_3(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'inception_v1_2016_08_28_frozen.pb')

    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'inception_v1_2016_08_28_frozen.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['InceptionV1/Logits/Predictions/Softmax'],
        input_name_shape_dict = {'input':[1,224,224,3]},
        image_input_names = ['input'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0,
        minimum_ios_deployment_target='13')

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'InceptionV1/Logits/Predictions/Softmax:0',
        img_size = 224,
        minimum_ios_deployment_target='13')

  @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
  def test_googlenet_v1_slim_coreml_3_with_img_format(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'inception_v1_2016_08_28_frozen.pb')

    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'inception_v1_2016_08_28_frozen.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['InceptionV1/Logits/Predictions/Softmax'],
        input_name_shape_dict = {'input':[1,224,224,3]},
        image_input_names = ['input'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0,
        minimum_ios_deployment_target='13',
        tf_image_format='NHWC')

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'InceptionV1/Logits/Predictions/Softmax:0',
        img_size = 224,
        minimum_ios_deployment_target='13')

  def test_googlenet_v2_slim(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception_v2_2016_08_28_frozen.pb.tar.gz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'inception_v2_2016_08_28_frozen.pb')

    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'inception_v2_2016_08_28_frozen.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['InceptionV2/Predictions/Softmax:0'],
        input_name_shape_dict = {'input:0':[1,224,224,3]},
        image_input_names = ['input:0'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0)

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'InceptionV2/Predictions/Softmax:0',
        img_size = 224)

  def test_googlenet_v4_slim(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception_v4_2016_09_09_frozen.pb.tar.gz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'inception_v4_2016_09_09_frozen.pb')

    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'inception_v4_2016_09_09_frozen.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['InceptionV4/Logits/Predictions:0'],
        input_name_shape_dict = {'input:0':[1,299,299,3]},
        image_input_names = ['input:0'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0)

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'InceptionV4/Logits/Predictions:0',
        img_size = 299)


  def test_mobilenet_v1_100_224(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_1.0_224_frozen.tgz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'mobilenet_v1_1.0_224/frozen_graph.pb')

    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'mobilenet_v1_1.0_224.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['MobilenetV1/Predictions/Softmax:0'],
        input_name_shape_dict = {'input:0':[1,224,224,3]},
        image_input_names = ['input:0'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0)

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'MobilenetV1/Predictions/Softmax:0',
        img_size = 224)

  def test_mobilenet_v2_100_224(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_1.0_224_frozen.tgz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'mobilenet_v1_1.0_224/frozen_graph.pb')

    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'mobilenet_v1_1.0_224.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['MobilenetV1/Predictions/Softmax:0'],
        input_name_shape_dict = {'input:0':[1,224,224,3]},
        image_input_names = ['input:0'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0)

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'MobilenetV1/Predictions/Softmax:0',
        img_size = 224)


  def test_mobilenet_v1_75_192(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_0.75_192_frozen.tgz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'mobilenet_v1_0.75_192/frozen_graph.pb')

    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'mobilenet_v1_0.75_192.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['MobilenetV1/Predictions/Softmax:0'],
        input_name_shape_dict = {'input:0':[1,192,192,3]},
        image_input_names = ['input:0'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0)

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'MobilenetV1/Predictions/Softmax:0',
        img_size = 192) 
        
  def test_mobilenet_v1_50_160(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_0.50_160_frozen.tgz'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'mobilenet_v1_0.50_160/frozen_graph.pb')

    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'mobilenet_v1_0.50_160.mlmodel')
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['MobilenetV1/Predictions/Softmax:0'],
        input_name_shape_dict = {'input:0':[1,160,160,3]},
        image_input_names = ['input:0'],
        red_bias = -1, 
        green_bias = -1, 
        blue_bias = -1, 
        image_scale = 2.0/255.0)

    #Test predictions on an image
    self._test_coreml_model_image_input(
        tf_model_path = tf_model_path, 
        coreml_model = mlmodel,
        input_tensor_name = 'input:0',
        output_tensor_name = 'MobilenetV1/Predictions/Softmax:0',
        img_size = 160)

  #@unittest.skip("Failing GPU backend: related to https://github.com/tf-coreml/tf-coreml/issues/26")
  def test_style_transfer(self):
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/stylize_v1.zip'
    tf_model_dir = _download_file(url = url)
    tf_model_path = os.path.join(TMP_MODEL_DIR, 'stylize_quantized.pb')
    mlmodel_path = os.path.join(TMP_MODEL_DIR, 'stylize_quantized.mlmodel')
    # ? style transfer image size and style number?
    mlmodel = tf_converter.convert(
        tf_model_path = tf_model_path,
        mlmodel_path = mlmodel_path,
        output_feature_names = ['Squeeze:0'],
        input_name_shape_dict = {'input:0':[1,256,256,3], 'style_num:0':[26]})

    # Test predictions on an image
    input_tensors = [('input:0',[1,256,256,3]),
                     ('style_num:0',[26])]

    self.err_thresh = 0.5
    self._test_tf_model(
        tf_model_path = tf_model_path,
        coreml_model = mlmodel,
        input_tensors = input_tensors,
        output_tensor_names = ['Squeeze:0'],
        data_modes = ['image', 'onehot_0'], 
        delta = 1e-2,
        use_cpu_only = True,
        scale = 1,
        bias = 0,
        img_size = 256,
        sequence_inputs = {'style_num:0'})
    def test_coreml(self):
        from coremltools.models.utils import macos_version
        if macos_version() >= (10, 13):
            self._test_function('coreml', self.CoremlParse)
class SingleOperatorTest(unittest.TestCase):
    def test_conv(self):  # type: () -> None
        kernel_shape = (3, 2)
        strides = (2, 3)
        pads = (4, 2, 4, 2)
        dilations = (1, 2)
        group = 1
        weight = from_array(_random_array((16, 3, 3, 2)), name="weight")

        input_shape = (1, 3, 224, 224)
        output_size = _conv_pool_output_size(input_shape, dilations,
                                             kernel_shape, pads, strides)

        output_shape = (1, int(weight.dims[0]), output_size[0], output_size[1])

        _test_single_node("Conv", [input_shape], [output_shape],
                          initializer=[weight],
                          dilations=dilations,
                          group=group,
                          kernel_shape=kernel_shape,
                          pads=pads,
                          strides=strides)

    def test_conv_transpose(self):  # type: () -> None
        kernel_shape = (3, 3)
        pads = (0, 0, 0, 0)
        C_in = 3
        C_out = 12
        H_in, W_in = 30, 30
        strides = (2, 2)

        input_shape = (1, C_in, H_in, W_in)
        weight = from_array(_random_array(
            (C_in, C_out, kernel_shape[0], kernel_shape[1])),
                            name="weight")

        H_out = (H_in - 1) * strides[0] + kernel_shape[0] - pads[0] - pads[2]
        W_out = (W_in - 1) * strides[1] + kernel_shape[1] - pads[1] - pads[3]
        output_shape = (1, C_out, H_out, W_out)
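        # Sanity check of the numbers above: H_out = W_out = (30 - 1) * 2 + 3 = 61,
        # so the expected output shape is (1, 12, 61, 61).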

        _test_single_node(
            "ConvTranspose",
            [input_shape],
            [output_shape],
            initializer=[weight],
            # Default values for other attributes: dilations=[1, 1], group=1
            strides=strides,
            kernel_shape=kernel_shape,
            pads=pads,
            output_padding=(0, 0))

    def test_conv_without_pads(self):  # type: () -> None
        kernel_shape = (3, 2)
        strides = (2, 3)
        dilations = (1, 2)
        group = 1
        weight = from_array(_random_array((16, 3, 3, 2)), name="weight")

        input_shape = (1, 3, 224, 224)
        output_size = _conv_pool_output_size(input_shape, dilations,
                                             kernel_shape, [0, 0, 0, 0],
                                             strides)

        output_shape = (1, int(weight.dims[0]), output_size[0], output_size[1])
        _test_single_node("Conv", [input_shape], [output_shape],
                          initializer=[weight],
                          dilations=dilations,
                          group=group,
                          kernel_shape=kernel_shape,
                          strides=strides)

    def test_max_pool(self):  # type: () -> None
        kernel_shape = (5, 3)
        pads = (2, 1, 2, 1)
        strides = (1, 2)

        input_shape = (1, 3, 224, 224)

        output_size = _conv_pool_output_size(input_shape, [1, 1], kernel_shape,
                                             pads, strides)

        output_shape = (1, 3, output_size[0], output_size[1])

        _test_single_node("MaxPool", [input_shape], [output_shape],
                          test_name='test_max_pool_1',
                          kernel_shape=kernel_shape,
                          pads=pads,
                          strides=strides)

        output_size = _conv_pool_output_size(input_shape, [1, 1], kernel_shape,
                                             [0, 0, 0, 0], strides)
        output_shape = (1, 3, output_size[0], output_size[1])
        _test_single_node("MaxPool", [input_shape], [output_shape],
                          test_name='test_max_pool_2',
                          kernel_shape=kernel_shape,
                          strides=strides)

    @unittest.skip('Skip due to internal CoreML CPU backend issue')
    def test_avg_pool(self):  # type: () -> None
        kernel_shape = (5, 3)
        pads = (2, 1, 2, 1)
        strides = (1, 2)

        input_shape = (1, 3, 224, 224)
        output_size = _conv_pool_output_size(input_shape, (1, 1), kernel_shape,
                                             pads, strides)
        output_shape = (1, 3, output_size[0], output_size[1])
        _test_single_node("AveragePool", [input_shape], [output_shape],
                          test_name='test_avg_pool_1',
                          kernel_shape=kernel_shape,
                          pads=pads,
                          strides=strides)

        output_size = _conv_pool_output_size(input_shape, (1, 1), kernel_shape,
                                             [0, 0, 0, 0], strides)
        output_shape = (1, 3, output_size[0], output_size[1])
        _test_single_node("AveragePool", [input_shape], [output_shape],
                          test_name='test_avg_pool_2',
                          kernel_shape=kernel_shape,
                          strides=strides)

    def test_bn(self):  # type: () -> None
        scale = from_array(_random_array((3, )), name="scale")
        bias = from_array(_random_array((3, )), name="bias")
        mean = from_array(_random_array((3, )), name="mean")
        var = from_array(_random_array((3, )), name="var")

        epsilon = 1e-5
        momentum = 0.001

        op_types = ["BatchNormalization", "SpatialBN"]
        for op_type in op_types:
            _test_single_node("BatchNormalization", [(1, 3, 224, 224)],
                              [(1, 3, 224, 224)],
                              initializer=[scale, bias, mean, var],
                              epsilon=epsilon,
                              momentum=momentum)

            # epsilon by default
            _test_single_node(
                "BatchNormalization",
                [(1, 3, 224, 224)],
                [(1, 3, 224, 224)],
                initializer=[scale, bias, mean, var],
                # epsilon=epsilon,
                momentum=momentum)

    def test_gemm(self,
                  minimum_ios_deployment_target='12'):  # type: () -> None
        input_shape = (1, 2048)
        output_shape = (1, 5)
        W = from_array(_random_array((output_shape[1], input_shape[1])),
                       name="weight")
        b = from_array(_random_array((output_shape[1], )), name="bias")
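        # ONNX Gemm with transB=1 computes y = x @ W.T + b, so W is stored as
        # (output_dim, input_dim) = (5, 2048) and b has shape (5,).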
        _test_single_node(
            "Gemm", [input_shape], [output_shape],
            initializer=[W, b],
            decimal=3,
            transB=1,
            minimum_ios_deployment_target=minimum_ios_deployment_target)

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_gemm_ios13(self):
        self.test_gemm(minimum_ios_deployment_target='13')

    def test_gemm_transB_off(self,
                             minimum_ios_deployment_target='12'
                             ):  # type: () -> None
        input_shape = (1, 2048)
        output_shape = (1, 5)
        W = from_array(_random_array((input_shape[1], output_shape[1])),
                       name="weight")
        b = from_array(_random_array((output_shape[1], )), name="bias")
        _test_single_node(
            "Gemm", [input_shape], [output_shape],
            initializer=[W, b],
            decimal=3,
            transB=0,
            minimum_ios_deployment_target=minimum_ios_deployment_target)

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_gemm_transB_off_ios13(self):
        self.test_gemm_transB_off(minimum_ios_deployment_target='13')

    def test_lrn(self):  # type: () -> None
        _test_single_node("LRN", [(1, 3, 224, 224)], [(1, 3, 224, 224)],
                          alpha=9.99e-5,
                          beta=0.75,
                          bias=1.0,
                          size=5)

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_split_axis_0_rank_3(self,
                                 minimum_ios_deployment_target='12'
                                 ):  # type: () -> None
        _test_single_node(
            "Split", [(2, 1, 200)], [(1, 1, 200), (1, 1, 200)],
            axes=0,
            minimum_ios_deployment_target=minimum_ios_deployment_target)

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_concat(self,
                    minimum_ios_deployment_target='13'):  # type: () -> None
        _test_single_node(
            "Concat", [(1, 2, 200), (1, 2, 200)], [(2, 2, 200)],
            axis=0,
            minimum_ios_deployment_target=minimum_ios_deployment_target)

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_gather(self,
                    minimum_ios_deployment_target='13'):  # type: () -> None
        _test_single_node(
            "Gather", [(5, 4, 3), (3, )], [(3, 4, 3)],
            axis=0,
            minimum_ios_deployment_target=minimum_ios_deployment_target)

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_reshape_same_rank(self,
                               minimum_ios_deployment_target='13'
                               ):  # type: () -> None
        _test_single_node(
            "Reshape", [(5, 4, 3), (3, )], [(4, 5, 3)],
            minimum_ios_deployment_target=minimum_ios_deployment_target)

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_reshape_same_rank_infer_shape(self,
                                           minimum_ios_deployment_target='13'
                                           ):  # type: () -> None
        _test_single_node(
            "Reshape", [(5, 4, 3), (3, )], [(5, 2, 6)],
            minimum_ios_deployment_target=minimum_ios_deployment_target)

    # TODO: add test_reshape_diff_rank_infer_shape where shape is Constant and known
    # to test rank-4 into rank-3 reshape with shape inferencing
    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_reshape_dynamic(self,
                             minimum_ios_deployment_target='13'
                             ):  # type: () -> None
        _test_single_node(
            "Reshape", [(5, 4, 3, 2), (3, )], [(2, 3, 20)],
            minimum_ios_deployment_target=minimum_ios_deployment_target)

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_squeeze(self,
                     minimum_ios_deployment_target='13'):  # type: () -> None
        _test_single_node(
            "Squeeze", [(5, 1, 3, 1, 1)], [(5, 3)],
            axes=[1, 3, 4],
            minimum_ios_deployment_target=minimum_ios_deployment_target)

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_transpose_default(self,
                               minimum_ios_deployment_target='13'
                               ):  # type: () -> None
        _test_single_node(
            "Transpose", [(5, 3, 4, 6, 2)], [(2, 6, 4, 3, 5)],
            minimum_ios_deployment_target=minimum_ios_deployment_target)

    @unittest.skipIf(ONNX_SHAPE_INFERENCE_FAILS,
                     'ONNX Shape inference fails to recognize correct shape')
    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_transpose_permute(self,
                               minimum_ios_deployment_target='13'
                               ):  # type: () -> None
        _test_single_node(
            "Transpose", [(5, 3, 4, 6, 2)], [(2, 3, 4, 6, 5)],
            axes=[4, 1, 2, 3, 0],
            minimum_ios_deployment_target=minimum_ios_deployment_target)

    @unittest.skipIf(ONNX_SHAPE_INFERENCE_FAILS,
                     'ONNX Shape inference fails to recognize correct shape')
    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_unsqueeze(self,
                       minimum_ios_deployment_target='13'):  # type: () -> None
        _test_single_node(
            "Unsqueeze", [(5, 3, 4)], [(1, 5, 1, 3, 4)],
            axes=[0, 1],
            minimum_ios_deployment_target=minimum_ios_deployment_target)
    def CoreMLEmit(original_framework, architecture_name, architecture_path,
                   weight_path, image_path):
        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter

        original_framework = checkfrozen(original_framework)

        def prep_for_coreml(prepname, BGRTranspose):
            if prepname == 'Standard':
                return 0.00784313725490196, -1, -1, -1
            elif prepname == 'ZeroCenter' and BGRTranspose:
                return 1, -123.68, -116.779, -103.939
            elif prepname == 'ZeroCenter' and not BGRTranspose:
                return 1, -103.939, -116.779, -123.68
            elif prepname == 'Identity':
                return 1, 1, 1, 1
            else:
                raise ValueError()
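        # Each branch returns (image_scale, red_bias, green_bias, blue_bias);
        # e.g. 'Standard' yields a scale of 2/255 with biases of -1, mapping
        # [0, 255] pixel values to [-1, 1].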

        # IR to Model
        converted_file = original_framework + '_coreml_' + architecture_name + "_converted"
        converted_file = converted_file.replace('.', '_')
        print(converted_file)

        func = TestKit.preprocess_func[original_framework][architecture_name]

        import inspect
        funcstr = inspect.getsource(func)

        coreml_pre = funcstr.split('(')[0].split('.')[-1]

        if len(funcstr.split(',')) == 3:
            BGRTranspose = bool(0)
            img_size = int(funcstr.split('path,')[1].split(')')[0])
        else:
            BGRTranspose = bool(funcstr.split(',')[-2].split(')')[0])
            img_size = int(funcstr.split('path,')[1].split(',')[0])

        prep_list = prep_for_coreml(coreml_pre, BGRTranspose)

        emitter = CoreMLEmitter(architecture_path, weight_path)
        model, input_name, output_name = emitter.gen_model(
            input_names=None,
            output_names=None,
            image_input_names=image_path,
            is_bgr=BGRTranspose,
            red_bias=prep_list[1],
            green_bias=prep_list[2],
            blue_bias=prep_list[3],
            gray_bias=0.0,
            image_scale=prep_list[0],
            class_labels=None,
            predicted_feature_name=None,
            predicted_probabilities_output='')

        import coremltools
        con_model = coremltools.models.MLModel(model)
        print("Model loading success.")

        from coremltools.models.utils import macos_version

        if macos_version() < (10, 13):
            return None

        else:
            from PIL import Image as pil_image
            img = pil_image.open(image_path)
            img = img.resize((img_size, img_size))

            input_data = img
            coreml_inputs = {str(input_name[0][0]): input_data}
            coreml_output = con_model.predict(coreml_inputs, useCPUOnly=False)
            converted_predict = coreml_output[str(output_name[0][0])]
            converted_predict = np.squeeze(converted_predict)

            return converted_predict
    def _evaluation_test_helper(self,
                                class_labels,
                                use_probability_estimates,
                                allow_slow,
                                allowed_prob_delta=0.00001):
        # Parameters to test
        kernel_parameters = [{}, {
            'kernel': 'rbf',
            'gamma': 1.2
        }, {
            'kernel': 'linear'
        }, {
            'kernel': 'poly'
        }, {
            'kernel': 'poly',
            'degree': 2
        }, {
            'kernel': 'poly',
            'gamma': 0.75
        }, {
            'kernel': 'poly',
            'degree': 0,
            'gamma': 0.9,
            'coef0': 2
        }, {
            'kernel': 'sigmoid'
        }, {
            'kernel': 'sigmoid',
            'gamma': 1.3
        }, {
            'kernel': 'sigmoid',
            'coef0': 0.8
        }, {
            'kernel': 'sigmoid',
            'coef0': 0.8,
            'gamma': 0.5
        }]
        non_kernel_parameters = [{}, {
            'C': 1
        }, {
            'C': 1.5,
            'shrinking': True
        }, {
            'C': 0.5,
            'shrinking': False
        }]
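        # The nested loops below sweep the full 4 x 11 = 44 cross product of these
        # grids when allow_slow is True, and only the first combination otherwise.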

        # Generate some random data
        x, y = [], []
        random.seed(42)
        for _ in range(50):
            x.append([
                random.gauss(200, 30),
                random.gauss(-100, 22),
                random.gauss(100, 42)
            ])
            y.append(random.choice(class_labels))
        column_names = ['x1', 'x2', 'x3']
        # make sure first label is seen first, second is seen second, and so on.
        for i, val in enumerate(class_labels):
            y[i] = val
        df = pd.DataFrame(x, columns=column_names)

        # Test
        for param1 in non_kernel_parameters:
            for param2 in kernel_parameters:
                cur_params = param1.copy()
                cur_params.update(param2)
                cur_params['probability'] = use_probability_estimates
                cur_params['max_iter'] = 10  # Don't want test to take too long
                print("cur_params=" + str(cur_params))

                cur_model = SVC(**cur_params)
                cur_model.fit(x, y)

                spec = scikit_converter.convert(cur_model, column_names,
                                                'target')

                if macos_version() >= (10, 13):
                    if use_probability_estimates:
                        probability_lists = cur_model.predict_proba(x)
                        df['classProbability'] = [
                            dict(zip(cur_model.classes_, cur_vals))
                            for cur_vals in probability_lists
                        ]
                        metrics = evaluate_classifier_with_probabilities(
                            spec,
                            df,
                            probabilities='classProbability',
                            verbose=True)
                        self.assertEquals(metrics['num_key_mismatch'], 0)
                        self.assertLess(metrics['max_probability_error'],
                                        allowed_prob_delta)
                    else:
                        df['prediction'] = cur_model.predict(x)
                        metrics = evaluate_classifier(spec, df, verbose=False)
                        self.assertEquals(metrics['num_errors'], 0)

                if not allow_slow:
                    break

            if not allow_slow:
                break
class MLModelTest(unittest.TestCase):
    @classmethod
    def setUpClass(self):

        spec = Model_pb2.Model()
        spec.specificationVersion = coremltools.SPECIFICATION_VERSION

        features = ['feature_1', 'feature_2']
        output = 'output'
        for f in features:
            input_ = spec.description.input.add()
            input_.name = f
            input_.type.doubleType.MergeFromString(b'')

        output_ = spec.description.output.add()
        output_.name = output
        output_.type.doubleType.MergeFromString(b'')

        lr = spec.glmRegressor
        lr.offset.append(0.1)
        weights = lr.weights.add()
        coefs = [1.0, 2.0]
        for i in coefs:
            weights.value.append(i)
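        # With offset 0.1 and weights [1.0, 2.0], inputs feature_1 = feature_2 = 1.0
        # yield 0.1 + 1.0 + 2.0 = 3.1, the value asserted by the predict tests below.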

        spec.description.predictedFeatureName = 'output'
        self.spec = spec

    def test_model_creation(self):
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        filename = tempfile.mktemp(suffix='.mlmodel')
        save_spec(self.spec, filename)
        model = MLModel(filename)
        self.assertIsNotNone(model)

    def test_model_api(self):
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        model.author = 'Test author'
        self.assertEquals(model.author, 'Test author')
        self.assertEquals(model.get_spec().description.metadata.author,
                          'Test author')

        model.license = 'Test license'
        self.assertEquals(model.license, 'Test license')
        self.assertEquals(model.get_spec().description.metadata.license,
                          'Test license')

        model.short_description = 'Test model'
        self.assertEquals(model.short_description, 'Test model')
        self.assertEquals(
            model.get_spec().description.metadata.shortDescription,
            'Test model')

        model.input_description['feature_1'] = 'This is feature 1'
        self.assertEquals(model.input_description['feature_1'],
                          'This is feature 1')

        model.output_description['output'] = 'This is output'
        self.assertEquals(model.output_description['output'], 'This is output')

        filename = tempfile.mktemp(suffix='.mlmodel')
        model.save(filename)
        loaded_model = MLModel(filename)

        self.assertEquals(model.author, 'Test author')
        self.assertEquals(model.license, 'Test license')
        # self.assertEquals(model.short_description, 'Test model')
        self.assertEquals(model.input_description['feature_1'],
                          'This is feature 1')
        self.assertEquals(model.output_description['output'], 'This is output')

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_predict_api(self):
        model = MLModel(self.spec)
        preds = model.predict({'feature_1': 1.0, 'feature_2': 1.0})
        self.assertIsNotNone(preds)
        self.assertEquals(preds['output'], 3.1)

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_rename_input(self):
        rename_feature(self.spec,
                       'feature_1',
                       'renamed_feature',
                       rename_inputs=True)
        model = MLModel(self.spec)
        preds = model.predict({'renamed_feature': 1.0, 'feature_2': 1.0})
        self.assertIsNotNone(preds)
        self.assertEquals(preds['output'], 3.1)
        # reset the spec for next run
        rename_feature(self.spec,
                       'renamed_feature',
                       'feature_1',
                       rename_inputs=True)

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_rename_input_bad(self):
        rename_feature(self.spec, 'blah', 'bad_name', rename_inputs=True)
        model = MLModel(self.spec)
        preds = model.predict({'feature_1': 1.0, 'feature_2': 1.0})
        self.assertIsNotNone(preds)
        self.assertEquals(preds['output'], 3.1)

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_rename_output(self):
        rename_feature(self.spec,
                       'output',
                       'renamed_output',
                       rename_inputs=False,
                       rename_outputs=True)
        model = MLModel(self.spec)
        preds = model.predict({'feature_1': 1.0, 'feature_2': 1.0})
        self.assertIsNotNone(preds)
        self.assertEquals(preds['renamed_output'], 3.1)
        rename_feature(self.spec,
                       'renamed_output',
                       'output',
                       rename_inputs=False,
                       rename_outputs=True)

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_rename_output_bad(self):
        rename_feature(self.spec,
                       'blah',
                       'bad_name',
                       rename_inputs=False,
                       rename_outputs=True)
        model = MLModel(self.spec)
        preds = model.predict({'feature_1': 1.0, 'feature_2': 1.0})
        self.assertIsNotNone(preds)
        self.assertEquals(preds['output'], 3.1)

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_future_version(self):
        self.spec.specificationVersion = 10000
        model = MLModel(self.spec)
        # this model should exist, but throw an exception when we try to use predict because the engine doesn't support
        # this model version
        self.assertIsNotNone(model)
        with self.assertRaises(Exception):
            model.predict(1)
        self.spec.specificationVersion = 1

    @unittest.skipIf(macos_version() >= (10, 13),
                     'Only supported on macOS 10.13-')
    def test_MLModel_warning(self):
        self.spec.specificationVersion = 3
        import warnings
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            model = MLModel(self.spec)
            assert len(w) == 1
            assert issubclass(w[-1].category, RuntimeWarning)
            assert "not able to run predict()" in str(w[-1].message)
        self.spec.specificationVersion = 1
        model = MLModel(self.spec)
    def test_coreml(self):
        from coremltools.models.utils import macos_version
        if macos_version() < (10, 13):
            print('Core ML is not supported on your platform.', file=sys.stderr)
        else:
            self._test_function('coreml', self.CoremlParse)
class MLModelTest(unittest.TestCase):
    @classmethod
    def setUpClass(self):

        spec = Model_pb2.Model()
        spec.specificationVersion = coremltools.SPECIFICATION_VERSION

        features = ['feature_1', 'feature_2']
        output = 'output'
        for f in features:
            input_ = spec.description.input.add()
            input_.name = f
            input_.type.doubleType.MergeFromString(b'')

        output_ = spec.description.output.add()
        output_.name = output
        output_.type.doubleType.MergeFromString(b'')

        lr = spec.glmRegressor
        lr.offset.append(0.1)
        weights = lr.weights.add()
        coefs = [1.0, 2.0]
        for i in coefs:
            weights.value.append(i)

        spec.description.predictedFeatureName = 'output'
        self.spec = spec

    def test_model_creation(self):
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        filename = tempfile.mktemp(suffix='.mlmodel')
        save_spec(self.spec, filename)
        model = MLModel(filename)
        self.assertIsNotNone(model)

    def test_model_api(self):
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        model.author = 'Test author'
        self.assertEqual(model.author, 'Test author')
        self.assertEqual(model.get_spec().description.metadata.author,
                         'Test author')

        model.license = 'Test license'
        self.assertEqual(model.license, 'Test license')
        self.assertEqual(model.get_spec().description.metadata.license,
                         'Test license')

        model.short_description = 'Test model'
        self.assertEqual(model.short_description, 'Test model')
        self.assertEqual(
            model.get_spec().description.metadata.shortDescription,
            'Test model')

        model.input_description['feature_1'] = 'This is feature 1'
        self.assertEqual(model.input_description['feature_1'],
                         'This is feature 1')

        model.output_description['output'] = 'This is output'
        self.assertEqual(model.output_description['output'], 'This is output')

        filename = tempfile.mktemp(suffix='.mlmodel')
        model.save(filename)
        loaded_model = MLModel(filename)

        self.assertEqual(model.author, 'Test author')
        self.assertEqual(model.license, 'Test license')
        # self.assertEqual(model.short_description, 'Test model')
        self.assertEqual(model.input_description['feature_1'],
                         'This is feature 1')
        self.assertEqual(model.output_description['output'], 'This is output')

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_predict_api(self):
        model = MLModel(self.spec)
        preds = model.predict({'feature_1': 1.0, 'feature_2': 1.0})
        self.assertIsNotNone(preds)
        self.assertEqual(preds['output'], 3.1)

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_rename_input(self):
        rename_feature(self.spec,
                       'feature_1',
                       'renamed_feature',
                       rename_inputs=True)
        model = MLModel(self.spec)
        preds = model.predict({'renamed_feature': 1.0, 'feature_2': 1.0})
        self.assertIsNotNone(preds)
        self.assertEqual(preds['output'], 3.1)
        # reset the spec for next run
        rename_feature(self.spec,
                       'renamed_feature',
                       'feature_1',
                       rename_inputs=True)

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_rename_input_bad(self):
        rename_feature(self.spec, 'blah', 'bad_name', rename_inputs=True)
        model = MLModel(self.spec)
        preds = model.predict({'feature_1': 1.0, 'feature_2': 1.0})
        self.assertIsNotNone(preds)
        self.assertEqual(preds['output'], 3.1)

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_rename_output(self):
        rename_feature(self.spec,
                       'output',
                       'renamed_output',
                       rename_inputs=False,
                       rename_outputs=True)
        model = MLModel(self.spec)
        preds = model.predict({'feature_1': 1.0, 'feature_2': 1.0})
        self.assertIsNotNone(preds)
        self.assertEqual(preds['renamed_output'], 3.1)
        rename_feature(self.spec,
                       'renamed_output',
                       'output',
                       rename_inputs=False,
                       rename_outputs=True)

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_rename_output_bad(self):
        rename_feature(self.spec,
                       'blah',
                       'bad_name',
                       rename_inputs=False,
                       rename_outputs=True)
        model = MLModel(self.spec)
        preds = model.predict({'feature_1': 1.0, 'feature_2': 1.0})
        self.assertIsNotNone(preds)
        self.assertEqual(preds['output'], 3.1)

    @unittest.skipIf(macos_version() < (10, 13),
                     'Only supported on macOS 10.13+')
    def test_future_version(self):
        self.spec.specificationVersion = 10000
        filename = tempfile.mktemp(suffix='.mlmodel')
        save_spec(self.spec, filename, auto_set_specification_version=False)
        model = MLModel(filename)
        # this model should exist, but throw an exception when we try to use
        # predict because the engine doesn't support this model version
        self.assertIsNotNone(model)
        with self.assertRaises(Exception):
            try:
                model.predict({})
            except Exception as e:
                assert 'Core ML model specification version' in str(e)
                raise
        self.spec.specificationVersion = 1

    @unittest.skipIf(macos_version() >= (10, 13),
                     'Only supported on macOS 10.13-')
    def test_MLModel_warning(self):
        self.spec.specificationVersion = 3
        import warnings
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            model = MLModel(self.spec)
            assert len(w) == 1
            assert issubclass(w[-1].category, RuntimeWarning)
            assert "not able to run predict()" in str(w[-1].message)
        self.spec.specificationVersion = 1
        model = MLModel(self.spec)

    def test_convert_nn_spec_to_half_precision(self):
        # simple network with quantization layer
        input_features = [('data', datatypes.Array(3))]
        output_features = [('out', datatypes.Array(3))]
        builder = NeuralNetworkBuilder(input_features, output_features)
        weights = np.random.uniform(-0.5, 0.5, (3, 3))
        builder.add_inner_product(name='inner_product',
                                  W=weights,
                                  b=None,
                                  input_channels=3,
                                  output_channels=3,
                                  has_bias=False,
                                  input_name='data',
                                  output_name='out')
        model = MLModel(builder.spec)
        spec = convert_neural_network_spec_weights_to_fp16(model.get_spec())
        self.assertIsNotNone(spec)

        # simple network without quantization layer
        input_features = [('data', datatypes.Array(3))]
        output_features = [('out', datatypes.Array(3))]
        builder = NeuralNetworkBuilder(input_features, output_features)
        builder.add_lrn(name='lrn',
                        input_name='data',
                        output_name='out',
                        alpha=2,
                        beta=3,
                        local_size=1,
                        k=8)
        model = MLModel(builder.spec)
        spec = convert_neural_network_spec_weights_to_fp16(model.get_spec())
        self.assertIsNotNone(spec)
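        # Note: quantize_weights(model, nbits=16) from
        # coremltools.models.neural_network.quantization_utils should give the same
        # fp16 conversion at the MLModel level; it is not exercised in this test.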

    def test_downgrade_specification_version(self):
        # manually set an invalid specification version
        self.spec.specificationVersion = -1
        model = MLModel(self.spec)
        assert model.get_spec().specificationVersion == 1

        # manually set a high specification version
        self.spec.specificationVersion = 4
        filename = tempfile.mktemp(suffix='.mlmodel')
        save_spec(self.spec, filename)
        model = MLModel(filename)
        assert model.get_spec().specificationVersion == 1

        # simple neural network with only spec 1 layer
        input_features = [('data', datatypes.Array(3))]
        output_features = [('out', datatypes.Array(3))]
        builder = NeuralNetworkBuilder(input_features, output_features)
        builder.add_activation('relu', 'RELU', 'data', 'out')
        # set a high specification version
        builder.spec.specificationVersion = 3
        model = MLModel(builder.spec)
        filename = tempfile.mktemp(suffix='.mlmodel')
        model.save(filename)
        # load the model back
        model = MLModel(filename)
        assert model.get_spec().specificationVersion == 1

        # test save without automatic set specification version
        self.spec.specificationVersion = 3
        filename = tempfile.mktemp(suffix='.mlmodel')
        save_spec(self.spec, filename, auto_set_specification_version=False)
        model = MLModel(filename)
        # the specification version should be original
        assert model.get_spec().specificationVersion == 3
import unittest
import numpy as np
import os, shutil
import tempfile
import coremltools
from coremltools.models import datatypes, MLModel
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models.utils import macos_version
from coremltools.models.neural_network.quantization_utils import \
    _convert_array_to_nbit_quantized_bytes, quantize_weights
import pytest


@unittest.skipIf(macos_version() < (10, 15), 'Only supported on macOS 10.15+')
class ControlFlowCorrectnessTest(unittest.TestCase):
    @classmethod
    def setup_class(cls):
        pass

    def runTest(self):
        pass

    def _test_model(self, model, input_dict, output_ref, delta=1e-2):
        preds = model.predict(input_dict)
        for name in output_ref:
            ref_val = output_ref[name]
            val = preds[name]
            self.assertTrue(np.allclose(val, ref_val, rtol=delta))

    def test_simple_branch(self):
        """ Test a simple if-else branch network
class BinaryOperationTests(unittest.TestCase):
    '''
    Binary Operation Test cases
    '''
    ## Addition tests
    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_add_same_shape(self, target_ios='13'):
        class Net(nn.Module):
            def forward(self, x):
                return torch.add(x, y)

        y = torch.rand((18, 4, 5))
        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (18, 4, 5), (18, 4, 5), target_ios=target_ios)  # type: ignore
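        # y is captured from the enclosing scope, so tracing folds it into the
        # converted graph as a constant; the Core ML model exposes only x as input.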

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_add_same_shape_multiple(self, target_ios='13'):
        class Net(nn.Module):
            def forward(self, x):
                return x + y + y1 + y2 + y3

        y = torch.rand((18, 4, 5))
        y1 = torch.rand((4, 5))
        y2 = torch.rand((18, 4, 5))
        y3 = 7.234
        
        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (18, 4, 5), (18, 4, 5), target_ios=target_ios)  # type: ignore

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_add_tensor_scalar(self, target_ios='13'):
        class Net(nn.Module):
            def forward(self, x):
                return torch.add(x, y)

        y = 5
        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (18, 4, 5), (18, 4, 5), target_ios=target_ios)  # type: ignore

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_add_diff_shape(self, target_ios='13'):
        class Net(nn.Module):
            def forward(self, x):
                return torch.add(x, y)

        y = torch.rand((4, 5))
        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (18, 4, 5), (18, 4, 5), target_ios=target_ios)  # type: ignore

    ## Subtraction tests
    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_sub_same_shape(self, target_ios='13'):
        class Net(nn.Module):
            def forward(self, x):
                return torch.sub(x, y)

        y = torch.rand((18, 4, 5))
        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (18, 4, 5), (18, 4, 5), target_ios=target_ios)  # type: ignore

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_sub_same_shape_multiple(self, target_ios='13'):
        class Net(nn.Module):
            def forward(self, x):
                return x - y - y1 - y2 - y3

        y = torch.rand((18, 4, 5))
        y1 = torch.rand((4, 5))
        y2 = torch.rand((18, 4, 5))
        y3 = 7.234
        
        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (18, 4, 5), (18, 4, 5), target_ios=target_ios)  # type: ignore

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_sub_tensor_scalar(self, target_ios='13'):
        class Net(nn.Module):
            def forward(self, x):
                return torch.sub(x, y)

        y = 5
        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (18, 4, 5), (18, 4, 5), target_ios=target_ios)  # type: ignore

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_sub_diff_shape(self, target_ios='13'):
        class Net(nn.Module):
            def forward(self, x):
                return torch.sub(x, y)

        y = torch.rand((4, 5))
        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (18, 4, 5), (18, 4, 5), target_ios=target_ios)  # type: ignore

    @unittest.skipIf(macos_version() < MIN_MACOS_VERSION_10_15,
                     'macOS 10.15+ required. Skipping test.')
    def test_binary_ops_mix_test(self, target_ios='13'):
        class Net(nn.Module):
            def forward(self, x):
                return ((x * g + a) - d * (c + b) + (a * e - g) / e) / f

        a = torch.rand((18, 4, 5))
        b = torch.rand((4, 5))
        c = torch.rand((18, 4, 5))
        d = 7.234
        e = torch.rand((5))
        f = 8.234
        g = 5
         
        torch_model = Net()  # type: ignore
        torch_model.train(False)
        _test_torch_model_single_io(torch_model, (18, 4, 5), (18, 4, 5), target_ios=target_ios)  # type: ignore