Example #1
    def test_default_names(self):
        df = pd.DataFrame({"input": self.x})
        df["input"] = df["input"].apply(np.array)

        # Test with probabilities
        spec = libsvm.convert(self.libsvm_model).get_spec()
        if _is_macos() and _macos_version() >= (10, 13):
            (_, _, probability_lists) = svm_predict(self.y, self.x,
                                                    self.libsvm_model,
                                                    "-b 1 -q")
            probability_dicts = [
                dict(zip([1, 2], cur_vals)) for cur_vals in probability_lists
            ]
            df["classProbability"] = probability_dicts
            metrics = evaluate_classifier_with_probabilities(
                spec, df, verbose=False, probabilities="classProbability")
            self.assertLess(metrics["max_probability_error"], 0.00001)

        # Test model without probabilities
        no_probability_model = svmutil.svm_train(self.prob,
                                                 svmutil.svm_parameter())
        spec = libsvm.convert(no_probability_model).get_spec()
        self.assertEqual(len(spec.description.output), 1)
        self.assertEqual(spec.description.output[0].name, u"target")
        if _is_macos() and _macos_version() >= (10, 13):
            (df["prediction"], _, _) = svm_predict(self.y, self.x,
                                                   no_probability_model, " -q")
            metrics = evaluate_classifier(spec, df, verbose=False)
            self.assertEqual(metrics["num_errors"], 0)
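
The helper above leans on fixtures created on the test class; a minimal sketch of what that setup might look like (the names self.x, self.y, self.prob and self.libsvm_model are taken from the code above, the data itself is illustrative):

    import random
    from svmutil import svm_problem, svm_parameter, svm_train

    @classmethod
    def setUpClass(cls):
        # Hypothetical fixture: a two-class problem trained with probability
        # estimates ("-b 1") so that classProbability can be evaluated.
        random.seed(42)
        cls.x = [[random.gauss(0, 1), random.gauss(0, 1)] for _ in range(100)]
        cls.y = [1] * 50 + [2] * 50  # labels 1 and 2, matching the zip above
        cls.prob = svm_problem(cls.y, cls.x)
        cls.libsvm_model = svm_train(cls.prob, svm_parameter("-b 1 -q"))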
Example #2

    def _train_convert_evaluate_assert(self, **scikit_params):
        """
        Train a scikit-learn model, convert it and then evaluate it with CoreML
        """
        scikit_model = GradientBoostingClassifier(random_state=1, **scikit_params)
        scikit_model.fit(self.X, self.target)

        # Convert the model
        spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name)

        if hasattr(scikit_model, '_init_decision_function') and scikit_model.n_classes_ > 2:
            import numpy as np
            # Fix the initial default prediction for multiclass classification:
            # https://github.com/scikit-learn/scikit-learn/pull/12983
            assert hasattr(scikit_model, 'init_')
            assert hasattr(scikit_model.init_, 'priors')
            scikit_model.init_.priors = np.log(scikit_model.init_.priors)

        if _is_macos() and _macos_version() >= (10, 13):
            # Get predictions
            df = pd.DataFrame(self.X, columns=self.feature_names)
            df["prediction"] = scikit_model.predict(self.X)

            # Evaluate it
            metrics = evaluate_classifier(spec, df)
            self._check_metrics(metrics)
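
Helpers like _train_convert_evaluate_assert assume fixtures created once per class; a plausible sketch, where the dataset choice and the binarized target are assumptions based only on the attributes used above:

    from sklearn.datasets import load_boston

    @classmethod
    def setUpClass(cls):
        data = load_boston()
        cls.X = data.data
        # Binarizing the regression target is an assumption; any integer
        # labels would exercise the classifier path the same way.
        cls.target = (data.target > data.target.mean()).astype(int)
        cls.feature_names = list(data.feature_names)
        cls.output_name = "target"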
Example #3
    def test_int_features_in_pipeline(self):

        import numpy.random as rn
        import pandas as pd

        rn.seed(0)

        x_train_dict = [
            dict((rn.randint(100), 1) for i in range(20)) for j in range(100)
        ]
        y_train = [0, 1] * 50

        from sklearn.pipeline import Pipeline
        from sklearn.feature_extraction import DictVectorizer
        from sklearn.linear_model import LogisticRegression

        pl = Pipeline([("dv", DictVectorizer()), ("lm", LogisticRegression())])
        pl.fit(x_train_dict, y_train)

        import coremltools

        model = coremltools.converters.sklearn.convert(
            pl, input_features="features", output_feature_names="target")

        if _is_macos() and _macos_version() >= (10, 13):
            x = pd.DataFrame({
                "features": x_train_dict,
                "prediction": pl.predict(x_train_dict)
            })

            cur_eval_metrics = evaluate_classifier(model, x)
            self.assertEqual(cur_eval_metrics["num_errors"], 0)
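
On macOS the converted pipeline can also be queried directly; a small sketch (the sample dictionary is illustrative, "features" and "target" are the names given to the converter above):

    # The DictVectorizer input arrives as a single dictionary-typed feature.
    sample = {3: 1, 17: 1, 42: 1}
    prediction = model.predict({"features": sample})["target"]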
Example #4

    def _train_convert_evaluate_assert(self, **xgboost_params):
        """
        Train an XGBoost model, convert it and then evaluate it with CoreML
        """
        xgb_model = xgboost.XGBClassifier(**xgboost_params)
        xgb_model.fit(self.X, self.target)

        # Convert the model
        spec = xgb_converter.convert(xgb_model,
                                     self.feature_names,
                                     self.output_name,
                                     mode="classifier")

        if _is_macos() and _macos_version() >= (10, 13):
            # Get predictions
            df = pd.DataFrame(self.X, columns=self.feature_names)
            probabilities = xgb_model.predict_proba(self.X)
            df["classProbability"] = [
                dict(zip(xgb_model.classes_, cur_vals))
                for cur_vals in probabilities
            ]
            metrics = evaluate_classifier_with_probabilities(
                spec, df, probabilities="classProbability", verbose=False)
            self.assertEqual(metrics["num_key_mismatch"], 0)
            self.assertLess(metrics["max_probability_error"], 1e-3)
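
A condensed sketch of the conversion path this helper exercises, with placeholder data and feature names:

    import numpy as np
    import xgboost
    from coremltools.converters import xgboost as xgb_converter

    X = np.random.rand(100, 4)
    y = np.random.randint(0, 2, 100)
    clf = xgboost.XGBClassifier(n_estimators=10).fit(X, y)

    # mode="classifier" makes the converter emit a classifier spec with
    # per-class probabilities instead of a plain regressor.
    spec = xgb_converter.convert(clf, ["f0", "f1", "f2", "f3"], "target",
                                 mode="classifier")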
Example #5

    def _train_convert_evaluate_assert(self,
                                       bt_params=None,
                                       allowed_error=None,
                                       **params):
        """
        Train an XGBoost booster, convert it, and evaluate it with CoreML.
        """
        # Avoid mutable default arguments.
        bt_params = bt_params if bt_params is not None else {}
        allowed_error = allowed_error if allowed_error is not None else {}

        # Train a model
        xgb_model = xgboost.train(bt_params, self.dtrain, **params)

        # Convert the model
        spec = xgb_converter.convert(xgb_model,
                                     self.feature_names,
                                     self.output_name,
                                     force_32bit_float=False)

        if _is_macos() and _macos_version() >= (10, 13):
            # Get predictions
            df = pd.DataFrame(self.X, columns=self.feature_names)
            df["prediction"] = xgb_model.predict(self.dtrain)

            # Evaluate it
            metrics = evaluate_regressor(spec,
                                         df,
                                         target="target",
                                         verbose=False)
            self._check_metrics(metrics, allowed_error, bt_params)
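
Unlike the sklearn-style wrapper, xgboost.train consumes a DMatrix, so the fixtures referenced above might be prepared along these lines (the dataset is an assumption):

    import xgboost
    from sklearn.datasets import load_boston

    @classmethod
    def setUpClass(cls):
        data = load_boston()
        cls.X = data.data
        cls.feature_names = list(data.feature_names)
        cls.output_name = "target"
        # The native training API takes a DMatrix rather than a raw array.
        cls.dtrain = xgboost.DMatrix(cls.X, label=data.target,
                                     feature_names=cls.feature_names)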
Example #6

    def _train_convert_evaluate_assert(self, bt_params=None, allowed_error=None, **params):
        """
        Train an XGBoost regressor, convert it, and evaluate it with CoreML.
        """
        if bt_params is None:
            bt_params = {}
        if allowed_error is None:
            allowed_error = {}
        # Train a model
        xgb_model = xgboost.XGBRegressor(**params)
        xgb_model.fit(self.X, self.target)

        # Convert the model (feature_names can't be given because of XGBoost)
        spec = xgb_converter.convert(
            xgb_model, self.feature_names, self.output_name, force_32bit_float=False
        )

        if _is_macos() and _macos_version() >= (10, 13):
            # Get predictions
            df = pd.DataFrame(self.X, columns=self.feature_names)
            df["prediction"] = xgb_model.predict(self.X)

            # Evaluate it
            metrics = evaluate_regressor(spec, df, target="target", verbose=False)
            self._check_metrics(metrics, bt_params, allowed_error)
Example #7

    def _conversion_and_evaluation_helper_for_linear_svc(self, class_labels):
        ARGS = [
            {},
            {"C": 0.75, "loss": "hinge"},
            {"penalty": "l1", "dual": False},
            {"tol": 0.001, "fit_intercept": False},
            {"intercept_scaling": 1.5},
        ]

        x, y = GlmCassifierTest._generate_random_data(class_labels)
        column_names = ["x1", "x2"]
        df = pd.DataFrame(x, columns=column_names)

        for cur_args in ARGS:
            print(class_labels, cur_args)
            cur_model = LinearSVC(**cur_args)
            cur_model.fit(x, y)

            spec = convert(
                cur_model, input_features=column_names, output_feature_names="target"
            )

            if _is_macos() and _macos_version() >= (10, 13):
                df["prediction"] = cur_model.predict(x)

                cur_eval_metrics = evaluate_classifier(spec, df, verbose=False)
                self.assertEqual(cur_eval_metrics["num_errors"], 0)
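
_generate_random_data is defined elsewhere in the suite; a plausible sketch consistent with how it is used here (two features, labels drawn from class_labels, every label guaranteed to appear):

    import random

    @staticmethod
    def _generate_random_data(labels, num_samples=100):
        random.seed(42)
        x = [[random.gauss(2, 3), random.gauss(-1, 2)]
             for _ in range(num_samples)]
        y = [random.choice(labels) for _ in range(num_samples)]
        # Make sure the first label is seen first, the second is seen second,
        # and so on, mirroring the other helpers in this suite.
        for i, val in enumerate(labels):
            y[i] = val
        return x, y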
Example #8
    def _train_convert_evaluate_assert(self, **scikit_params):
        """
        Train a scikit-learn model, convert it and then evaluate it with CoreML
        """
        from sklearn.tree import DecisionTreeRegressor
        from coremltools.converters import sklearn as skl_converter

        scikit_model = DecisionTreeRegressor(random_state=1, **scikit_params)
        scikit_model.fit(self.X, self.target)

        # Convert the model
        spec = skl_converter.convert(scikit_model, self.feature_names,
                                     self.output_name)

        if _is_macos() and _macos_version() >= (10, 13):
            # Get predictions
            df = pd.DataFrame(self.X, columns=self.feature_names)
            df["prediction"] = scikit_model.predict(self.X)

            # Evaluate it
            metrics = evaluate_regressor(spec,
                                         df,
                                         target="target",
                                         verbose=False)
            self._check_metrics(metrics, scikit_params)
Example #9
    def _test_evaluation(self, allow_slow):
        """
        Test that the same predictions are made
        """
        from svm import svm_parameter, svm_problem
        from svmutil import svm_train, svm_predict

        # Generate some smallish (poly kernels take too long on anything else) random data
        x, y = [], []
        for _ in range(50):
            cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)
            x.append([cur_x1, cur_x2])
            y.append(1 + 2 * cur_x1 + 3 * cur_x2)

        input_names = ["x1", "x2"]
        df = pd.DataFrame(x, columns=input_names)
        prob = svm_problem(y, x)

        # Parameters
        base_param = "-s 3"  # model type is epsilon SVR
        non_kernel_parameters = [
            "", "-c 1.5 -p 0.5 -h 1", "-c 0.5 -p 0.5 -h 0"
        ]
        kernel_parameters = [
            "",
            "-t 2 -g 1.2",  # rbf kernel
            "-t 0",  # linear kernel
            "-t 1",
            "-t 1 -d 2",
            "-t 1 -g 0.75",
            "-t 1 -d 0 -g 0.9 -r 2",  # poly kernel
            "-t 3",
            "-t 3 -g 1.3",
            "-t 3 -r 0.8",
            "-t 3 -r 0.8 -g 0.5",  # sigmoid kernel
        ]

        for param1 in non_kernel_parameters:
            for param2 in kernel_parameters:
                param_str = " ".join([base_param, param1, param2])
                print(param_str)
                param = svm_parameter(param_str)

                model = svm_train(prob, param)
                (df["prediction"], _, _) = svm_predict(y, x, model)

                spec = libsvm.convert(model,
                                      input_names=input_names,
                                      target_name="target")

                if _is_macos() and _macos_version() >= (10, 13):
                    metrics = evaluate_regressor(spec, df)
                    self.assertAlmostEqual(metrics["max_error"], 0)

                if not allow_slow:
                    break

            if not allow_slow:
                break
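
For reference, the libsvm option strings used above decode as follows (standard libsvm command-line flags):

    # -s 3   : epsilon-SVR (regression)
    # -t N   : kernel type (0 linear, 1 polynomial, 2 RBF, 3 sigmoid)
    # -c C   : cost parameter
    # -p eps : epsilon in the epsilon-SVR loss function
    # -g g   : kernel gamma
    # -d N   : polynomial degree
    # -r c0  : coef0 for the polynomial and sigmoid kernels
    # -h 0/1 : shrinking heuristics off/on
    # -b 1   : train with probability estimates
    # -q     : quiet mode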
Example #10
    def test_keras_1_image_bias(self):
        # define Keras model and get prediction
        input_shape = (100, 50, 3)
        model = Sequential()
        model.add(Activation("linear", input_shape=input_shape))

        data = np.ones(input_shape)
        keras_input = np.ones(input_shape)
        data[:, :, 0] = 128.0
        data[:, :, 1] = 27.0
        data[:, :, 2] = 200.0
        red_bias = -12.0
        green_bias = -20
        blue_bias = -4
        keras_input[:, :, 0] = data[:, :, 0] + red_bias
        keras_input[:, :, 1] = data[:, :, 1] + green_bias
        keras_input[:, :, 2] = data[:, :, 2] + blue_bias

        keras_preds = model.predict(np.expand_dims(keras_input, axis=0))
        keras_preds = np.transpose(keras_preds, [0, 3, 1, 2]).flatten()

        # convert to coreml and get predictions
        model_dir = tempfile.mkdtemp()
        model_path = os.path.join(model_dir, "keras.mlmodel")
        from coremltools.converters import keras as keras_converter

        coreml_model = keras_converter.convert(
            model,
            input_names=["data"],
            output_names=["output"],
            image_input_names=["data"],
            red_bias=red_bias,
            green_bias=green_bias,
            blue_bias=blue_bias,
        )

        if _is_macos() and _macos_version() >= (10, 13):
            coreml_input_dict = dict()
            coreml_input_dict["data"] = PIL.Image.fromarray(
                data.astype(np.uint8))
            coreml_preds = coreml_model.predict(
                coreml_input_dict)["output"].flatten()

            self.assertEqual(len(keras_preds), len(coreml_preds))
            max_relative_error = compare_models(keras_preds, coreml_preds)
            self.assertAlmostEqual(max(max_relative_error, 0.001),
                                   0.001,
                                   delta=1e-6)

        if os.path.exists(model_dir):
            shutil.rmtree(model_dir)
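
The equivalence under test is that Core ML applies the per-channel bias to the image before the (here, identity) network runs; a standalone numpy check of that bookkeeping, using the values from the test:

    import numpy as np

    # Conceptually: preprocessed[c] = scale * pixel[c] + bias[c], scale = 1.0.
    pixel = np.array([128.0, 27.0, 200.0])   # RGB values written into `data`
    bias = np.array([-12.0, -20.0, -4.0])    # red_bias, green_bias, blue_bias
    assert np.allclose(pixel + bias, [116.0, 7.0, 196.0])  # = keras_input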
Example #11

    def _train_convert_evaluate_assert(self, **scikit_params):
        scikit_model = GradientBoostingRegressor(random_state=1, **scikit_params)
        scikit_model.fit(self.X, self.target)

        # Convert the model
        spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name)

        if _is_macos() and _macos_version() >= (10, 13):
            # Get predictions
            df = pd.DataFrame(self.X, columns=self.feature_names)
            df["prediction"] = scikit_model.predict(self.X)

            # Evaluate it
            metrics = evaluate_regressor(spec, df, "target", verbose=False)
            self._check_metrics(metrics, scikit_params)
Example #12

    def _train_convert_evaluate_assert(self, **scikit_params):
        scikit_model = DecisionTreeClassifier(random_state=1, **scikit_params)
        scikit_model.fit(self.X, self.target)

        # Convert the model
        spec = skl_converter.convert(scikit_model, self.feature_names,
                                     self.output_name)

        if _is_macos() and _macos_version() >= (10, 13):
            # Get predictions
            df = pd.DataFrame(self.X, columns=self.feature_names)
            df["prediction"] = scikit_model.predict(self.X)

            # Evaluate it
            metrics = evaluate_classifier(spec, df)
            self._check_metrics(metrics, scikit_params)
Example #13
    def test_pipeline_rename(self):
        # Convert
        scikit_spec = converter.convert(self.scikit_model).get_spec()
        model = MLModel(scikit_spec)
        sample_data = self.scikit_data.data[0]

        # Rename
        rename_feature(scikit_spec, "input", "renamed_input")
        renamed_model = MLModel(scikit_spec)

        # Check the predictions
        if _is_macos() and _macos_version() >= (10, 13):
            out_dict = model.predict({"input": sample_data})
            out_dict_renamed = renamed_model.predict({"renamed_input": sample_data})
            self.assertEqual(list(out_dict.keys()), list(out_dict_renamed.keys()))
            self.assertEqual(
                list(out_dict.values()), list(out_dict_renamed.values())
            )
Example #14
    def _evaluation_test_helper_no_probability(self, labels, allow_slow):
        # Generate some random data.
        # This unit test should not rely on scikit learn for test data.
        x, y = [], []
        random.seed(42)
        for _ in range(50):
            x.append([
                random.gauss(200, 30),
                random.gauss(-100, 22),
                random.gauss(100, 42)
            ])
            y.append(random.choice(labels))
        # make sure first label is seen first, second is seen second, and so on.
        for i, val in enumerate(labels):
            y[i] = val
        column_names = ["x1", "x2", "x3"]
        prob = svmutil.svm_problem(y, x)

        df = pd.DataFrame(x, columns=column_names)

        for param1 in self.non_kernel_parameters:
            for param2 in self.kernel_parameters:
                param_str = " ".join([self.base_param, param1, param2])
                print("PARAMS: ", param_str)
                param = svm_parameter(param_str)

                model = svm_train(prob, param)

                # Get predictions with probabilities as dictionaries
                (df["prediction"], _, _) = svm_predict(y, x, model, " -q")

                spec = libsvm.convert(model, column_names, "target")

                if _is_macos() and _macos_version() >= (10, 13):
                    metrics = evaluate_classifier(spec, df, verbose=False)
                    self.assertEqual(metrics["num_errors"], 0)

                if not allow_slow:
                    break

            if not allow_slow:
                break
Example #15
    def _evaluation_test_helper_with_probability(self, labels, allow_slow):
        import copy

        df = pd.DataFrame(self.x, columns=self.column_names)
        y = copy.copy(self.y)
        for i, val in enumerate(labels):
            y[i] = val
        probability_param = "-b 1"

        for param1 in self.non_kernel_parameters:
            for param2 in self.kernel_parameters:
                param_str = " ".join(
                    [self.base_param, param1, param2, probability_param])
                # print("PARAMS: ", param_str)
                param = svm_parameter(param_str)

                model = svm_train(self.prob, param)

                # Get predictions with probabilities as dictionaries
                (df["prediction"], _,
                 probability_lists) = svm_predict(y, self.x, model,
                                                  probability_param + " -q")
                probability_dicts = [
                    dict(zip([1, 2], cur_vals))
                    for cur_vals in probability_lists
                ]
                df["probabilities"] = probability_dicts

                spec = libsvm.convert(model, self.column_names, "target",
                                      "probabilities")

                if _is_macos() and _macos_version() >= (10, 13):
                    metrics = evaluate_classifier_with_probabilities(
                        spec, df, verbose=False)
                    self.assertEqual(metrics["num_key_mismatch"], 0)
                    self.assertLess(metrics["max_probability_error"], 0.00001)

                if not allow_slow:
                    break

            if not allow_slow:
                break
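
Note the fourth positional argument to libsvm.convert above: it names the dictionary output carrying the class probabilities, which is why evaluate_classifier_with_probabilities finds the "probabilities" column without an explicit keyword. An equivalent keyword form (the parameter name is an assumption; check the converter in your installed coremltools version):

    spec = libsvm.convert(model, self.column_names, "target",
                          probability="probabilities")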
Example #16
    def _train_convert_evaluate_assert(self, **scikit_params):
        """
        Train a scikit-learn model, convert it and then evaluate it with CoreML
        """
        scikit_model = GradientBoostingClassifier(random_state=1,
                                                  **scikit_params)
        scikit_model.fit(self.X, self.target)

        # Convert the model
        spec = skl_converter.convert(scikit_model, self.feature_names,
                                     self.output_name)

        if _is_macos() and _macos_version() >= (10, 13):
            # Get predictions
            df = pd.DataFrame(self.X, columns=self.feature_names)
            df["prediction"] = scikit_model.predict(self.X)

            # Evaluate it
            metrics = evaluate_classifier(spec, df)
            self._check_metrics(metrics)
Example #17
    def test_classifier(self):
        np.random.seed(1988)

        print("running test classifier")

        input_dim = 5
        num_hidden = 12
        num_classes = 6
        input_length = 3

        model = Sequential()
        model.add(
            LSTM(
                num_hidden,
                input_dim=input_dim,
                input_length=input_length,
                return_sequences=False,
            ))
        model.add(Dense(num_classes, activation="softmax"))

        model.set_weights(
            [np.random.rand(*w.shape) for w in model.get_weights()])

        input_names = ["input"]
        output_names = ["zzzz"]
        class_labels = ["a", "b", "c", "d", "e", "f"]
        predicted_feature_name = "pf"
        coremlmodel = keras_converter.convert(
            model,
            input_names,
            output_names,
            class_labels=class_labels,
            predicted_feature_name=predicted_feature_name,
            predicted_probabilities_output=output_names[0],
        )

        if _is_macos() and _macos_version() >= (10, 13):
            inputs = np.random.rand(input_dim)
            outputs = coremlmodel.predict({"input": inputs})
            # this checks that the dictionary got the right name and type
            self.assertEqual(type(outputs[output_names[0]]), dict)
Example #18
    def test_boston_OHE_plus_normalizer(self):

        data = load_boston()

        pl = Pipeline([
            ("OHE", OneHotEncoder(categorical_features=[8], sparse=False)),
            ("Scaler", StandardScaler()),
        ])

        pl.fit(data.data, data.target)

        # Convert the model
        spec = convert(pl, data.feature_names, "out")

        if _is_macos() and _macos_version() >= (10, 13):
            input_data = [
                dict(zip(data.feature_names, row)) for row in data.data
            ]
            output_data = [{"out": row} for row in pl.transform(data.data)]

            result = evaluate_transformer(spec, input_data, output_data)
            assert result["num_errors"] == 0
Example #19
    def _test_conversion(self, data, trained_dict_vectorizer):

        X = trained_dict_vectorizer.transform(data)

        m = sklearn.convert(
            trained_dict_vectorizer,
            input_features="features",
            output_feature_names="output",
        )

        if _is_macos() and _macos_version() >= (10, 13):
            ret = evaluate_transformer(
                m,
                [{
                    "features": row
                } for row in data],
                [{
                    "output": x_r
                } for x_r in X],
                True,
            )
            assert ret["num_errors"] == 0
Example #20
    def _test_prob_model(self, param1, param2):
        probability_param = "-b 1"
        df = self.df

        param_str = " ".join([self.base_param, param1, param2, probability_param])
        param = svmutil.svm_parameter(param_str)
        model = svm_train(self.prob, param)

        # Get predictions with probabilities as dictionaries
        (df["prediction"], _, probability_lists) = svm_predict(
            self.y, self.x, model, probability_param + " -q"
        )
        probability_dicts = [
            dict(zip([1, 2], cur_vals)) for cur_vals in probability_lists
        ]
        df["probabilities"] = probability_dicts

        spec = libsvm.convert(model, self.column_names, "target", "probabilities")

        if _is_macos() and _macos_version() >= (10, 13):
            metrics = evaluate_classifier_with_probabilities(spec, df, verbose=False)
            self.assertEqual(metrics["num_key_mismatch"], 0)
            self.assertLess(metrics["max_probability_error"], 0.00001)
Example #21
    def _test_boston_OHE_plus_trees(self, loss='ls'):

        data = load_boston()

        pl = Pipeline([
            ("OHE", OneHotEncoder(categorical_features=[8], sparse=False)),
            ("Trees", GradientBoostingRegressor(random_state=1, loss=loss)),
        ])

        pl.fit(data.data, data.target)

        # Convert the model
        spec = convert(pl, data.feature_names, "target")

        if _is_macos() and _macos_version() >= (10, 13):
            # Get predictions
            df = pd.DataFrame(data.data, columns=data.feature_names)
            df["prediction"] = pl.predict(data.data)

            # Evaluate it
            result = evaluate_regressor(spec, df, "target", verbose=False)

            assert result["max_error"] < 0.0001
Example #22
    def _conversion_and_evaluation_helper_for_logistic_regression(
            self, class_labels):
        options = {
            "C": (0.1, 1.0, 2.0),
            "fit_intercept": (True, False),
            "class_weight": ("balanced", None),
            "solver": ("newton-cg", "lbfgs", "liblinear", "sag"),
        }

        # Generate a list of all combinations of options and the default parameters
        product = itertools.product(*options.values())
        args = [{}] + [dict(zip(options.keys(), p)) for p in product]

        x, y = GlmCassifierTest._generate_random_data(class_labels)
        column_names = ["x1", "x2"]
        df = pd.DataFrame(x, columns=column_names)

        for cur_args in args:
            print(class_labels, cur_args)
            cur_model = LogisticRegression(**cur_args)
            cur_model.fit(x, y)

            spec = convert(cur_model,
                           input_features=column_names,
                           output_feature_names="target")

            if _is_macos() and _macos_version() >= (10, 13):
                probability_lists = cur_model.predict_proba(x)
                df["classProbability"] = [
                    dict(zip(cur_model.classes_, cur_vals))
                    for cur_vals in probability_lists
                ]

                metrics = evaluate_classifier_with_probabilities(
                    spec, df, probabilities="classProbability", verbose=False)
                self.assertEqual(metrics["num_key_mismatch"], 0)
                self.assertLess(metrics["max_probability_error"], 0.00001)
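
The args construction above expands a small option grid into keyword dictionaries; the same idiom in isolation:

    import itertools

    options = {"C": (0.1, 1.0), "fit_intercept": (True, False)}
    grid = [{}] + [dict(zip(options.keys(), combo))
                   for combo in itertools.product(*options.values())]
    # grid == [{}, {"C": 0.1, "fit_intercept": True},
    #          {"C": 0.1, "fit_intercept": False},
    #          {"C": 1.0, "fit_intercept": True},
    #          {"C": 1.0, "fit_intercept": False}]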
Example #23
    def test_input_names(self):
        data = load_boston()
        df = pd.DataFrame({"input": data["data"].tolist()})
        df["input"] = df["input"].apply(np.array)

        # Default values
        spec = libsvm.convert(self.libsvm_model)
        if _is_macos() and _macos_version() >= (10, 13):
            (df["prediction"], _,
             _) = svmutil.svm_predict(data["target"], data["data"].tolist(),
                                      self.libsvm_model)
            metrics = evaluate_regressor(spec, df)
            self.assertAlmostEqual(metrics["max_error"], 0)

        # One extra parameter. This is legal/possible.
        num_inputs = len(data["data"][0])
        spec = libsvm.convert(self.libsvm_model, input_length=num_inputs + 1)

        # Not enough input names.
        input_names = ["this", "is", "not", "enough", "names"]
        with self.assertRaises(ValueError):
            libsvm.convert(self.libsvm_model, input_names=input_names)
        with self.assertRaises(ValueError):
            libsvm.convert(self.libsvm_model, input_length=num_inputs - 1)
Example #24
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

import unittest
from coremltools._deps import _HAS_SKLEARN
import numpy.random as rn
import numpy as np
from coremltools.models.utils import evaluate_transformer, _macos_version, _is_macos

if _HAS_SKLEARN:
    from sklearn.preprocessing import Imputer
    from coremltools.converters import sklearn as converter


@unittest.skipUnless(
    _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
)
@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
class NumericalImputerTestCase(unittest.TestCase):
    """
    Unit test class for testing scikit-learn converter.
    """

    @staticmethod
    def test_conversion_boston():

        from sklearn.datasets import load_boston

        scikit_data = load_boston()

        sh = scikit_data.data.shape
Example #25
        snr_thresh=15,
        psnr_thresh=30,
        cpu_only=False,
    ):
        coreml_out_dict = coreml_model.predict(input_dict, useCPUOnly=cpu_only)
        for out_ in list(ref_output_dict.keys()):
            ref_out = ref_output_dict[out_].flatten()
            coreml_out = coreml_out_dict[out_].flatten()
            self.assertEqual(len(coreml_out), len(ref_out))
            self._compare_predictions_numerical(ref_out,
                                                coreml_out,
                                                snr_thresh=snr_thresh,
                                                psnr_thresh=psnr_thresh)


@unittest.skipUnless(_is_macos(), "Only supported for MacOS platform.")
class StressTest(CorrectnessTest):
    def runTest(self):
        pass

    def test_data_reorganize(self, cpu_only=False):
        def get_coreml_model_reorganize(X, params):
            eval = True
            mlmodel = None
            try:
                input_dim = X.shape[2:]
                input_features = [("data", datatypes.Array(*input_dim))]
                output_features = [("output", None)]
                builder = neural_network.NeuralNetworkBuilder(
                    input_features, output_features)
                builder.add_reorganize_data(
Example #26
class MLModelTest(unittest.TestCase):
    @classmethod
    def setUpClass(self):

        spec = Model_pb2.Model()
        spec.specificationVersion = coremltools.SPECIFICATION_VERSION

        features = ["feature_1", "feature_2"]
        output = "output"
        for f in features:
            input_ = spec.description.input.add()
            input_.name = f
            input_.type.doubleType.MergeFromString(b"")

        output_ = spec.description.output.add()
        output_.name = output
        output_.type.doubleType.MergeFromString(b"")

        lr = spec.glmRegressor
        lr.offset.append(0.1)
        weights = lr.weights.add()
        coefs = [1.0, 2.0]
        for i in coefs:
            weights.value.append(i)

        spec.description.predictedFeatureName = "output"
        self.spec = spec

    def test_model_creation(self):
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        filename = tempfile.mktemp(suffix=".mlmodel")
        save_spec(self.spec, filename)
        model = MLModel(filename)
        self.assertIsNotNone(model)

    def test_model_save_no_extension(self):
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        filename = tempfile.mktemp(suffix="")
        save_spec(self.spec, filename) # appends .mlmodel extension when it is not provided
        self.assertFalse(os.path.exists(filename))

        filename = filename + ".mlmodel"
        self.assertTrue(os.path.exists(filename))

        model = MLModel(filename)
        self.assertIsNotNone(model)
        os.remove(filename)

    def test_model_api(self):
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        model.author = "Test author"
        self.assertEqual(model.author, "Test author")
        self.assertEqual(model.get_spec().description.metadata.author, "Test author")

        model.license = "Test license"
        self.assertEqual(model.license, "Test license")
        self.assertEqual(model.get_spec().description.metadata.license, "Test license")

        model.short_description = "Test model"
        self.assertEqual(model.short_description, "Test model")
        self.assertEqual(
            model.get_spec().description.metadata.shortDescription, "Test model"
        )

        model.version = "1.3"
        self.assertEqual(model.version, "1.3")
        self.assertEqual(model.get_spec().description.metadata.versionString, "1.3")

        model.input_description["feature_1"] = "This is feature 1"
        self.assertEqual(model.input_description["feature_1"], "This is feature 1")

        model.output_description["output"] = "This is output"
        self.assertEqual(model.output_description["output"], "This is output")

        filename = tempfile.mktemp(suffix=".mlmodel")
        model.save(filename)
        loaded_model = MLModel(filename)

        self.assertEqual(model.author, "Test author")
        self.assertEqual(model.license, "Test license")
        # self.assertEqual(model.short_description, 'Test model')
        self.assertEqual(model.input_description["feature_1"], "This is feature 1")
        self.assertEqual(model.output_description["output"], "This is output")

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_predict_api(self):
        model = MLModel(self.spec)
        preds = model.predict({"feature_1": 1.0, "feature_2": 1.0})
        self.assertIsNotNone(preds)
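        # Expected: offset 0.1 + dot([1.0, 2.0], [1.0, 1.0]) = 3.1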
        self.assertEqual(preds["output"], 3.1)

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_rename_input(self):
        rename_feature(self.spec, "feature_1", "renamed_feature", rename_inputs=True)
        model = MLModel(self.spec)
        preds = model.predict({"renamed_feature": 1.0, "feature_2": 1.0})
        self.assertIsNotNone(preds)
        self.assertEqual(preds["output"], 3.1)
        # reset the spec for next run
        rename_feature(self.spec, "renamed_feature", "feature_1", rename_inputs=True)

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_rename_input_bad(self):
        rename_feature(self.spec, "blah", "bad_name", rename_inputs=True)
        model = MLModel(self.spec)
        preds = model.predict({"feature_1": 1.0, "feature_2": 1.0})
        self.assertIsNotNone(preds)
        self.assertEqual(preds["output"], 3.1)

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_rename_output(self):
        rename_feature(
            self.spec,
            "output",
            "renamed_output",
            rename_inputs=False,
            rename_outputs=True,
        )
        model = MLModel(self.spec)
        preds = model.predict({"feature_1": 1.0, "feature_2": 1.0})
        self.assertIsNotNone(preds)
        self.assertEqual(preds["renamed_output"], 3.1)
        rename_feature(
            self.spec,
            "renamed_output",
            "output",
            rename_inputs=False,
            rename_outputs=True,
        )

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_rename_output_bad(self):
        rename_feature(
            self.spec, "blah", "bad_name", rename_inputs=False, rename_outputs=True
        )
        model = MLModel(self.spec)
        preds = model.predict({"feature_1": 1.0, "feature_2": 1.0})
        self.assertIsNotNone(preds)
        self.assertEqual(preds["output"], 3.1)

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_future_version(self):
        self.spec.specificationVersion = 10000
        filename = tempfile.mktemp(suffix=".mlmodel")
        save_spec(self.spec, filename, auto_set_specification_version=False)
        model = MLModel(filename)
        # this model should exist, but throw an exception when we try to use
        # predict because the engine doesn't support this model version
        self.assertIsNotNone(model)
        with self.assertRaises(Exception):
            try:
                model.predict({})
            except Exception as e:
                assert "Core ML model specification version" in str(e)
                raise
        self.spec.specificationVersion = 1

    @unittest.skipUnless(
        _is_macos() and _macos_version() < (10, 13), "Only supported on macOS 10.13-"
    )
    def test_MLModel_warning(self):
        self.spec.specificationVersion = 3
        import warnings

        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            model = MLModel(self.spec)
            assert len(w) == 1
            assert issubclass(w[-1].category, RuntimeWarning)
            assert "not able to run predict()" in str(w[-1].message)
        self.spec.specificationVersion = 1
        model = MLModel(self.spec)

    def test_convert_nn_spec_to_half_precision(self):
        # simple network with quantization layer
        input_features = [("data", datatypes.Array(3))]
        output_features = [("out", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(input_features, output_features)
        weights = np.random.uniform(-0.5, 0.5, (3, 3))
        builder.add_inner_product(
            name="inner_product",
            W=weights,
            b=None,
            input_channels=3,
            output_channels=3,
            has_bias=False,
            input_name="data",
            output_name="out",
        )
        model = MLModel(builder.spec)
        spec = _convert_neural_network_spec_weights_to_fp16(model.get_spec())
        self.assertIsNotNone(spec)

        # simple network without quantization layer
        input_features = [("data", datatypes.Array(3))]
        output_features = [("out", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(input_features, output_features)
        builder.add_lrn(
            name="lrn",
            input_name="data",
            output_name="out",
            alpha=2,
            beta=3,
            local_size=1,
            k=8,
        )
        model = MLModel(builder.spec)
        spec = _convert_neural_network_spec_weights_to_fp16(model.get_spec())
        self.assertIsNotNone(spec)

    @unittest.skip
    def test_downgrade_specification_version(self):
        # manually set an invalid specification version
        self.spec.specificationVersion = -1
        model = MLModel(self.spec)
        assert model.get_spec().specificationVersion == 1

        # manually set a high specification version
        self.spec.specificationVersion = 4
        filename = tempfile.mktemp(suffix=".mlmodel")
        save_spec(self.spec, filename, auto_set_specification_version=True)
        model = MLModel(filename)
        assert model.get_spec().specificationVersion == 1

        # simple neural network with only spec 1 layer
        input_features = [("data", datatypes.Array(3))]
        output_features = [("out", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(input_features, output_features)
        builder.add_activation("relu", "RELU", "data", "out")
        # set a high specification version
        builder.spec.specificationVersion = 3
        model = MLModel(builder.spec)
        filename = tempfile.mktemp(suffix=".mlmodel")
        model.save(filename)
        # load the model back
        model = MLModel(filename)
        assert model.get_spec().specificationVersion == 1

        # test save without automatic set specification version
        self.spec.specificationVersion = 3
        filename = tempfile.mktemp(suffix=".mlmodel")
        save_spec(self.spec, filename, auto_set_specification_version=False)
        model = MLModel(filename)
        # the specification version should be original
        assert model.get_spec().specificationVersion == 3

    def test_multiarray_type_convert_to_float(self):
        input_features = [("data", datatypes.Array(2))]
        output_features = [("out", datatypes.Array(2))]
        builder = NeuralNetworkBuilder(input_features, output_features)
        builder.add_ceil("ceil", "data", "out")
        spec = builder.spec
        self.assertEqual(
            spec.description.input[0].type.multiArrayType.dataType,
            Model_pb2.ArrayFeatureType.DOUBLE,
        )
        self.assertEqual(
            spec.description.output[0].type.multiArrayType.dataType,
            Model_pb2.ArrayFeatureType.DOUBLE,
        )
        convert_double_to_float_multiarray_type(spec)
        self.assertEqual(
            spec.description.input[0].type.multiArrayType.dataType,
            Model_pb2.ArrayFeatureType.FLOAT32,
        )
        self.assertEqual(
            spec.description.output[0].type.multiArrayType.dataType,
            Model_pb2.ArrayFeatureType.FLOAT32,
        )

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_multiarray_to_image_input_util(self):
        H, W, C = 1, 1, 3
        input_features = [("data", datatypes.Array(C, H, W))]
        output_features = [("out", datatypes.Array(C, H, W))]
        builder = NeuralNetworkBuilder(
            input_features, output_features, disable_rank5_shape_mapping=True
        )
        builder.add_activation("linear", "LINEAR", "data", "out")
        spec = builder.spec
        mlmodel = MLModel(spec)
        mlmodel = make_image_input(
            mlmodel,
            "data",
            red_bias=-5,
            green_bias=-6,
            blue_bias=-2.5,
            scale=10.0,
            image_format="NCHW",
        )
        x = np.array([4, 2, 5], dtype=np.uint8)
        x = np.reshape(x, (H, W, C))
        pil_img = PIL.Image.fromarray(x)
        y = mlmodel.predict({"data": pil_img}, useCPUOnly=True)["out"]
        self.assertEqual(y.shape, (C, H, W))
        np.testing.assert_almost_equal(y.flatten(), [35.0, 14.0, 47.5])

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_multiarray_to_image_input_util_transpose_elimination(self):
        H, W, C = 1, 1, 3
        input_features = [("data", datatypes.Array(H, W, C))]
        output_features = [("out", datatypes.Array(H, W, C))]
        builder = NeuralNetworkBuilder(
            input_features, output_features, disable_rank5_shape_mapping=True
        )
        builder.add_transpose("transpose", [2, 0, 1], "data", "transpose")
        builder.add_activation("linear", "LINEAR", "transpose", "out")
        spec = builder.spec
        mlmodel = MLModel(spec)
        mlmodel = make_image_input(
            mlmodel,
            "data",
            red_bias=-5,
            green_bias=-6,
            blue_bias=-2.5,
            scale=10.0,
            image_format="NHWC",
        )
        x = np.array([4, 2, 5], dtype=np.uint8)
        x = np.reshape(x, (H, W, C))
        pil_img = PIL.Image.fromarray(x)
        y = mlmodel.predict({"data": pil_img}, useCPUOnly=True)["out"]
        self.assertEqual(y.shape, (H, W, C))
        np.testing.assert_almost_equal(y.flatten(), [35.0, 14.0, 47.5])

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_multiarray_to_image_input_util_HWC_format(self):
        H, W, C = 1, 1, 3
        input_features = [("data", datatypes.Array(H, W, C))]
        output_features = [("out", datatypes.Array(H, W, C))]
        builder = NeuralNetworkBuilder(
            input_features, output_features, disable_rank5_shape_mapping=True
        )
        builder.add_activation("linear", "LINEAR", "data", "out")
        spec = builder.spec
        mlmodel = MLModel(spec)
        mlmodel = make_image_input(
            mlmodel,
            "data",
            red_bias=-5,
            green_bias=-6,
            blue_bias=-2.5,
            scale=10.0,
            image_format="NHWC",
        )
        x = np.array([4, 2, 5], dtype=np.uint8)
        x = np.reshape(x, (H, W, C))
        pil_img = PIL.Image.fromarray(x)
        y = mlmodel.predict({"data": pil_img}, useCPUOnly=True)["out"]
        self.assertEqual(y.shape, (H, W, C))
        np.testing.assert_almost_equal(y.flatten(), [35.0, 14.0, 47.5])

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_nn_classifier_util(self):
        input_features = [("data", datatypes.Array(3))]
        output_features = [("out", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(
            input_features, output_features, disable_rank5_shape_mapping=True
        )
        builder.add_activation("linear", "LINEAR", "data", "out")
        spec = builder.spec
        mlmodel = MLModel(spec)
        mlmodel = make_nn_classifier(
            mlmodel,
            class_labels=["a", "b", "c"],
            predicted_feature_name="out_confidence",
            predicted_probabilities_output="out",
        )
        out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])}, useCPUOnly=True)
        self.assertEqual(out_dict["out_confidence"], "c")
        self.assertEqual(
            mlmodel.get_spec().WhichOneof("Type"), "neuralNetworkClassifier"
        )

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_nn_classifier_util_file(self):
        input_features = [("data", datatypes.Array(3))]
        output_features = [("out", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(
            input_features, output_features, disable_rank5_shape_mapping=True
        )
        builder.add_activation("linear", "LINEAR", "data", "out")
        spec = builder.spec
        mlmodel = MLModel(spec)

        class_labels = ["a", "b", "c"]
        with tempfile.NamedTemporaryFile(mode="w", suffix=".txt") as f:
            f.write("\n".join(class_labels))
            f.flush()
            mlmodel = make_nn_classifier(
                mlmodel,
                class_labels=f.name,
                predicted_feature_name="out_confidence",
                predicted_probabilities_output="out",
            )
        out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])}, useCPUOnly=True)
        self.assertEqual(out_dict["out_confidence"], "c")
        self.assertEqual(
            mlmodel.get_spec().WhichOneof("Type"), "neuralNetworkClassifier"
        )

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_rename_output_nn_classifier(self):
        input_features = [("data", datatypes.Array(3))]
        output_features = [("out", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(
            input_features, output_features, disable_rank5_shape_mapping=True
        )
        builder.add_activation("linear", "LINEAR", "data", "out")
        spec = builder.spec
        mlmodel = MLModel(spec)

        class_labels = ["a", "b", "c"]
        mlmodel = make_nn_classifier(mlmodel, class_labels=class_labels)

        # rename output
        spec = mlmodel.get_spec()
        rename_feature(spec, "out", "new_out_name")
        mlmodel = MLModel(spec)

        out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])}, useCPUOnly=True)
        self.assertEqual(out_dict["classLabel"], "c")
        self.assertTrue("new_out_name" in out_dict)
        self.assertTrue(isinstance(out_dict["new_out_name"], dict))

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
    )
    def test_rename_image_input(self):
        input_features = [("data", datatypes.Array(3, 1, 1))]
        output_features = [("out", datatypes.Array(3, 1, 1))]
        builder = NeuralNetworkBuilder(
            input_features, output_features, disable_rank5_shape_mapping=True
        )
        builder.add_activation("linear", "LINEAR", "data", "out")
        spec = builder.spec
        # make an image input
        mlmodel = make_image_input(MLModel(spec), "data", image_format="NCHW", scale=2.0)
        # rename the input
        spec = mlmodel.get_spec()
        rename_feature(spec, "data", "new_input_name")
        mlmodel = MLModel(spec)
        # test
        x = np.array([4, 5, 6], dtype=np.uint8).reshape(1, 1, 3)
        pil_img = PIL.Image.fromarray(x)
        out = mlmodel.predict({"new_input_name": pil_img}, useCPUOnly=True)['out']
        np.testing.assert_equal(out, np.array([8.0, 10.0, 12.0]).reshape(3, 1, 1))

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (12, 0) and _HAS_TORCH, "Only supported on macOS 12+"
    )
    def test_rename_feature_mlprogram(self):
        torch_model = _torch.nn.ReLU().eval()
        model = coremltools.convert(
            _torch.jit.trace(torch_model, _torch.rand(3, )),
            inputs=[coremltools.TensorType(shape=(3,))],
            convert_to='mlprogram'
        )
        spec = model.get_spec()
        input_name = spec.description.input[0].name
        output_name = spec.description.output[0].name

        # rename input
        rename_feature(spec, input_name, "new_input_name")
        self.assertEqual(spec.description.input[0].name, "new_input_name")
        model = coremltools.models.MLModel(spec)
        out = model.predict({"new_input_name": np.array([1.0, 2.0, 3.0])})[output_name]
        self.assertEqual(out[0], 1.0)

        # rename output
        rename_feature(spec, output_name, "new_output_name")
        self.assertEqual(spec.description.output[0].name, "new_output_name")
        model = coremltools.models.MLModel(spec)
        out = model.predict({"new_input_name": np.array([1.0, 2.0, 3.0])})["new_output_name"]
        self.assertEqual(out[1], 2.0)

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (12, 0) and _HAS_TORCH, "Only supported on macOS 12+"
    )
    def test_rename_feature_classifier_mlprogram(self):
        torch_model = _torch.nn.ReLU().eval()
        model = coremltools.convert(
            _torch.jit.trace(torch_model, _torch.rand(3, )),
            inputs=[coremltools.TensorType(shape=(3,))],
            classifier_config=coremltools.ClassifierConfig(['a', 'b', 'c']),
            convert_to='mlprogram'
        )
        spec = model.get_spec()
        input_name = spec.description.input[0].name

        rename_feature(spec, 'classLabel', 'highestProbClass')
        model = coremltools.models.MLModel(spec)
        output_class = model.predict({input_name: np.array([1.0, 2.0, 3.0])})['highestProbClass']
        self.assertEqual(output_class, 'c')
Example #27
    def _evaluation_test_helper(
        self,
        class_labels,
        use_probability_estimates,
        allow_slow,
        allowed_prob_delta=0.00001,
    ):
        # Parameters to test
        kernel_parameters = [
            {},
            {"kernel": "rbf", "gamma": 1.2},
            {"kernel": "linear"},
            {"kernel": "poly"},
            {"kernel": "poly", "degree": 2},
            {"kernel": "poly", "gamma": 0.75},
            {"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2},
            {"kernel": "sigmoid"},
            {"kernel": "sigmoid", "gamma": 1.3},
            {"kernel": "sigmoid", "coef0": 0.8},
            {"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5},
        ]
        non_kernel_parameters = [
            {},
            {"C": 1},
            {"C": 1.5, "shrinking": True},
            {"C": 0.5, "shrinking": False},
        ]

        # Generate some random data
        x, y = [], []
        random.seed(42)
        for _ in range(50):
            x.append([
                random.gauss(200, 30),
                random.gauss(-100, 22),
                random.gauss(100, 42)
            ])
            y.append(random.choice(class_labels))
        column_names = ["x1", "x2", "x3"]
        # make sure first label is seen first, second is seen second, and so on.
        for i, val in enumerate(class_labels):
            y[i] = val
        df = pd.DataFrame(x, columns=column_names)

        # Test
        for param1 in non_kernel_parameters:
            for param2 in kernel_parameters:
                cur_params = param1.copy()
                cur_params.update(param2)
                cur_params["probability"] = use_probability_estimates
                cur_params["max_iter"] = 10  # Don't want test to take too long
                print("cur_params=" + str(cur_params))

                cur_model = SVC(**cur_params)
                cur_model.fit(x, y)

                spec = scikit_converter.convert(cur_model, column_names,
                                                "target")

                if _is_macos() and _macos_version() >= (10, 13):
                    if use_probability_estimates:
                        probability_lists = cur_model.predict_proba(x)
                        df["classProbability"] = [
                            dict(zip(cur_model.classes_, cur_vals))
                            for cur_vals in probability_lists
                        ]
                        metrics = evaluate_classifier_with_probabilities(
                            spec,
                            df,
                            probabilities="classProbability",
                            verbose=True)
                        self.assertEqual(metrics["num_key_mismatch"], 0)
                        self.assertLess(metrics["max_probability_error"],
                                        allowed_prob_delta)
                    else:
                        df["prediction"] = cur_model.predict(x)
                        metrics = evaluate_classifier(spec, df, verbose=False)
                        self.assertEqual(metrics["num_errors"], 0)

                if not allow_slow:
                    break

            if not allow_slow:
                break
Example #28
import coremltools
from coremltools.models import datatypes, MLModel
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models.neural_network.quantization_utils import (
    _convert_array_to_nbit_quantized_bytes,
    quantize_weights,
)
from coremltools.models.utils import _macos_version, _is_macos

MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_14_MACOS_VERSION = (10, 14)
LAYERS_10_15_MACOS_VERSION = (10, 15)


@unittest.skipIf(
    not _is_macos() or _macos_version() < LAYERS_10_15_MACOS_VERSION,
    "Only supported on macOS 10.15+",
)
class ControlFlowCorrectnessTest(unittest.TestCase):
    @classmethod
    def setup_class(cls):
        pass

    def runTest(self):
        pass

    def _test_model(self, model, input_dict, output_ref, delta=1e-2):
        preds = model.predict(input_dict)
        for name in output_ref:
            ref_val = output_ref[name]
            val = preds[name]
Example #29
    def _test_evaluation(self, allow_slow):
        """
        Test that the same predictions are made
        """

        # Generate some smallish (some kernels take too long on anything else) random data
        x, y = [], []
        for _ in range(50):
            cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)
            x.append([cur_x1, cur_x2])
            y.append(1 + 2 * cur_x1 + 3 * cur_x2)

        input_names = ["x1", "x2"]
        df = pd.DataFrame(x, columns=input_names)

        # Parameters to test
        kernel_parameters = [
            {},
            {"kernel": "rbf", "gamma": 1.2},
            {"kernel": "linear"},
            {"kernel": "poly"},
            {"kernel": "poly", "degree": 2},
            {"kernel": "poly", "gamma": 0.75},
            {"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2},
            {"kernel": "sigmoid"},
            {"kernel": "sigmoid", "gamma": 1.3},
            {"kernel": "sigmoid", "coef0": 0.8},
            {"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5},
        ]
        non_kernel_parameters = [
            {},
            {"C": 1},
            {"C": 1.5, "epsilon": 0.5, "shrinking": True},
            {"C": 0.5, "epsilon": 1.5, "shrinking": False},
        ]

        # Test
        for param1 in non_kernel_parameters:
            for param2 in kernel_parameters:
                cur_params = param1.copy()
                cur_params.update(param2)
                print("cur_params=" + str(cur_params))

                cur_model = SVR(**cur_params)
                cur_model.fit(x, y)
                df["prediction"] = cur_model.predict(x)

                spec = sklearn_converter.convert(cur_model, input_names,
                                                 "target")

                if _is_macos() and _macos_version() >= (10, 13):
                    metrics = evaluate_regressor(spec, df)
                    self.assertAlmostEqual(metrics["max_error"], 0)

                if not allow_slow:
                    break

            if not allow_slow:
                break
Example #30
    def test_keras_2_image_bias(self):
        # define Keras model and get prediction
        input_shape1 = (100, 60, 3)
        input_shape2 = (23, 45, 3)

        data1 = Input(shape=input_shape1)
        data2 = Input(shape=input_shape2)
        a_pool = GlobalMaxPooling2D()(data1)
        b_pool = GlobalMaxPooling2D()(data2)
        output = keras.layers.add([a_pool, b_pool])
        model = Model(inputs=[data1, data2], outputs=output)

        data1 = np.ones(input_shape1)
        data2 = np.ones(input_shape2)
        keras_input1 = np.ones(input_shape1)
        keras_input2 = np.ones(input_shape2)

        data1[:, :, 0] = 100.0
        data1[:, :, 1] = 79.0
        data1[:, :, 2] = 194.0

        data2[:, :, 0] = 130.0
        data2[:, :, 1] = 91.0
        data2[:, :, 2] = 11.0

        red_bias1 = -88.0
        green_bias1 = -2
        blue_bias1 = -40

        red_bias2 = -100.0
        green_bias2 = -29
        blue_bias2 = -15

        keras_input1[:, :, 0] = data1[:, :, 2] + blue_bias1
        keras_input1[:, :, 1] = data1[:, :, 1] + green_bias1
        keras_input1[:, :, 2] = data1[:, :, 0] + red_bias1

        keras_input2[:, :, 0] = data2[:, :, 0] + red_bias2
        keras_input2[:, :, 1] = data2[:, :, 1] + green_bias2
        keras_input2[:, :, 2] = data2[:, :, 2] + blue_bias2

        keras_preds = model.predict([
            np.expand_dims(keras_input1, axis=0),
            np.expand_dims(keras_input2, axis=0)
        ])
        keras_preds = keras_preds.flatten()

        # convert to coreml and get predictions
        model_dir = tempfile.mkdtemp()
        model_path = os.path.join(model_dir, "keras.mlmodel")
        from coremltools.converters import keras as keras_converter

        coreml_model = keras_converter.convert(
            model,
            input_names=["data1", "data2"],
            output_names=["output"],
            image_input_names=["data1", "data2"],
            red_bias={
                "data1": red_bias1,
                "data2": red_bias2
            },
            green_bias={
                "data1": green_bias1,
                "data2": green_bias2
            },
            blue_bias={
                "data1": blue_bias1,
                "data2": blue_bias2
            },
            is_bgr={
                "data1": True,
                "data2": False
            },
        )

        if _is_macos() and _macos_version() >= (10, 13):
            coreml_input_dict = dict()
            coreml_input_dict["data1"] = PIL.Image.fromarray(
                data1.astype(np.uint8))
            coreml_input_dict["data2"] = PIL.Image.fromarray(
                data2.astype(np.uint8))
            coreml_preds = coreml_model.predict(
                coreml_input_dict)["output"].flatten()

            # compare
            self.assertEqual(len(keras_preds), len(coreml_preds))
            max_relative_error = compare_models(keras_preds, coreml_preds)
            self.assertAlmostEqual(max(max_relative_error, 0.001),
                                   0.001,
                                   delta=1e-6)

        if os.path.exists(model_dir):
            shutil.rmtree(model_dir)
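
A closing note on the is_bgr flags above: data1's Keras reference input is built with its channels reversed, matching is_bgr=True, while data2 keeps RGB order. A tiny numpy check of that channel bookkeeping, using values from the test:

    import numpy as np

    rgb = np.array([100.0, 79.0, 194.0])       # data1 channels in RGB order
    bias_bgr = np.array([-40.0, -2.0, -88.0])  # blue, green, red biases
    # With is_bgr=True, channel 0 of the network input is blue, channel 2 red:
    assert np.allclose(rgb[::-1] + bias_bgr, [154.0, 77.0, 12.0])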