Code Example #1
    def test_export_coreml(self):
        import coremltools
        model = self.model
        for flexible_shape_on in [True, False]:
            filename = tempfile.mkstemp('my_style_transfer.mlmodel')[1]
            model.export_coreml(filename,
                include_flexible_shape = flexible_shape_on)
            if not flexible_shape_on or _mac_ver() >= (10,14):
                coreml_model = coremltools.models.MLModel(filename)

                mac_os_version_threshold = (10,14) if flexible_shape_on else (10,13)

                if _mac_ver() >= mac_os_version_threshold:
                    img = self.style_sf[0:2][self.style_feature][0]
                    img_fixed = tc.image_analysis.resize(img, 256, 256, 3)
                    img = self._coreml_python_predict(coreml_model, img_fixed)
                    self.assertEqual(img.shape, (256, 256, 3))

                    if flexible_shape_on:
                        # Test for flexible shape
                        img = self.style_sf[0:2][self.style_feature][1]
                        img_fixed = tc.image_analysis.resize(img, 512, 512, 3)
                        img = self._coreml_python_predict(coreml_model, img_fixed)
                        self.assertEqual(img.shape, (512, 512, 3))

                # Also check if we can train a second model and export it (there could
                # be naming issues in mxnet)
                filename2 = tempfile.mkstemp('my_style_transfer2.mlmodel')[1]

                model2 = tc.style_transfer.create(self.style_sf, self.content_sf, max_iterations=1)
                model2.export_coreml(filename2)
Code Example #2
    def test_export_coreml(self):
        import coremltools
        import platform

        model = self.model
        for flexible_shape_on in [True, False]:
            filename = tempfile.mkstemp("my_style_transfer.mlmodel")[1]
            model.export_coreml(filename,
                                include_flexible_shape=flexible_shape_on)

            ## Metadata test
            coreml_model = coremltools.models.MLModel(filename)
            self.assertDictEqual(
                {
                    "com.github.apple.turicreate.version": tc.__version__,
                    "com.github.apple.os.platform": platform.platform(),
                    "type": "style_transfer",
                    "content_feature": self.content_feature,
                    "style_feature": self.style_feature,
                    "model": self.pre_trained_model,
                    "max_iterations": "1",
                    "training_iterations": "1",
                    "num_styles": str(self.num_styles),
                    "version": "1",
                },
                dict(coreml_model.user_defined_metadata),
            )
            expected_result = "Style transfer created by Turi Create (version %s)" % (
                tc.__version__)
            self.assertEquals(expected_result, coreml_model.short_description)

            ## Correctness test
            if not flexible_shape_on or _mac_ver() >= (10, 14):
                coreml_model = coremltools.models.MLModel(filename)

                mac_os_version_threshold = (10, 14) if flexible_shape_on else (10, 13)
                if _mac_ver() >= mac_os_version_threshold:
                    img = self.style_sf[0:2][self.style_feature][0]
                    img_fixed = tc.image_analysis.resize(img, 256, 256, 3)
                    img = self._coreml_python_predict(coreml_model, img_fixed)
                    self.assertEqual(img.shape, (256, 256, 3))

                    if flexible_shape_on:
                        # Test for flexible shape
                        img = self.style_sf[0:2][self.style_feature][1]
                        img_fixed = tc.image_analysis.resize(img, 512, 512, 3)
                        img = self._coreml_python_predict(
                            coreml_model, img_fixed)
                        self.assertEqual(img.shape, (512, 512, 3))
Code Example #3
class ExploreTest(unittest.TestCase):
    @unittest.skipIf(_mac_ver() < (10, 12),
                     "macOS-only test; UISoup doesn't work on Linux")
    @unittest.skipIf(
        _mac_ver() > (10, 13),
        "macOS 10.14 appears to have broken the UX flow to prompt for accessibility access",
    )
    @unittest.skipIf(not (six.PY2),
                     "Python 2.7-only test; UISoup doesn't work on 3.x")
    def test_sanity_on_macOS(self):
        """
        Create a simple SFrame, containing a very unique string.
        Then, using uisoup, look for this string within a window
        and assert that it appears.
        """

        # Library imports
        from uisoup import uisoup

        # Generate some test data
        unique_str = repr(uuid.uuid4())
        sf = tc.SFrame({"a": [1, 2, 3], "b": ["hello", "world", unique_str]})

        # Run the explore view and make sure we can see our unique string
        sf.explore()
        time.sleep(2)

        window = None
        try:
            window = uisoup.get_window("Turi*Create*Visualization")
            result = window.findall(value=unique_str)
            self.assertEqual(
                len(result),
                1,
                ("Expected to find exactly one element containing the unique"
                 "string %s.") % unique_str,
            )
            first = result[0]
            self.assertEqual(
                first.acc_name,
                unique_str,
                ("Expected to find the unique string %s as the name of the found"
                 "element. Instead, got %s.") % (unique_str, first.acc_name),
            )

        finally:
            if window is not None:
                # Kill the explore process
                os.kill(window.proc_id, signal.SIGTERM)
Code Example #4
    def test_export_coreml(self):
        """
        Check the export_coreml() function.
        """

        # Save the model as a CoreML model file
        filename = tempfile.mkstemp('ImageSimilarity.mlmodel')[1]
        self.model.export_coreml(filename)

        # Load the model back from the CoreML model file
        coreml_model = coremltools.models.MLModel(filename)

        # Get model distances for comparison
        img = self.sf[0:1][self.feature][0]
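        # self.input_image_shape is (channels, height, width); resize() takes width, height, channels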
        img_fixed = tc.image_analysis.resize(img, *reversed(self.input_image_shape))
        tc_ret = self.model.query(img_fixed, k=self.sf.num_rows())

        if _mac_ver() >= (10, 13):
            from PIL import Image as _PIL_Image
            pil_img = _PIL_Image.fromarray(img_fixed.pixel_data)
            coreml_ret = coreml_model.predict({'awesome_image': pil_img})

            # Compare distances
            coreml_distances = np.array(sorted(coreml_ret['distance']))
            tc_distances = tc_ret['distance'].to_numpy()
            self.assertListAlmostEquals(tc_distances, coreml_distances, 0.02)
Code Example #5
    def test_export_coreml(self):
        from PIL import Image
        import coremltools
        filename = tempfile.mkstemp('bingo.mlmodel')[1]
        self.model.export_coreml(filename,
                                 include_non_maximum_suppression=False)

        coreml_model = coremltools.models.MLModel(filename)
        img = self.sf[0:1][self.feature][0]
        img_fixed = tc.image_analysis.resize(img, 416, 416, 3)
        pil_img = Image.fromarray(img_fixed.pixel_data)
        if _mac_ver() >= (10, 13):
            ret = coreml_model.predict({self.feature: pil_img},
                                       usesCPUOnly=True)
            self.assertEqual(ret['coordinates'].shape[1], 4)
            self.assertEqual(ret['confidence'].shape[1], len(_CLASSES))
            self.assertEqual(ret['coordinates'].shape[0],
                             ret['confidence'].shape[0])
            # A numeric comparison of the resulting top bounding boxes is
            # not that meaningful unless the model has converged

        # Also check if we can train a second model and export it (there could
        # be naming issues in mxnet)
        filename2 = tempfile.mkstemp('bingo2.mlmodel')[1]
        # We also test at the same time if we can export a model with a single
        # class
        sf = tc.SFrame({
            'image': [self.sf[self.feature][0]],
            'ann': [self.sf[self.annotations][0][:1]]
        })
        model2 = tc.object_detector.create(sf, max_iterations=1)
        model2.export_coreml(filename2, include_non_maximum_suppression=False)
Code Example #6
    def test_export_coreml(self):
        """
        Check the export_coreml() function.
        """

        # Save the model as a CoreML model file
        filename = tempfile.mkstemp('ImageSimilarity.mlmodel')[1]
        self.model.export_coreml(filename)

        # Load the model back from the CoreML model file
        coreml_model = coremltools.models.MLModel(filename)

        # Get model distances for comparison
        tc_ret = self.model.query(self.sf[:1], k=self.sf.num_rows())

        if _mac_ver() >= (10, 13):
            from PIL import Image as _PIL_Image

            ref_img = self.sf[0]['awesome_image'].pixel_data
            pil_img = _PIL_Image.fromarray(ref_img)
            coreml_ret = coreml_model.predict({'awesome_image': pil_img},
                                              useCPUOnly=True)

            # Compare distances
            coreml_distances = np.array(sorted(coreml_ret['distance']))
            tc_distances = tc_ret['distance'].to_numpy()
            np.testing.assert_array_almost_equal(tc_distances,
                                                 coreml_distances,
                                                 decimal=2)
Code Example #7
    def test_export_coreml(self):
        """
        Check the export_coreml() function.
        """
        def get_psnr(x, y):
            # See: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
            # The higher the number the better.
            return 20 * np.log10(max(x.max(), y.max())) - 10 * np.log10(
                np.square(x - y).mean())

        # Save the model as a CoreML model file
        filename = tempfile.mkstemp('ImageSimilarity.mlmodel')[1]
        self.model.export_coreml(filename)

        # Load the model back from the CoreML model file
        coreml_model = coremltools.models.MLModel(filename)

        # Get model distances for comparison
        img = data[0:1][self.feature][0]
        img_fixed = tc.image_analysis.resize(img,
                                             *reversed(self.input_image_shape))
        tc_ret = self.model.query(img_fixed, k=data.num_rows())

        if _mac_ver() >= (10, 13):
            from PIL import Image as _PIL_Image
            pil_img = _PIL_Image.fromarray(img_fixed.pixel_data)
            coreml_ret = coreml_model.predict({'awesome_image': pil_img})

            # Compare distances
            coreml_distances = np.array(coreml_ret['distance'])
            tc_distances = tc_ret.sort(
                'reference_label')['distance'].to_numpy()
            psnr_value = get_psnr(coreml_distances, tc_distances)
            self.assertTrue(psnr_value > 50)
Code Example #8
    def test_export_coreml(self):
        from PIL import Image
        import coremltools
        filename = tempfile.mkstemp('bingo.mlmodel')[1]
        self.model.export_coreml(filename,
                                 include_non_maximum_suppression=False)

        coreml_model = coremltools.models.MLModel(filename)
        img = self.train[0:1][self.feature][0]
        img_fixed = tc.image_analysis.resize(img, 416, 416, 3)
        pil_img = Image.fromarray(img_fixed.pixel_data)
        if _mac_ver() >= (10, 13):
            ret = coreml_model.predict({self.feature: pil_img},
                                       usesCPUOnly=True)
            self.assertEqual(ret['coordinates'].shape[1], 4)
            self.assertEqual(ret['confidence'].shape[1], len(_CLASSES))
            self.assertEqual(ret['coordinates'].shape[0],
                             ret['confidence'].shape[0])

        # Also check if we can train a second model and export it (there could
        # be naming issues in mxnet)
        filename2 = tempfile.mkstemp('bingo2.mlmodel')[1]
        # We also test at the same time if we can export a model with a single
        # class
        sf = tc.SFrame({
            'image': tc.SArray([self.train[self.feature][0]]),
            'label': tc.SArray([self.train[self.target][0]])
        })
        model2 = tc.one_shot_object_detector.create(sf,
                                                    'label',
                                                    max_iterations=1)
        model2.export_coreml(filename2, include_non_maximum_suppression=False)
Code Example #9
    def test_export_coreml_predict(self):
        filename = tempfile.NamedTemporaryFile(suffix=".mlmodel").name
        self.model.export_coreml(filename)

        coreml_model = coremltools.models.MLModel(filename)
        img = data[0:1][self.feature][0]
        img_fixed = tc.image_analysis.resize(img,
                                             *reversed(self.input_image_shape))
        from PIL import Image

        pil_img = Image.fromarray(img_fixed.pixel_data)

        if _mac_ver() >= (10, 13):
            classes = self.model.classifier.classes
            ret = coreml_model.predict({self.feature: pil_img})
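            # Collect the Core ML probabilities in the same order as the toolkit's class list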
            coreml_values = [
                ret[self.target + "Probability"][l] for l in classes
            ]

            self.assertListAlmostEquals(
                coreml_values,
                list(
                    self.model.predict(img_fixed,
                                       output_type="probability_vector")),
                self.tolerance,
            )
Code Example #10
    def test_export_coreml_with_predict(self):
        filename = tempfile.mkstemp('bingo.mlmodel')[1]
        self.model.export_coreml(filename)

        coreml_model = coremltools.models.MLModel(filename)
        img = self.sf[0:1][self.feature][0]
        img_fixed = tc.image_analysis.resize(img,
                                             *reversed(self.input_image_shape))
        import PIL
        pil_img = PIL.Image.fromarray(img_fixed.pixel_data)

        if _mac_ver() >= (10, 13):
            classes = self.model.classifier.classes
            ret = coreml_model.predict({self.feature: pil_img},
                                       usesCPUOnly=True)
            coreml_values = [
                ret[self.target + 'Probability'][l] for l in classes
            ]

            self.assertListAlmostEquals(
                coreml_values,
                list(
                    self.model.predict(self.sf[0:1],
                                       output_type='probability_vector')[0]),
                self.tolerance)
Code Example #11
    def test_export_coreml_predict(self):
        import coremltools

        filename = tempfile.NamedTemporaryFile(suffix=".mlmodel").name
        self.model.export_coreml(filename)

        coreml_model = coremltools.models.MLModel(filename)
        if self.feature == "awesome_image":
            img = data[0:1][self.feature][0]
            img_fixed = tc.image_analysis.resize(
                img, *reversed(self.input_image_shape))
            from PIL import Image

            pil_img = Image.fromarray(img_fixed.pixel_data)

            if _mac_ver() >= (10, 13):
                classes = self.model.classifier.classes
                ret = coreml_model.predict({self.feature: pil_img})
                coreml_values = [
                    ret[self.target + "Probability"][l] for l in classes
                ]

                self.assertListAlmostEquals(
                    coreml_values,
                    list(
                        self.model.predict(img_fixed,
                                           output_type="probability_vector")),
                    self.tolerance,
                )
        else:
            # If we reach this branch, the feature is deep_features; coremltools'
            # predict does not work with deep_features yet, so we skip this case
            # until that support is written.
            pass
Code Example #12
    def test_export_coreml_with_non_maximum_suppression(self):
        from PIL import Image

        filename = tempfile.NamedTemporaryFile(suffix=".mlmodel").name
        self.model.export_coreml(filename, include_non_maximum_suppression=True)

        coreml_model = coremltools.models.MLModel(filename)
        img = self.sf[0:1][self.feature][0]
        img_fixed = tc.image_analysis.resize(img, 416, 416, 3)
        pil_img = Image.fromarray(img_fixed.pixel_data)
        if _mac_ver() >= (10, 13):
            ret = coreml_model.predict({self.feature: pil_img}, usesCPUOnly=True)
            self.assertEqual(ret["coordinates"].shape[1], 4)
            self.assertEqual(ret["confidence"].shape[1], len(_CLASSES))
            self.assertEqual(ret["coordinates"].shape[0], ret["confidence"].shape[0])
            # A numeric comparison of the resulting top bounding boxes is
            # not that meaningful unless the model has converged

        # Also check if we can train a second model and export it.
        filename2 = tempfile.NamedTemporaryFile(suffix=".mlmodel").name
        # We also test at the same time if we can export a model with a single
        # class
        sf = tc.SFrame(
            {
                "image": [self.sf[self.feature][0]],
                "ann": [self.sf[self.annotations][0][:1]],
            }
        )
        model2 = tc.object_detector.create(sf, max_iterations=1)
        model2.export_coreml(filename2, include_non_maximum_suppression=True)
Code Example #13
 def test_logistic_multiclass_tiny(self):
     if _mac_ver() < (10, 14):
         pytest.xfail("See https://github.com/apple/turicreate/issues/1332")
     for code_string in ["b"*40, "nnnn", "v", "d", "A", "bnsCvAd"]:
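         # Each character in code_string selects the type of one synthetic column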
         train, test = self.generate_data("multiclass", 8, code_string)
         model = tc.logistic_classifier.create(train, "target", validation_set = None)
         model.evaluate(test)  # Previous regression -- this caused errors.
         self._test_coreml_export(model, test, False)
Code Example #14
 def test_linear_regression(self):
     if _mac_ver() < (10, 14):
         pytest.xfail("See https://github.com/apple/turicreate/issues/1332")
     for code_string in ["b"*40, "nnnn", "v", "d", "A", "bnsCvAd"]:
         train, test = self.generate_data("regression", 100, code_string)
         model = tc.linear_regression.create(train, "target", validation_set = None)
         model.evaluate(test)  # Previous regression -- this caused errors.
         self._test_coreml_export(model, test, True)
Code Example #15
    def test_export_coreml(self):
        """
        Check the export_coreml() function.
        """
        import coremltools

        def get_psnr(x, y):
            # See: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
            # The higher the number the better.
            return 20 * np.log10(max(x.max(), y.max())) - 10 * np.log10(
                np.square(x - y).mean())

        # Save the model as a CoreML model file
        filename = tempfile.NamedTemporaryFile(suffix=".mlmodel").name
        self.model.export_coreml(filename)

        # Load the model back from the CoreML model file
        coreml_model = coremltools.models.MLModel(filename)
        import platform

        self.assertDictEqual(
            {
                "com.github.apple.turicreate.version": tc.__version__,
                "com.github.apple.os.platform": platform.platform(),
                "type": "ImageSimilarityModel",
                "coremltoolsVersion": coremltools.__version__,
                "version": "1",
            },
            dict(coreml_model.user_defined_metadata),
        )

        expected_result = (
            "Image similarity (%s) created by Turi Create (version %s)" %
            (self.model.model, tc.__version__))
        self.assertEqual(expected_result, coreml_model.short_description)

        # Get model distances for comparison
        if self.feature == "awesome_image":
            img = data[0:1][self.feature][0]
            img_fixed = tc.image_analysis.resize(
                img, *reversed(self.input_image_shape))
            tc_ret = self.model.query(img_fixed, k=data.num_rows())

            if _mac_ver() >= (10, 13):
                from PIL import Image as _PIL_Image

                pil_img = _PIL_Image.fromarray(img_fixed.pixel_data)
                coreml_ret = coreml_model.predict({"awesome_image": pil_img})

                # Compare distances
                coreml_distances = np.array(coreml_ret["distance"])
                tc_distances = tc_ret.sort(
                    "reference_label")["distance"].to_numpy()
                psnr_value = get_psnr(coreml_distances, tc_distances)
                self.assertTrue(psnr_value > 50)
        else:
            # If we reach this branch, the feature is deep_features; coremltools'
            # predict does not work with deep_features yet, so we skip this case
            # until that support is written.
            pass
Code Example #16
    def test_export_coreml(self):
        from PIL import Image
        import coremltools
        import platform

        filename = tempfile.NamedTemporaryFile(suffix=".mlmodel").name
        self.model.export_coreml(filename,
                                 include_non_maximum_suppression=False)

        ## Test metadata
        coreml_model = coremltools.models.MLModel(filename)
        self.maxDiff = None
        self.assertDictEqual(
            {
                "com.github.apple.turicreate.version": tc.__version__,
                "com.github.apple.os.platform": platform.platform(),
                "type": "object_detector",
                "classes": ",".join(sorted(_CLASSES)),
                "feature": self.feature,
                "include_non_maximum_suppression": "False",
                "annotations": "annotation",
                "max_iterations": "1",
                "model": "YOLOv2",
                "training_iterations": "1",
                "version": "1",
            },
            dict([(str(k), v)
                  for k, v in coreml_model.user_defined_metadata.items()]),
        )
        expected_result = (
            "One shot object detector created by Turi Create (version %s)" %
            (tc.__version__))
        self.assertEquals(expected_result, coreml_model.short_description)

        ## Test prediction
        img = self.train[0:1][self.feature][0]
        img_fixed = tc.image_analysis.resize(img, 416, 416, 3)
        pil_img = Image.fromarray(img_fixed.pixel_data)
        if _mac_ver() >= (10, 13):
            ret = coreml_model.predict({self.feature: pil_img},
                                       usesCPUOnly=True)
            self.assertEqual(ret["coordinates"].shape[1], 4)
            self.assertEqual(ret["confidence"].shape[1], len(_CLASSES))
            self.assertEqual(ret["coordinates"].shape[0],
                             ret["confidence"].shape[0])

        # Also test export with non-maximum suppression enabled
        filename2 = tempfile.NamedTemporaryFile(suffix=".mlmodel").name
        self.model.export_coreml(filename2,
                                 include_non_maximum_suppression=True)
        coreml_model = coremltools.models.MLModel(filename2)
        self.assertTrue(
            coreml_model.
            user_defined_metadata["include_non_maximum_suppression"])
Code Example #17
    def test_export_coreml(self):
        from PIL import Image
        import coremltools
        import platform

        filename = tempfile.NamedTemporaryFile(suffix=".mlmodel").name
        self.model.export_coreml(filename,
                                 include_non_maximum_suppression=False)

        coreml_model = coremltools.models.MLModel(filename)
        self.assertDictEqual(
            {
                "com.github.apple.turicreate.version": tc.__version__,
                "com.github.apple.os.platform": platform.platform(),
                "annotations": self.annotations,
                "type": "object_detector",
                "classes": ",".join(sorted(_CLASSES)),
                "feature": self.feature,
                "include_non_maximum_suppression": "False",
                "max_iterations": "1",
                "model": "YOLOv2",
                "training_iterations": "1",
                "version": "1",
            },
            dict([(str(k), v)
                  for k, v in coreml_model.user_defined_metadata.items()]),
        )
        expected_result = "Object detector created by Turi Create (version %s)" % (
            tc.__version__)
        self.assertEquals(expected_result, coreml_model.short_description)

        img = self.sf[0:1][self.feature][0]
        img_fixed = tc.image_analysis.resize(img, 416, 416, 3)
        pil_img = Image.fromarray(img_fixed.pixel_data)
        if _mac_ver() >= (10, 13):
            ret = coreml_model.predict({self.feature: pil_img},
                                       usesCPUOnly=True)
            self.assertEqual(ret["coordinates"].shape[1], 4)
            self.assertEqual(ret["confidence"].shape[1], len(_CLASSES))
            self.assertEqual(ret["coordinates"].shape[0],
                             ret["confidence"].shape[0])
            # A numeric comparison of the resulting top bounding boxes is
            # not that meaningful unless the model has converged

        # Also check if we can train a second model and export it.
        filename2 = tempfile.NamedTemporaryFile(suffix=".mlmodel").name
        # We also test at the same time if we can export a model with a single
        # class
        sf = tc.SFrame({
            "image": [self.sf[self.feature][0]],
            "ann": [self.sf[self.annotations][0][:1]],
        })
        model2 = tc.object_detector.create(sf, max_iterations=1)
        model2.export_coreml(filename2, include_non_maximum_suppression=False)
Code Example #18
File: _mxnet_utils.py Project: zmcartor/turicreate
def assert_valid_num_gpus():
    from turicreate.util import _CUDA_GPU_IDS
    num_gpus = _tc_config.get_num_gpus()
    if not _CUDA_GPU_IDS and _sys.platform == 'darwin':
        # GPU acceleration requires macOS 10.14+
        if num_gpus == 1 and _mac_ver() < (10, 14):
            raise _ToolkitError(
                'GPU acceleration requires at least macOS 10.14')
        elif num_gpus >= 2:
            raise _ToolkitError(
                'Using more than one GPU is currently not supported on Mac')
    _numeric_param_check_range('num_gpus', num_gpus, -1, _six.MAXSIZE)
Code Example #19
 def _coreml_python_predict(self, coreml_model, img_fixed):
     from PIL import Image
     pil_img = Image.fromarray(img_fixed.pixel_data)
     if _mac_ver() >= (10, 13):
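         # One-hot vector selecting which of the learned styles to apply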
         index_data = np.zeros(self.num_styles)
         index_data[0] = 1
         coreml_output = coreml_model.predict(
             {self.content_feature: pil_img, 'index': index_data},
             usesCPUOnly=True)
         img = next(iter(coreml_output.values()))
         img = np.asarray(img)
         img = img[..., 0:3]
         return img
Code Example #20
    def _test_coreml_export(self, model, test_sf, is_regression, has_probability = None, predict_topk = None):

        if has_probability is None:
            has_probability = not is_regression

        if predict_topk is None:
            predict_topk = not is_regression

        # Act & Assert
        with tempfile.NamedTemporaryFile(mode='w', suffix = '.mlmodel') as mlmodel_file:
            mlmodel_filename = mlmodel_file.name
            model.export_coreml(mlmodel_filename)
            coreml_model = coremltools.models.MLModel(mlmodel_filename)
            self.assertDictEqual({
                   'com.github.apple.turicreate.version': tc.__version__,
                   'com.github.apple.os.platform': platform.platform(),
                }, dict(coreml_model.user_defined_metadata)
            )

            if _mac_ver() < (10, 13):
                print("Skipping export test; model not supported on this platform.")
                return

            def array_to_numpy(row):
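                # coremltools expects numpy arrays, so convert any array.array values in the row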
                import array
                import numpy
                import copy
                row = copy.copy(row)
                for r in row:
                    if type(row[r]) == array.array:
                        row[r] = numpy.array(row[r])
                return row

            for row in test_sf:

                coreml_prediction = coreml_model.predict(array_to_numpy(row))
                tc_prediction = model.predict(row)[0]

                if (is_regression == False) and (type(model.classes[0]) == str):
                    if not has_probability:
                        self.assertEqual(coreml_prediction["target"], tc_prediction)
                else:
                    self.assertAlmostEqual(coreml_prediction["target"], tc_prediction, delta = 1e-5)

                # If applicable, compare probabilistic output
                if has_probability and not is_regression:
                    coreml_ret = coreml_prediction["targetProbability"]
                    _, values_tuple = zip(*sorted(coreml_ret.items()))
                    coreml_probs = np.array(values_tuple)
                    tc_probs = np.array(model.predict(row, output_type='probability_vector')[0])
                    np.testing.assert_array_almost_equal(coreml_probs, tc_probs, decimal=5)
Code Example #21
    def test_export_coreml(self):
        from PIL import Image
        import coremltools
        import platform
        filename = tempfile.mkstemp('bingo.mlmodel')[1]
        self.model.export_coreml(filename,
                                 include_non_maximum_suppression=False)

        coreml_model = coremltools.models.MLModel(filename)
        self.assertDictEqual(
            {
                'com.github.apple.turicreate.version': tc.__version__,
                'com.github.apple.os.platform': platform.platform(),
                'annotations': self.annotations,
                'type': 'object_detector',
                'classes': ','.join(sorted(_CLASSES)),
                'feature': self.feature,
                'include_non_maximum_suppression': 'False',
                'max_iterations': '1',
                'model': 'darknet-yolo',
                'training_iterations': '1',
                'version': '1',
            }, dict(coreml_model.user_defined_metadata))
        expected_result = 'Object detector created by Turi Create (version %s)' \
                                    % (tc.__version__)
        self.assertEquals(expected_result, coreml_model.short_description)

        img = self.sf[0:1][self.feature][0]
        img_fixed = tc.image_analysis.resize(img, 416, 416, 3)
        pil_img = Image.fromarray(img_fixed.pixel_data)
        if _mac_ver() >= (10, 13):
            ret = coreml_model.predict({self.feature: pil_img},
                                       usesCPUOnly=True)
            self.assertEqual(ret['coordinates'].shape[1], 4)
            self.assertEqual(ret['confidence'].shape[1], len(_CLASSES))
            self.assertEqual(ret['coordinates'].shape[0],
                             ret['confidence'].shape[0])
            # A numeric comparison of the resulting top bounding boxes is
            # not that meaningful unless the model has converged

        # Also check if we can train a second model and export it (there could
        # be naming issues in mxnet)
        filename2 = tempfile.mkstemp('bingo2.mlmodel')[1]
        # We also test at the same time if we can export a model with a single
        # class
        sf = tc.SFrame({
            'image': [self.sf[self.feature][0]],
            'ann': [self.sf[self.annotations][0][:1]]
        })
        model2 = tc.object_detector.create(sf, max_iterations=1)
        model2.export_coreml(filename2, include_non_maximum_suppression=False)
Code Example #22
    def test_export_coreml(self):
        from PIL import Image
        import coremltools
        import platform
        filename = tempfile.mkstemp('bingo.mlmodel')[1]
        self.model.export_coreml(filename,
                                 include_non_maximum_suppression=False)

        ## Test metadata
        coreml_model = coremltools.models.MLModel(filename)
        self.maxDiff = None
        self.assertDictEqual(
            {
                'com.github.apple.turicreate.version': tc.__version__,
                'com.github.apple.os.platform': platform.platform(),
                'type': 'object_detector',
                'classes': ','.join(sorted(_CLASSES)),
                'feature': self.feature,
                'include_non_maximum_suppression': 'False',
                'annotations': 'annotation',
                'max_iterations': '1',
                'model': 'darknet-yolo',
                'training_iterations': '1',
                'version': '1',
            }, dict(coreml_model.user_defined_metadata))
        expected_result = 'One shot object detector created by Turi Create (version %s)' \
                                    % (tc.__version__)
        self.assertEquals(expected_result, coreml_model.short_description)

        ## Test prediction
        img = self.train[0:1][self.feature][0]
        img_fixed = tc.image_analysis.resize(img, 416, 416, 3)
        pil_img = Image.fromarray(img_fixed.pixel_data)
        if _mac_ver() >= (10, 13):
            ret = coreml_model.predict({self.feature: pil_img},
                                       usesCPUOnly=True)
            self.assertEqual(ret['coordinates'].shape[1], 4)
            self.assertEqual(ret['confidence'].shape[1], len(_CLASSES))
            self.assertEqual(ret['coordinates'].shape[0],
                             ret['confidence'].shape[0])

        # Also test export with non-maximum suppression enabled
        filename2 = tempfile.mkstemp('bingo2.mlmodel')[1]
        self.model.export_coreml(filename2,
                                 include_non_maximum_suppression=True)
        coreml_model = coremltools.models.MLModel(filename2)
        self.assertTrue(
            coreml_model.
            user_defined_metadata['include_non_maximum_suppression'])
Code Example #23
    def check_prediction_match(self, model, coreml_model):
        # Create a small dataset, and compare the models' predict() output
        rs = np.random.RandomState(1234)
        dataset = tc.util.generate_random_sframe(column_codes="r" * 3,
                                                 num_rows=10)
        dataset["session_id"] = 0
        dataset[self.target] = random_labels = [
            rs.randint(
                0,
                self.num_labels - 1,
            ) for i in range(10)
        ]

        if _mac_ver() >= (10, 13):
            w = self.prediction_window
            labels = list(map(str, sorted(model.classes)))

            input_features = {}
            for f in self.features:
                input_features[f] = dataset[f].to_numpy()
            first_input_dict = {}
            second_input_dict = {}
            for key, value in input_features.items():
                first_input_dict[key] = value[:w].copy()
                second_input_dict[key] = value[w:2 * w].copy()
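            # The recurrent state input starts as zeros for the first prediction window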
            first_input_dict["stateIn"] = np.zeros((400))
            ret0 = coreml_model.predict(first_input_dict)

            second_input_dict["stateIn"] = ret0["stateOut"]
            ret1 = coreml_model.predict(second_input_dict)

            pred = model.predict(dataset, output_type="probability_vector")
            model_time0_values = pred[0]
            model_time1_values = pred[w]
            model_predictions = np.array(
                [model_time0_values, model_time1_values])
            coreml_time0_values = [
                ret0[self.target + "Probability"][l] for l in labels
            ]
            coreml_time1_values = [
                ret1[self.target + "Probability"][l] for l in labels
            ]
            coreml_predictions = np.array(
                [coreml_time0_values, coreml_time1_values])

            np.testing.assert_array_almost_equal(model_predictions,
                                                 coreml_predictions,
                                                 decimal=3)
Code Example #24
    def test_export_coreml(self):
        """
        Check the export_coreml() function.
        """
        import coremltools
        import platform

        def get_psnr(x, y):
            # See: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
            # The higher the number the better.
            return 20 * np.log10(max(x.max(), y.max())) - 10 * np.log10(
                np.square(x - y).mean()
            )

        # Save the model as a CoreML model file
        filename = tempfile.NamedTemporaryFile(suffix=".mlmodel").name
        self.model.export_coreml(filename)

        # Load the model back from the CoreML model file
        coreml_model = coremltools.models.MLModel(filename)
        metadata = coreml_model.user_defined_metadata

        self.assertEqual(metadata["com.github.apple.turicreate.version"], tc.__version__)
        self.assertEqual(metadata["com.github.apple.os.platform"], platform.platform())
        self.assertEqual(metadata["type"], "ImageSimilarityModel")
        self.assertEqual(metadata["version"], "1")

        # Get model distances for comparison
        if self.feature == "awesome_image":
            # tc.image_classifier.create(...) was not called with deep features
            img = data[0:1][self.feature][0]
            img_fixed = tc.image_analysis.resize(img, *reversed(self.input_image_shape))
            tc_ret = self.model.query(img_fixed, k=data.num_rows())

            if _mac_ver() >= (10, 13):
                from PIL import Image as _PIL_Image

                pil_img = _PIL_Image.fromarray(img_fixed.pixel_data)
                coreml_ret = coreml_model.predict({"awesome_image": pil_img})

                # Compare distances
                coreml_distances = np.array(coreml_ret["distance"])
                tc_distances = tc_ret.sort("reference_label")["distance"].to_numpy()
                psnr_value = get_psnr(coreml_distances, tc_distances)
                self.assertTrue(psnr_value > 50)
Code Example #25
    def test_export_coreml(self):
        """
        Check the export_coreml() function.
        """
        import coremltools
        # Save the model as a CoreML model file
        filename = tempfile.mkstemp('ActivityClassifier.mlmodel')[1]
        self.model.export_coreml(filename)

        # Load the model back from the CoreML model file
        coreml_model = coremltools.models.MLModel(filename)

        rs = np.random.RandomState(1234)

        # Create a small dataset, and compare the models' predict() output
        dataset = tc.util.generate_random_sframe(column_codes='r' * 3, num_rows=10)
        dataset['session_id'] = 0
        dataset[self.target] = random_labels = [rs.randint(0, self.num_labels - 1, ) for i in range(10)]

        if _mac_ver() >= (10, 13):
            w = self.prediction_window
            labels = list(map(str, sorted(self.model._target_id_map.keys())))

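            # Stack the feature columns into a single (1, num_samples, num_features) array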
            data_list = [dataset[f].to_numpy()[:, np.newaxis] for f in self.features]
            np_data = np.concatenate(data_list, 1)[np.newaxis]

            pred = self.model.predict(dataset, output_type='probability_vector')
            model_time0_values = pred[0]
            model_time1_values = pred[w]
            model_predictions = np.array([model_time0_values, model_time1_values])

            ret0 = coreml_model.predict({'features' : np_data[:, :w].copy()})

            ret1 = coreml_model.predict({'features' : np_data[:, w:2*w].copy(),
                                         'hiddenIn': ret0['hiddenOut'],
                                         'cellIn': ret0['cellOut']})

            coreml_time0_values = [ret0[self.target + 'Probability'][l] for l in labels]
            coreml_time1_values = [ret1[self.target + 'Probability'][l] for l in labels]
            coreml_predictions = np.array([coreml_time0_values, coreml_time1_values])

            np.testing.assert_array_almost_equal(model_predictions, coreml_predictions, decimal=3)
Code Example #26
    def test_export_coreml(self):
        from PIL import Image
        import coremltools
        filename = tempfile.mkstemp('my_style_transfer.mlmodel')[1]
        model = self.model
        model.export_coreml(filename)

        coreml_model = coremltools.models.MLModel(filename)
        img = self.style_sf[0:1][self.style_feature][0]
        img_fixed = tc.image_analysis.resize(img, 256, 256, 3)
        pil_img = Image.fromarray(img_fixed.pixel_data)
        if _mac_ver() >= (10, 13):
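            # One-hot vector selecting which of the learned styles to apply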
            index_data = np.zeros(self.num_styles)
            index_data[0] = 1
            coreml_output = coreml_model.predict(
                {
                    self.content_feature: pil_img,
                    'index': index_data
                },
                usesCPUOnly=True)
            img = next(iter(coreml_output.values()))
            img = np.asarray(img)
            img = img[..., 0:3]

            self.assertEqual(img.shape, (256, 256, 3))

        # Also check if we can train a second model and export it (there could
        # be naming issues in mxnet)
        filename2 = tempfile.mkstemp('my_style_transfer2.mlmodel')[1]

        model2 = tc.style_transfer.create(self.style_sf,
                                          self.content_sf,
                                          max_iterations=1)
        model2.export_coreml(filename2)
Code Example #27
            print("Get passed")
            self.test_summary()
            print("Summary passed")
            self.test_list_fields()
            print("List fields passed")
            self.test_export_coreml()
            print("Export coreml passed")


class ImageSimilaritySqueezeNetTest(ImageSimilarityTest):
    @classmethod
    def setUpClass(self):
        super(ImageSimilaritySqueezeNetTest, self).setUpClass(model='squeezenet_v1.1',
                                                              input_image_shape=(3, 227, 227))


@unittest.skipIf(_mac_ver() < (10,14), 'VisionFeaturePrint_Scene only supported on macOS 10.14+')
class ImageSimilarityVisionFeaturePrintSceneTest(ImageSimilarityTest):
    @classmethod
    def setUpClass(self):
        super(ImageSimilarityVisionFeaturePrintSceneTest, self).setUpClass(model='VisionFeaturePrint_Scene',
                                                                             input_image_shape=(3, 299, 299))

# A test to guarantee that old code using the incorrect name still works.
@unittest.skipIf(_mac_ver() < (10,14), 'VisionFeaturePrint_Scene only supported on macOS 10.14+')
class ImageSimilarityVisionFeaturePrintSceneTest_bad_name(ImageSimilarityTest):
    @classmethod
    def setUpClass(self):
        super(ImageSimilarityVisionFeaturePrintSceneTest_bad_name, self).setUpClass(model='VisionFeaturePrint_Screen',
                                                                             input_image_shape=(3, 299, 299))
Code Example #28
            'class': None,
            'probability': None
        })
        for i in classify_results[:-1]:
            self.assertNotEqual(i['class'], None)
            self.assertNotEqual(i['probability'], None)

        topk_results = model.predict_topk(self.data)
        self.assertEqual(topk_results[-1]['class'], None)
        self.assertEqual(topk_results[-1]['probability'], None)
        for r in topk_results[:-1]:
            self.assertNotEqual(r['class'], None)
            self.assertNotEqual(r['probability'], None)


@unittest.skipIf(_mac_ver() < (10, 14),
                 'Custom models only supported on macOS 10.14+')
class CoreMlCustomModelPreprocessingTest(unittest.TestCase):
    sample_rate = 16000
    frame_length = int(.975 * sample_rate)

    def test_case(self):
        from turicreate.toolkits.sound_classifier import vggish_input

        model = coremltools.proto.Model_pb2.Model()
        model.customModel.className = 'TCSoundClassifierPreprocessing'
        model.specificationVersion = 3

        # Input - float array with shape (frame_length)
        x = model.description.input.add()
        x.name = 'x'
Code Example #29
class ClassifierTestTwoClassesStringLabels(unittest.TestCase):
    @classmethod
    def setUpClass(self):
        self.data = copy(binary_test_data)
        self.is_binary_classification = True
        self.model = tc.sound_classifier.create(self.data,
                                                'labels',
                                                feature='audio',
                                                max_iterations=100)

    def test_create_invalid_max_iterations(self):
        with self.assertRaises(ToolkitError):
            model = tc.sound_classifier.create(self.data,
                                               'labels',
                                               feature='audio',
                                               max_iterations=0)

        with self.assertRaises(TypeError):
            model = tc.sound_classifier.create(self.data,
                                               'labels',
                                               feature='audio',
                                               max_iterations='1')

    def test_create_with_invalid_custom_layers(self):
        with self.assertRaises(ToolkitError):
            model = tc.sound_classifier.create(self.data,
                                               'labels',
                                               feature='audio',
                                               custom_layer_sizes=[])

        with self.assertRaises(ToolkitError):
            model = tc.sound_classifier.create(self.data,
                                               'labels',
                                               feature='audio',
                                               custom_layer_sizes={})

        with self.assertRaises(ToolkitError):
            model = tc.sound_classifier.create(self.data,
                                               'labels',
                                               feature='audio',
                                               custom_layer_sizes=['1'])

        with self.assertRaises(ToolkitError):
            model = tc.sound_classifier.create(self.data,
                                               'labels',
                                               feature='audio',
                                               custom_layer_sizes=[-1])

        with self.assertRaises(ToolkitError):
            model = tc.sound_classifier.create(self.data,
                                               'labels',
                                               feature='audio',
                                               custom_layer_sizes=[0, 0])

    def test_create_with_invalid_batch_size(self):
        with self.assertRaises(ValueError):
            model = tc.sound_classifier.create(self.data,
                                               'labels',
                                               feature='audio',
                                               batch_size=-1)

        with self.assertRaises(TypeError):
            model = tc.sound_classifier.create(self.data,
                                               'labels',
                                               feature='audio',
                                               batch_size=[])

    def test_predict(self):
        # default ('class') output_type
        predictions = self.model.predict(self.data['audio'])
        _raise_error_if_not_sarray(predictions)
        self.assertEqual(len(predictions), len(self.data))
        for a, b in zip(predictions, self.data['labels']):
            self.assertEqual(a, b)

        # 'probability' output_type
        if self.is_binary_classification:
            predictions = self.model.predict(self.data['audio'],
                                             output_type='probability')
            _raise_error_if_not_sarray(predictions)
            self.assertEqual(len(predictions), len(self.data))
            for probabilities, correct_label in zip(predictions,
                                                    self.data['labels']):
                # correct value has highest probability?
                correct_index = self.model.classes.index(correct_label)
                self.assertEqual(np.argmax(probabilities), correct_index)
                # all probabilities sum close to 1?
                self.assertTrue(abs(np.sum(probabilities) - 1) < 0.00001)
        else:
            # 'probability' output type only supported for binary classification
            with self.assertRaises(ToolkitError):
                self.model.predict(self.data['audio'],
                                   output_type='probability')

        # 'probability_vector' output_type
        predictions = self.model.predict(self.data['audio'],
                                         output_type='probability_vector')
        _raise_error_if_not_sarray(predictions)
        self.assertEqual(len(predictions), len(self.data))
        for prob_vector, correct_label in zip(predictions,
                                              self.data['labels']):
            # correct value has highest probability?
            correct_index = self.model.classes.index(correct_label)
            self.assertEqual(np.argmax(prob_vector), correct_index)
            # all probabilities sum close to 1?
            self.assertTrue(abs(np.sum(prob_vector) - 1) < 0.00001)

        # predict with single (dict) example
        single_prediction = self.model.predict(self.data['audio'][0])
        _raise_error_if_not_sarray(single_prediction)
        self.assertEqual(len(single_prediction), 1)
        self.assertEqual(single_prediction[0], self.data['labels'][0])

        # predict with SFrame
        data = self.data.copy()
        del data['labels']
        predictions = self.model.predict(data)
        _raise_error_if_not_sarray(predictions)
        self.assertEqual(len(predictions), len(data))
        for a, b in zip(predictions, self.data['labels']):
            self.assertEqual(a, b)

    def test_save_and_load(self):
        with TempDirectory() as filename:
            self.model.save(filename)
            new_model = tc.load_model(filename)

        self.assertEqual(self.model.feature, new_model.feature)

        old_model_probs = self.model.predict(self.data['audio'],
                                             output_type='probability_vector')
        new_model_probs = new_model.predict(self.data['audio'],
                                            output_type='probability_vector')
        for a, b in zip(old_model_probs, new_model_probs):
            np.testing.assert_array_almost_equal(a, b, decimal=6)

    @unittest.skipIf(_mac_ver() < (10, 14),
                     'Custom models only supported on macOS 10.14+')
    def test_export_coreml_with_prediction(self):
        import resampy

        with TempDirectory() as temp_dir:
            file_name = temp_dir + '/model.mlmodel'
            self.model.export_coreml(file_name)
            core_ml_model = coremltools.models.MLModel(file_name)

        # Check predictions
        for cur_audio in self.data['audio']:
            resampled_data = resampy.resample(cur_audio['data'],
                                              cur_audio['sample_rate'], 16000)
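            # Use the first 15600 samples (0.975 s at 16 kHz) as a single input frame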
            first_audio_frame = resampled_data[:15600]

            tc_x = {'data': first_audio_frame, 'sample_rate': 16000}
            tc_prob_vector = self.model.predict(
                tc_x, output_type='probability_vector')[0]

            coreml_x = np.float32(first_audio_frame /
                                  32768.0)  # Convert to [-1.0, +1.0]
            coreml_y = core_ml_model.predict({'audio': coreml_x})

            core_ml_prob_output_name = self.model.target + 'Probability'
            for i, cur_class in enumerate(self.model.classes):
                self.assertAlmostEquals(
                    tc_prob_vector[i],
                    coreml_y[core_ml_prob_output_name][cur_class],
                    delta=0.001)
        # Check metadata
        metadata = core_ml_model.get_spec().description.metadata
        self.assertTrue('sampleRate' in metadata.userDefined)
        self.assertEqual(metadata.userDefined['sampleRate'], '16000')

    def test_export_core_ml_no_prediction(self):
        import platform
        with TempDirectory() as temp_dir:
            file_name = temp_dir + '/model.mlmodel'
            self.model.export_coreml(file_name)
            core_ml_model = coremltools.models.MLModel(file_name)

        # Check metadata
        metadata = core_ml_model.get_spec().description.metadata
        self.assertTrue('sampleRate' in metadata.userDefined)
        self.assertEqual(metadata.userDefined['sampleRate'], '16000')
        self.assertDictEqual(
            {
                'com.github.apple.turicreate.version': tc.__version__,
                'com.github.apple.os.platform': platform.platform(),
                'type': 'SoundClassifier',
                'coremltoolsVersion': coremltools.__version__,
                'sampleRate': '16000',
                'version': '1'
            }, dict(core_ml_model.user_defined_metadata))
        expected_result = 'Sound classifier created by Turi Create (version %s)' % (
            tc.__version__)
        self.assertEquals(expected_result, core_ml_model.short_description)

    def test_evaluate(self):
        evaluation = self.model.evaluate(self.data)

        # Verify that all metrics are included in the result.
        for metric in [
                'accuracy', 'auc', 'precision', 'recall', 'f1_score',
                'log_loss', 'confusion_matrix', 'roc_curve'
        ]:
            self.assertIn(metric, evaluation)

    def test_classify(self):
        classification = self.model.classify(self.data)
        for a, b in zip(classification['class'], self.data['labels']):
            self.assertEqual(a, b)
        for p in classification['probability']:
            if self.is_binary_classification:
                self.assertTrue(p > .5)
            else:
                self.assertTrue(p > .33)

    def test_predict_topk(self):
        topk_predictions = self.model.predict_topk(self.data, k=2)
        self.assertEqual(len(topk_predictions), len(self.data) * 2)
        self.assertEqual(3, len(topk_predictions.column_names()))
        for column in ['id', 'class', 'probability']:
            self.assertIn(column, topk_predictions.column_names())

        topk_predictions = self.model.predict_topk(self.data,
                                                   k=1,
                                                   output_type='rank')
        self.assertEqual(len(topk_predictions), len(self.data) * 1)
        self.assertEqual(3, len(topk_predictions.column_names()))
        for column in ['id', 'class', 'rank']:
            self.assertIn(column, topk_predictions.column_names())
        unique_ranks = topk_predictions['rank'].unique()
        self.assertTrue(len(unique_ranks) == 1)
        self.assertTrue(unique_ranks[0] == 0)

    def test_predict_topk_invalid_k(self):
        with self.assertRaises(ToolkitError):
            pred = self.model.predict_topk(self.data, k=-1)

        with self.assertRaises(ToolkitError):
            pred = self.model.predict_topk(self.data, k=0)

        with self.assertRaises(TypeError):
            pred = self.model.predict_topk(self.data, k={})

    def test_validation_set(self):
        self.assertTrue(self.model.validation_accuracy is None)

    def test_summary(self):
        """
        Check the summary function.
        """
        model = self.model
        model.summary()

    def test_summary_str(self):
        model = self.model
        self.assertTrue(isinstance(model.summary('str'), str))

    def test_summary_dict(self):
        model = self.model
        self.assertTrue(isinstance(model.summary('dict'), dict))

    def test_summary_invalid_input(self):
        model = self.model
        with self.assertRaises(ToolkitError):
            model.summary(model.summary('invalid'))

        with self.assertRaises(ToolkitError):
            model.summary(model.summary(0))

        with self.assertRaises(ToolkitError):
            model.summary(model.summary({}))
Code Example #30
class CoreMLExportTest(unittest.TestCase):
    @classmethod
    def setUpClass(self, multiclass=False):

        ## Simulate test data
        rs = np.random.RandomState(10)
        n, d = 100, 10
        self.sf = tc.SFrame()
        for i in range(d):
            self.sf.add_column(tc.SArray(array.array('d', rs.randn(n))),
                               inplace=True)

        # Add a categorical column
        categories = np.array(['cat', 'dog', 'foosa'])
        cat_index = rs.randint(len(categories), size=n)
        self.sf['cat_column'] = list(categories[cat_index])
        self.sf['dict_column'] = self.sf['cat_column'].apply(
            lambda x: {x: 1.0})
        self.sf['array_column'] = self.sf.apply(
            lambda x: [x['X1'] * 1.9, x['X2'] * 2.1])

        # Add a target
        target = rs.randint(2, size=n)
        target[0] = 0
        target[1] = 1

        ## Create the model
        self.sf['target'] = target
        self.target = 'target'

        self.model = None
        self.regression = False
        self.has_probability = True

        if multiclass:
            target = rs.randint(3, size=n)
            target[0] = 0
            target[1] = 1
            target[2] = 2
            self.sf[self.target] = target

    def test_coreml_export_new_data(self):
        if self.model is None:
            return

        # Arrange
        model = self.model
        test_data = self.sf[:]
        test_data['cat_column'] = 'new_cat'
        test_data['dict_column'] = [{
            'new_cat': 1
        } for i in range(len(test_data))]

        # Assert
        model.predict(test_data)
        with tempfile.NamedTemporaryFile(mode='w',
                                         suffix='.mlmodel') as mlmodel_file:
            mlmodel_filename = mlmodel_file.name
            model.export_coreml(mlmodel_filename)

    def test_coreml_export(self):
        if self.model is None:
            return

        # Arrange
        model = self.model

        # Act & Assert
        with tempfile.NamedTemporaryFile(mode='w',
                                         suffix='.mlmodel') as mlmodel_file:
            mlmodel_filename = mlmodel_file.name
            model.export_coreml(mlmodel_filename)
            coreml_model = coremltools.models.MLModel(mlmodel_filename)

    @unittest.skipIf(_mac_ver() < (10, 13), 'Only supported on Mac')
    def test_coreml_export_with_predictions(self):
        if self.model is None:
            return

        # Arrange
        sf = self.sf
        model = self.model

        # Act & Assert
        with tempfile.NamedTemporaryFile(mode='w',
                                         suffix='.mlmodel') as mlmodel_file:
            mlmodel_filename = mlmodel_file.name
            model.export_coreml(mlmodel_filename)
            coreml_model = coremltools.models.MLModel(mlmodel_filename)

            def array_to_numpy(row):
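                # coremltools expects numpy arrays, so convert any array.array values in the row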
                import array
                import numpy
                import copy
                row = copy.copy(row)
                for r in row:
                    if type(row[r]) == array.array:
                        row[r] = numpy.array(row[r])
                return row

            for row in sf:
                coreml_prediction = coreml_model.predict(array_to_numpy(row))
                tc_prediction = model.predict(row)[0]
                if (self.regression == False) and (type(model.classes[0])
                                                   == str):
                    self.assertEqual(coreml_prediction[self.target],
                                     tc_prediction)
                else:
                    self.assertAlmostEqual(coreml_prediction[self.target],
                                           tc_prediction,
                                           delta=1e-5)

                # If applicable, compare probabilistic output
                if self.has_probability and not self.regression:
                    coreml_ret = coreml_prediction[self.target + 'Probability']
                    _, values_tuple = zip(*sorted(coreml_ret.items()))
                    coreml_probs = np.array(values_tuple)
                    tc_probs = np.array(
                        model.predict(row,
                                      output_type='probability_vector')[0])
                    np.testing.assert_array_almost_equal(coreml_probs,
                                                         tc_probs,
                                                         decimal=5)