Example #1
    def _test_train_model_from_features(self, sparse_gp, multiple_kernels):
        """Train a GP model from pre-extracted features and validate the result."""
        gp_repurposer = GpRepurposer(self.source_model,
                                     self.source_model_layers)

        num_inducing = self.num_data_points_to_train
        gp_repurposer.NUM_INDUCING_SPARSE_GP = num_inducing

        if not sparse_gp:  # Select a small data set to apply normal GP classification
            self.train_features = self.train_features[:num_inducing]
            self.train_labels = self.train_labels[:num_inducing]
            self.feature_mean = self.train_features.mean(axis=0)

        # Split the feature indices across two layers so that one kernel is trained per layer
        if multiple_kernels:
            trained_model = gp_repurposer._train_model_from_features(
                self.train_features, self.train_labels, {
                    'l1': self.train_feature_indices[:4],
                    'l2': self.train_feature_indices[4:]
                })
        else:
            trained_model = gp_repurposer._train_model_from_features(
                self.train_features, self.train_labels,
                {'l1': self.train_feature_indices})

        assert np.array_equal(gp_repurposer.feature_mean, self.feature_mean)
        self._validate_trained_gp_model(trained_model, sparse_gp, num_inducing,
                                        multiple_kernels)
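The test above relies on fixtures (source_model, train_features, train_labels, train_feature_indices, feature_mean) prepared elsewhere in the test class. A minimal sketch of what that setUp might look like, using synthetic NumPy data; the shapes, label range, and the ten-feature split are assumptions made purely to keep the example self-contained, not the suite's actual fixtures:

    import numpy as np

    def setUp(self):
        # Hypothetical fixture values, chosen only for illustration
        self.num_data_points_to_train = 10
        self.train_features = np.random.rand(100, 10)          # 100 instances, 10 features
        self.train_labels = np.random.randint(0, 2, size=100)  # binary class labels
        self.train_feature_indices = np.arange(10)             # indices split across 'l1'/'l2'
        self.feature_mean = self.train_features.mean(axis=0)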
Example #2
    def test_repurpose(self, mock_model_handler):
        # Patch model_handler and then create gp_repurposer
        mock_model_handler.return_value = RepurposerTestUtils.get_mock_model_handler_object()
        mock_model_handler.return_value.get_layer_output.return_value = (
            {'l1': self.train_features}, self.train_labels)
        gp_repurposer = GpRepurposer(self.source_model, self.source_model_layers)
        gp_repurposer.NUM_INDUCING_SPARSE_GP = 5  # To speed up unit-test running time
        self._run_common_repurposer_tests(gp_repurposer)
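The mock_model_handler parameter suggests this method is wrapped in a unittest.mock.patch decorator that is not shown in the snippet. A minimal, self-contained sketch of the mocking pattern the test relies on; the feature and label values here are made up for illustration:

    from unittest.mock import MagicMock

    # Configure the mock the same way the test does
    mock_model_handler = MagicMock()
    mock_model_handler.return_value.get_layer_output.return_value = (
        {'l1': [[0.1, 0.2]]}, [0])

    # What the repurposer would see when it constructs a handler and extracts features
    handler = mock_model_handler()
    features, labels = handler.get_layer_output(None, None)
    assert features == {'l1': [[0.1, 0.2]]}
    assert labels == [0]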
Example #3
    def _test_gp_serialisation(self, sparse_gp, multiple_kernels):
        """Round-trip a trained GP repurposer through save/load and verify predictions match."""
        gp_repurposer = GpRepurposer(self.source_model,
                                     self.source_model_layers,
                                     apply_l2_norm=True)
        num_inducing = 2
        gp_repurposer.NUM_INDUCING_SPARSE_GP = num_inducing

        if not sparse_gp:  # Select a small data set to apply normal GP classification
            self.train_features = self.train_features[:num_inducing]
            self.train_labels = self.train_labels[:num_inducing]
            self.feature_mean = self.train_features.mean(axis=0)

        if multiple_kernels:
            gp_repurposer.target_model = gp_repurposer._train_model_from_features(
                self.train_features, self.train_labels, {
                    'l1': self.train_feature_indices[:4],
                    'l2': self.train_feature_indices[4:]
                })
        else:
            gp_repurposer.target_model = gp_repurposer._train_model_from_features(
                self.train_features, self.train_labels,
                {'l1': self.train_feature_indices})

        # Save and load repurposer to test serialization
        loaded_repurposer = self._save_and_load_repurposer(gp_repurposer)

        # Validate repurposer properties
        self._compare_gp_repurposers(gp_repurposer, loaded_repurposer)

        # Get prediction results using both repurposers
        predictions_before = gp_repurposer._predict_probability_from_features(
            self.test_features[:self.num_data_points_to_predict])
        predictions_after = loaded_repurposer._predict_probability_from_features(
            self.test_features[:self.num_data_points_to_predict])

        # Compare probabilities predicted per test instance
        self.assertEqual(
            predictions_before.shape, predictions_after.shape,
            "Prediction shape is incorrect. Expected: {} Actual: {}".format(
                predictions_before.shape, predictions_after.shape))

        for sample_id, prediction in enumerate(predictions_before):
            self.assertTrue(
                np.allclose(prediction, predictions_after[sample_id]),
                "Incorrect prediction for sample id: {}. Expected: {} Actual: {}"
                .format(sample_id, predictions_before[sample_id],
                        predictions_after[sample_id]))

        # Check that accuracy is above the expected threshold
        predicted_labels = np.argmax(predictions_after, axis=1)
        accuracy = np.mean(predicted_labels ==
                           self.test_labels[:self.num_data_points_to_predict])
        expected_accuracy = 0.3
        self.assertGreaterEqual(
            accuracy, expected_accuracy,
            "Accuracy {} is below the expected threshold {}".format(
                accuracy, expected_accuracy))