    def test_run_calls_sklearn_decision_function(self):
        features = self.X['features'].copy()
        svc = LinearSvc()

        svc.run(self.X)

        np.testing.assert_array_equal(
            LinearSVC.decision_function.call_args[0][0], features)

    def test_save_model(self):
        svc = LinearSvc(name='TestSvc')

        svc.save(self.base_path)

        h5py.File.assert_called_once_with(
            os.path.join(self.base_path, 'processors/TestSvc.h5'), 'w')
        self.h5f.create_dataset.assert_called_once_with(
            'data', data=np.array('Model dump'))

    def test_load_model(self):
        svc = LinearSvc(name='TestSvc')
        svc.load(self.base_path)

        h5py.File.assert_called_once_with(
            os.path.join(self.base_path, 'processors/TestSvc.h5'), 'r')
        if sys.version_info[0] < 3:
            pickle.loads.assert_called_once_with('Model')
        else:
            pickle.loads.assert_called_once_with('Model', encoding='latin1')
        self.assertEqual('Model', svc._model)

    def test_should_calls_sklearn_fit_when_fit(self):
        svc = LinearSvc()
        svc.fit(self.X)

        labels = copy.deepcopy(self.X['labels'])
        expected_labels = copy.deepcopy(labels)
        expected_labels[labels > 0] = 0
        expected_labels[labels == 0] = 1

        np.testing.assert_array_equal(LinearSVC.fit.call_args[0][0],
                                      self.X['features'])
        np.testing.assert_array_equal(LinearSVC.fit.call_args[0][1],
                                      expected_labels)
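The fit test above remaps the incoming labels before they reach scikit-learn. A minimal, standalone illustration of that remapping, assuming only numpy and the common presentation-attack-detection convention that 0 marks genuine accesses and positive values mark attack types:

import numpy as np

labels = np.array([0, 1, 2, 0, 3])  # 0 = genuine access, >0 = attack type
expected_labels = labels.copy()
expected_labels[labels > 0] = 0     # every attack label collapses to 0
expected_labels[labels == 0] = 1    # genuine accesses become the positive class
print(expected_labels)              # -> [1 0 0 1 0]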
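The save and load tests earlier in this snippet mock an HDF5 file holding a single 'data' dataset with a pickled model. A hedged sketch of that round trip, using only h5py, numpy and pickle (the function and variable names here are illustrative, not the library's API):

import pickle

import h5py
import numpy as np


def save_model(model, path):
    # Serialise the model and store the raw bytes as one opaque HDF5 dataset.
    with h5py.File(path, 'w') as h5f:
        h5f.create_dataset('data', data=np.void(pickle.dumps(model)))


def load_model(path):
    # Read the bytes back from the 'data' dataset and unpickle them.
    with h5py.File(path, 'r') as h5f:
        return pickle.loads(h5f['data'][()].tobytes())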
Example no. 5
    def test_run_configuration_loading_pipeline_from_file_and_skip_training(
            self):
        self.configuration.verbose = False
        # Modifying pipeline
        self.configuration.pipeline = Pipeline(
            'test_approach_pca095_linear_svc',
            [Pca(name='Pca', n_components=0.95),
             LinearSvc(name='LinearSvc')])
        self.configuration.pipeline.load(TestUtils.get_pipeline_path())

        # Skipping training
        self.configuration.skip_training = True

        algorithmic_unconstrained_evaluation_protocol = AlgorithmicUnconstrainedEvaluationProtocol(
            self.configuration)
        algorithmic_unconstrained_evaluation_protocol.run()

        for database in self.configuration.databases_list:
            root_path = os.path.join(TestUtils.get_result_path(), 'AUE',
                                     database.name())
            self.assertTrue(
                os.path.isdir('{}/features/whole_video'.format(root_path)))
            self.assertTrue(
                os.path.isdir(
                    '{}/pipelines/test_approach_pca095_linear_svc/configurations'
                    .format(root_path)))
            self.assertTrue(
                os.path.isdir(
                    '{}/pipelines/test_approach_pca095_linear_svc/evaluation'.
                    format(root_path)))
            self.assertTrue(
                os.path.isdir(
                    '{}/pipelines/test_approach_pca095_linear_svc/experiment_result'
                    .format(root_path)))
# Imports for this example. scikit-learn supplies the toy data and the AUC metric;
# Pipeline, Pca, LinearSvc and PipelineSaver are assumed to come from
# bob.gradiant.pipelines, as in the configuration snippets further below.
import os

from sklearn.datasets import load_breast_cancer
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

from bob.gradiant.pipelines import Pipeline, Pca, LinearSvc, PipelineSaver


def main():
    data = load_breast_cancer()
    features = data['data']
    labels = data['target']
    features_train, features_test, labels_train, labels_test = train_test_split(features,
                                                                                labels,
                                                                                test_size=0.2,
                                                                                random_state=42)

    x_train = {
        'features': features_train,
        'labels': labels_train
    }
    x_test = {
        'features': features_test,
        'labels': labels_test
    }

    save_path = '/tmp/pipeline_example'
    intermediate_features_path = os.path.join(save_path, 'features')
    pipeline_train = Pipeline('Pca_and_linear_svc_pipeline',
                              [Pca(n_components=0.95), LinearSvc(C=1.0)])

    print('Training pipeline description:', pipeline_train)

    y_train = pipeline_train.fit_run(x_train)
    print('AUC for training set: {}'.format(roc_auc_score(y_train['labels'], y_train['scores'])))

    pipeline_train.save(save_path)
    pca = Pca()
    pca.load(save_path)
    svc = LinearSvc()
    svc.load(save_path)

    pipeline_test = Pipeline('Pca_and_linear_svc_pipeline', [pca,
                                                             PipelineSaver(intermediate_features_path,
                                                                           'PCA_test_features'),
                                                             svc])
    pipeline_test.load(save_path)

    print('Test pipeline description:', pipeline_test)

    y_test = pipeline_test.run(x_test)
    print('AUC for test set: {}'.format(roc_auc_score(y_test['labels'], y_test['scores'])))
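To make the example runnable as a script, the standard entry-point guard can close the file:

if __name__ == '__main__':
    main()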
Example no. 7
    def test_run_configuration_loading_pipeline_from_file_and_skip_training(
            self):
        # Modifying pipeline
        self.configuration.pipeline = Pipeline(
            'test_approach_pca095_linear_svc',
            [Pca(name='Pca', n_components=0.95),
             LinearSvc(name='LinearSvc')])
        self.configuration.pipeline.load(TestUtils.get_pipeline_path())

        # Skipping training
        self.configuration.skip_training = True
        self.configuration.verbose = False

        algorithmic_constrained_evaluation_protocol = AlgorithmicConstrainedEvaluationProtocol(
            self.configuration)
        algorithmic_constrained_evaluation_protocol.run()

        for database in self.configuration.databases_list:
            for parameters in self.configuration.access_grid_config.get_parameter_grid(
            ):
                self.assertTrue(
                    os.path.isdir(
                        '{}/ACE/{}/features/framerate{}_duration{}_startingtime{}'
                        .format(TestUtils.get_result_path(), database.name(),
                                parameters['framerate'],
                                parameters['total_time_acquisition'],
                                parameters['starting_time_acquisition'])))

        for database in self.configuration.databases_list:
            root_path = os.path.join(TestUtils.get_result_path(), 'ACE',
                                     database.name())
            self.assertTrue(
                os.path.isdir(
                    '{}/pipelines/test_approach_pca095_linear_svc/configurations'
                    .format(root_path)))
            self.assertTrue(
                os.path.isdir(
                    '{}/pipelines/test_approach_pca095_linear_svc/evaluation'.
                    format(root_path)))
            self.assertTrue(
                os.path.isdir(
                    '{}/pipelines/test_approach_pca095_linear_svc/experiment_result'
                    .format(root_path)))
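The first loop above walks every combination produced by access_grid_config.get_parameter_grid(). As a hedged sketch only (not the real AccessGridConfig implementation), such a grid expansion amounts to an itertools.product over the configured lists, yielding one dictionary per combination with the keys the test reads:

from itertools import product


def parameter_grid(framerate_list, total_time_acquisition_list,
                   starting_time_acquisition_list):
    # One dictionary per combination; key names match those used in the test above.
    for framerate, total_time, starting_time in product(
            framerate_list, total_time_acquisition_list,
            starting_time_acquisition_list):
        yield {'framerate': framerate,
               'total_time_acquisition': total_time,
               'starting_time_acquisition': starting_time}


# For example, list(parameter_grid([15, 20], [1000, 1500], [100])) yields four combinations.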
Example no. 8
class UnitTestConfiguration(unittest.TestCase):
    configuration_file = 'resources/config/config_test.py'
    database_paths_filename = 'resources/config/database_paths.json'
    databases = [DummyDatabase('resources')]
    protocols = ['grandtest']
    feature_extractor = DummyFeaturesExtractor()
    pipeline = Pipeline(
        'pca095_linear_svc',
        [Pca(name='Pca', n_components=0.95),
         LinearSvc(name='LinearSvc')])
    result_path = 'result'
    access_grid_config = AccessGridConfig(
        framerate_list=[5, 10, 15, 20, 25],
        total_time_acquisition_list=[500, 1000, 1500, 2000],
        starting_time_acquisition_list=[100],
        center_video_acquisition_list=[False])
    verbose = True
    number_threads = 1
    use_data_augmentation = False
    skip_features_extraction = False
    dict_extracted_features_paths = None
    skip_training = False
    skip_scores_prediction = False
    dict_scores_prediction = None
    recreate = False

    def tearDown(self):
        if os.path.isdir('result'):
            shutil.rmtree('result')

    def test_init_fromfilename_wrong_path(self):
        self.assertRaises(IOError,
                          lambda: Configuration.fromfilename('ACE', 'WRONG'))

    def test_init_fromfilename_wrong_type_evaluation(self):
        self.assertRaises(
            ValueError, lambda: Configuration.fromfilename(
                'WRONG', self.configuration_file))

    def test_init_fromfilename_correct_params(self):
        Configuration.fromfilename('ACE', self.configuration_file)

    def test_init_correct_params_but_database_path_not_defined(self):

        replay_path = None
        if "REPLAY_ATTACK_PATH" in os.environ:
            replay_path = os.environ["REPLAY_ATTACK_PATH"]
            del os.environ["REPLAY_ATTACK_PATH"]

        self.assertRaises(
            EnvironmentError, lambda: Configuration(
                'ACE',
                self.database_paths_filename, ['replay-attack'],
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))

        if replay_path:
            os.environ["REPLAY_ATTACK_PATH"] = replay_path

    def test_init_incorrect_databases_param(self):
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                'WRONG_PARAM',
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))

    def test_init_incorrect_databases_param_no_exist(self):
        self.assertRaises(
            ValueError, lambda: Configuration(
                'ACE',
                self.database_paths_filename, ['no_exist_database'],
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))

    def test_init_incorrect_protocol_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                'WRONG_PARAM',
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_incorrect_features_extractor_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                "WRONG_PARAM",
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_incorrect_pipeline_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                "WRONG_PARAM",
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_none_result_path_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                None,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_incorrect_result_path_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline, ['WRONG_PARAM'],
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_none_access_grid_config_list_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=None,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_incorrect_access_grid_config_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config="WRONG_PARAMETER",
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_none_verbose_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=None,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_wrong_verbose_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose='WRONG',
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_none_number_threads_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=None,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_none_use_data_augmentation_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=None,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_none_skip_features_extraction_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda:
            Configuration('ACE',
                          self.database_paths_filename,
                          self.databases,
                          self.protocols,
                          self.feature_extractor,
                          self.pipeline,
                          self.result_path,
                          access_grid_config=self.access_grid_config,
                          verbose=self.verbose,
                          number_threads=self.number_threads,
                          use_data_augmentation=self.use_data_augmentation,
                          skip_features_extraction=None,
                          dict_extracted_features_paths=self.
                          dict_extracted_features_paths,
                          skip_training=self.skip_training,
                          skip_scores_prediction=self.skip_scores_prediction,
                          dict_scores_prediction=self.dict_scores_prediction,
                          recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_none_skip_training_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=None,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_none_skip_scores_prediction(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=None,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_none_recreate_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=None))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_init_with_not_valid_categorized_scores_plotter_param(self):
        os.environ["REPLAY_ATTACK_PATH"] = "resources"
        self.assertRaises(
            TypeError, lambda: Configuration(
                'ACE',
                self.database_paths_filename,
                self.databases,
                self.protocols,
                self.feature_extractor,
                self.pipeline,
                self.result_path,
                access_grid_config=self.access_grid_config,
                categorized_scores_plotter="not_valid",
                verbose=self.verbose,
                number_threads=self.number_threads,
                use_data_augmentation=self.use_data_augmentation,
                skip_features_extraction=self.skip_features_extraction,
                dict_extracted_features_paths=self.
                dict_extracted_features_paths,
                skip_training=self.skip_training,
                skip_scores_prediction=self.skip_scores_prediction,
                dict_scores_prediction=self.dict_scores_prediction,
                recreate=self.recreate))
        del os.environ["REPLAY_ATTACK_PATH"]

    def test_print_to_file(self):
        filename_result = 'result/configuration.txt'
        configuration = Configuration.fromfilename('ACE',
                                                   self.configuration_file)
        configuration.save_to_file(filename_result)
        self.assertTrue(os.path.isfile(filename_result))

    def test_should_check_if_database_paths_are_loaded_as_global_env(self):
        if "REPLAY_ATTACK_PATH" in os.environ.keys():
            del os.environ["REPLAY_ATTACK_PATH"]
        _ = Configuration(
            'ACE',
            self.database_paths_filename,
            self.databases,
            self.protocols,
            self.feature_extractor,
            self.pipeline,
            self.result_path,
            access_grid_config=self.access_grid_config,
            verbose=self.verbose,
            number_threads=self.number_threads,
            use_data_augmentation=self.use_data_augmentation,
            skip_features_extraction=self.skip_features_extraction,
            dict_extracted_features_paths=self.dict_extracted_features_paths,
            skip_training=self.skip_training,
            skip_scores_prediction=self.skip_scores_prediction,
            dict_scores_prediction=self.dict_scores_prediction,
            recreate=True)
        self.assertEqual(os.environ["REPLAY_ATTACK_PATH"], "resources")
        del os.environ["REPLAY_ATTACK_PATH"]
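The configuration tests above express every expected failure as assertRaises plus a lambda. unittest also accepts assertRaises as a context manager, which drops the lambda; a minimal, self-contained illustration (deliberately unrelated to Configuration):

import unittest


class AssertRaisesStyleExample(unittest.TestCase):

    def test_type_error_with_context_manager(self):
        # Equivalent to self.assertRaises(TypeError, lambda: int(None)),
        # but without wrapping the call under test in a lambda.
        with self.assertRaises(TypeError):
            int(None)


if __name__ == '__main__':
    unittest.main()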
Example no. 9
# Database and protocol:
from bob.gradiant.face.databases import DummyDatabase

databases_list = [DummyDatabase('resources')]
protocols_list = ['grandtest']

# Feature extraction:
from bob.gradiant.pad.evaluator import DummyFeaturesExtractor

feature_extractor = DummyFeaturesExtractor()

# Pipeline:
from bob.gradiant.pipelines import Pipeline, Pca, LinearSvc

pipeline = Pipeline('test_approach_pca095_linear_svc',
                    [Pca(name='Pca', n_components=0.95), LinearSvc(name='LinearSvc')])

# Result base path:
result_path = 'result'

# Framerate and time parameters:
from bob.gradiant.core import AccessGridConfig
access_grid_config = AccessGridConfig(framerate_list=[15, 20],
                                      total_time_acquisition_list=[1000, 1500],
                                      starting_time_acquisition_list=[100],
                                      center_video_acquisition_list=[False])

# -----------------------------------------------------------------
# OPTIONAL ARGUMENTS:

# Pad evaluation comparative using the framework bob.gradiant.pad.comparative
import os

database_paths_filename = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                       'database_paths.json')

# Database and protocol:
databases_list = ['msu-mfsd']
protocols_list = ['grandtest']

# Feature extraction:
from bob.gradiant.pad.evaluator import DummyFeaturesExtractor

feature_extractor = DummyFeaturesExtractor()

# Pipeline:
from bob.gradiant.pipelines import Pipeline, Pca, LinearSvc

pipeline = Pipeline('pipeline_pca095_linear_svc', [Pca(name='Pca', n_components=0.95), LinearSvc(name='LinearSvc')])

# Result base path:
result_path = 'result'

# Framerate and time parameters:
framerate_list = [10, 15]
total_time_acquisition_list = [500, 1000]

# -----------------------------------------------------------------

# OPTIONAL ARGUMENTS:


categorized_scores_plotter = None

    def test_should_calls_sklearn_constructor_when_constructs(self):
        LinearSvc(C=0.94)
        LinearSVC.__init__.assert_called_once_with(C=0.94)

# Database and protocol:
databases_list = ['aggregate-database']
protocols_list = ['grandtest']

# Feature extraction:
from bob.gradiant.pad.evaluator import DummyFeaturesExtractor

feature_extractor = DummyFeaturesExtractor()

# Pipeline:
from bob.gradiant.pipelines import Pipeline, Pca, LinearSvc

pipeline = Pipeline(
    'pipeline_pca095_linear_svc',
    [Pca(name='Pca', n_components=0.95),
     LinearSvc(name='LinearSvc')])

# Result base path:
result_path = 'result'

# Framerate and time parameters:
framerate_list = [10, 15]
total_time_acquisition_list = [500, 1000]

# -----------------------------------------------------------------

# OPTIONAL ARGUMENTS:

# Verbose (only True/False are valid):
verbose = True