Example #1
0
    def __init__(self, configuration):
        """Validate *configuration* and set up the extraction helpers.

        Args:
            configuration: a ``Configuration`` instance carrying verbosity,
                data-augmentation and access-grid settings.

        Raises:
            TypeError: if *configuration* is not a ``Configuration``.
        """
        if not isinstance(configuration, Configuration):
            raise TypeError("configuration parameter must be a object of Configuration class")

        self.informer = Informer(verbose=configuration.verbose)
        # Derives self.parameter_grid from configuration.access_grid_config.
        self._set_parameter_grid(configuration)

        self.data_augmentator = DataAugmentator(configuration.use_data_augmentation)
        super(FeaturesExtractionProtocol, self).__init__(configuration)
    def __init__(self, configuration):
        """Validate *configuration* and store it for the end-to-end run.

        Args:
            configuration: an ``EndToEndConfiguration`` instance.

        Raises:
            TypeError: if *configuration* is not an ``EndToEndConfiguration``.
        """
        if not isinstance(configuration, EndToEndConfiguration):
            raise TypeError(
                "configuration parameter must be a object of EndToEndConfiguration class"
            )

        self.informer = Informer(verbose=configuration.verbose)
        self.configuration = configuration
        super(EndToEndEvaluationProtocol, self).__init__(configuration)
Example #3
0
    def __init__(self, configuration):
        """Validate *configuration* and prepare informer/augmentator.

        Args:
            configuration: a ``Configuration`` instance.

        Raises:
            TypeError: if *configuration* is not a ``Configuration``.
        """
        if not isinstance(configuration, Configuration):
            raise TypeError(
                "configuration parameter must be a object of Configuration class"
            )

        self.informer = Informer(verbose=configuration.verbose)
        self.data_augmentator = DataAugmentator(
            configuration.use_data_augmentation)

        super(AlgorithmicUnconstrainedEvaluationProtocol,
              self).__init__(configuration)
Example #4
0
    def test_default_usage(self):
        """Smoke-test Informer: plain message, colored message, progress bar."""
        reporter = Informer('access_name')
        reporter.message('message')
        reporter.message('message_blue', color=Colors.FG.blue)

        # Drive the progress bar from 0 to 100 inclusive.
        steps = 100
        current = 0
        while current <= steps:
            reporter.progress('loading', current, steps, color=Colors.FG.green)
            time.sleep(0.01)
            current += 1
    def test_run_with_recreate_false_with_exist_file(self):
        """With ``recreate=False`` and the features file already on disk,
        the manager must skip loading, extraction and saving entirely.
        """
        self.create_fake_h5_file()

        features_saver = FeaturesSaver(self.base_path)
        informer = Informer('access [1/100]')

        features_extractor_manager = FeaturesExtractorManager(
            self.mock_features_extractor, features_saver)
        features_extractor_manager.run(self.mock_access,
                                       informer=informer,
                                       recreate=False)

        # The cached file must short-circuit the whole pipeline.
        self.mock_access.load.assert_not_called()
        self.mock_features_extractor.run.assert_not_called()
        features_saver.save.assert_not_called()
        # BUGFIX: removed a dead duplicate `informer = Informer(...)` that
        # was created after the assertions and never used.

        # Clean up the fake HDF5 fixture created above.
        if os.path.isdir(self.base_path):
            shutil.rmtree(self.base_path)
    def test_run_with_recreate_false_with_no_exists_file(self):
        """With ``recreate=False`` but no features file on disk, the manager
        must load the access, run the extractor once and save the features.
        """
        saver = FeaturesSaver(self.base_path)
        progress = Informer('access [1/100]')

        manager = FeaturesExtractorManager(self.mock_features_extractor,
                                           saver)
        manager.run(self.mock_access, informer=progress, recreate=False)

        # The full pipeline must have executed exactly once.
        self.mock_access.load.assert_called_once()
        self.mock_features_extractor.run.assert_called_once_with(
            self.dict_images, annotations=None)
        FeaturesSaver.save.assert_called_once()
class EndToEndEvaluationProtocol(EvaluationProtocol):
    """End-to-end presentation-attack-detection (PAD) evaluation.

    For every (database, protocol) pair in the configuration it extracts
    per-access PAD scores and timings (or loads them from a cached HDF5
    file) and then generates the evaluation result tables.
    """

    parameter_grid = []
    base_path = None

    def __init__(self, configuration):
        """Validate *configuration* and store it for the end-to-end run.

        Args:
            configuration: an ``EndToEndConfiguration`` instance.

        Raises:
            TypeError: if *configuration* is not an ``EndToEndConfiguration``.
        """
        if not isinstance(configuration, EndToEndConfiguration):
            raise TypeError(
                "configuration parameter must be a object of EndToEndConfiguration class"
            )

        self.informer = Informer(verbose=configuration.verbose)
        self.configuration = configuration
        super(EndToEndEvaluationProtocol, self).__init__(configuration)

    def run(self, function_to_run='run'):
        """Run extraction + evaluation for every database/protocol pair.

        ``function_to_run`` is currently unused; kept for interface
        compatibility with callers.
        """
        for database in self.configuration.databases_list:
            for protocol in self.configuration.protocols_list:
                self.tag_evaluation = '@'.join([database.name(), protocol])
                self.informer.highlight_message(
                    self.tag_evaluation,
                    title=evaluation_long_names[
                        self.configuration.type_evaluation],
                    color=Colors.FG.green)

                # Extraction ------------------------------------------------
                # BUGFIX: this section used to sit OUTSIDE the protocol loop,
                # so only the last protocol of each database was ever
                # extracted/evaluated (while still reading the loop variable
                # `protocol`). It now runs once per (database, protocol).
                scores_path = os.path.join(self.configuration.result_path,
                                           self.tag_evaluation,
                                           self.configuration.face_pad.name,
                                           'scores')
                if not os.path.isdir(scores_path):
                    os.makedirs(scores_path)

                scores_filename = os.path.join(scores_path,
                                               'end_to_end_info.h5')

                if not os.path.isfile(scores_filename):
                    end_to_end_info = self.__extract_pad_results(
                        scores_filename, database, protocol)
                else:
                    # Cached results exist: reuse them instead of re-running
                    # the (expensive) extraction.
                    self.informer.highlight_message(
                        'ok',
                        title='\tLoading end_to_end_info from \'{}\''.format(
                            scores_filename),
                        color=Colors.FG.lightgrey)
                    end_to_end_info = EndToEndInfo.fromfilename(
                        scores_filename)

                # Evaluation ------------------------------------------------
                result_path = os.path.join(self.configuration.result_path,
                                           self.tag_evaluation,
                                           self.configuration.face_pad.name,
                                           'result')
                self.__evaluation(result_path, end_to_end_info)

    def __extract_pad_results(self, scores_filename, database, protocol):
        """Run the configured face-PAD over every access of every subset.

        Collects per-access score, label, CPU processing time and decision
        delay, builds an ``EndToEndInfo``, saves it to *scores_filename*
        and returns it.
        """
        self.informer.highlight_message(
            'end-to-end parameters [framerate = {}, total_time_acquisition = {}]'
            .format(self.configuration.framerate,
                    self.configuration.total_time_acquisition),
            title='\tEnd-to-end extraction',
            color=Colors.FG.lightcyan)

        # Re-samples each access to the configured framerate/duration.
        access_modifier = AccessModifier(
            self.configuration.framerate,
            self.configuration.total_time_acquisition)

        if database.type_database is TypeDatabase.ALL_FILES_TOGETHER:
            dict_accesses = database.get_accesses_by_subset(access_modifier)
        else:
            dict_accesses = database.get_all_accesses(access_modifier)
        self.informer.set_title('Extracting pad results')
        info = cpuinfo.get_cpu_info()

        name_algorithm = '{}_f{}_ta{}'.format(
            self.configuration.face_pad.name, self.configuration.framerate,
            self.configuration.total_time_acquisition)
        framerate = self.configuration.framerate
        total_time_of_acquisition = self.configuration.total_time_acquisition
        # NOTE(review): newer py-cpuinfo releases expose 'brand_raw' instead
        # of 'brand' -- confirm against the pinned dependency version.
        processor = ' '.join(info['brand'].split())

        processed_frames = 0
        scores_list = []
        time_of_delay_list = []
        cpu_time_list = []
        labels_list = []
        benchmark_labels_list = []

        for subset in SUBSETS_TO_EVALUATE:
            subset_accesses = dict_accesses[subset]
            subset_ground_truth = database.get_ground_truth(protocol)[subset]
            failure_accesses = 0

            for i, access in enumerate(subset_accesses):
                progressBar(i + 1, len(subset_accesses), access.name)
                dict_images = access.load()

                # Feed frames to the PAD until it reports it is finished,
                # timing the whole processing stage (milliseconds).
                start_time_cpu_processing = time.time() * 1000
                for key, image in dict_images.items():
                    self.configuration.face_pad.process(image)
                    processed_frames += 1
                    if self.configuration.face_pad.isfinished():
                        break
                cpu_time = time.time() * 1000 - start_time_cpu_processing

                # Time the final decision separately from frame processing.
                start_time_decision = time.time() * 1000
                label, score = self.configuration.face_pad.get_decission()
                time_of_delay = time.time() * 1000 - start_time_decision
                if label == "FAILURE_TO_COMPUTE":
                    failure_accesses += 1
                self.configuration.face_pad.reset()

                scores_list.append(score)
                time_of_delay_list.append(time_of_delay)
                cpu_time_list.append(cpu_time)
                labels_list.append(label)

                benchmark_labels_list.append('ATTACK' if subset_ground_truth[
                    access.name] == 1 else 'NO_ATTACK')

        # NOTE(review): this summary reflects only the LAST subset iterated,
        # since failure_accesses and subset_accesses are rebound per subset.
        print('\n\t\tFAILURE_TO_COMPUTE accesses: ' + str(failure_accesses) +
              '/' + str(len(subset_accesses)))

        end_to_end_info = EndToEndInfo(name_algorithm, framerate,
                                       total_time_of_acquisition, processor,
                                       processed_frames, scores_list,
                                       time_of_delay_list, cpu_time_list,
                                       labels_list, benchmark_labels_list)
        end_to_end_info.save(scores_filename)
        return end_to_end_info

    def __evaluation(self,
                     result_path,
                     end_to_end_info,
                     name_algorithm='Evaluated algorithm'):
        """Generate the end-to-end result tables for *end_to_end_info*."""
        self.informer.highlight_message('ok',
                                        title='\tEnd-to-end evaluation',
                                        color=Colors.FG.lightcyan)

        end_to_end_table_generator = EndToEndTableGenerator(
            self.tag_evaluation, self.configuration.face_pad.name,
            dict(end_to_end_info), result_path)
        end_to_end_table_generator.run(name_algorithm=name_algorithm)
Example #8
0
class FeaturesExtractionProtocol(EvaluationProtocol):
    """Protocol that only extracts features for every configured database.

    If the configuration provides an access-grid config, features are
    extracted once per parameter combination; otherwise a single pass
    over the whole video (all frames) is performed.
    """

    parameter_grid = []
    base_path = None

    def __init__(self, configuration):
        """Validate *configuration* and prepare the extraction helpers.

        Raises:
            TypeError: if *configuration* is not a ``Configuration``.
        """
        if not isinstance(configuration, Configuration):
            raise TypeError("configuration parameter must be a object of Configuration class")

        self.informer = Informer(verbose=configuration.verbose)
        self._set_parameter_grid(configuration)

        self.data_augmentator = DataAugmentator(configuration.use_data_augmentation)
        super(FeaturesExtractionProtocol, self).__init__(configuration)

    def _set_parameter_grid(self, configuration):
        """Cache the parameter grid from the access-grid config, or None."""
        grid_config = configuration.access_grid_config
        self.parameter_grid = grid_config.get_parameter_grid() if grid_config else None

    def run(self):
        """Extract features for each database in the configuration."""
        for database in self.configuration.databases_list:
            experiment_name = 'experiment'
            result_root = os.path.join(self.configuration.result_path,
                                       database.name())
            paths = ExperimentPaths(result_root, experiment_name)

            self.informer.highlight_message(
                database.name(),
                title=evaluation_long_names[self.configuration.type_evaluation],
                color=Colors.FG.green)

            utils = ProtocolUtils(self.informer, self.configuration, paths,
                                  experiment_name, self.data_augmentator)

            # Extraction ----------------------------------------------------
            if not self.parameter_grid:
                # No grid configured: one pass over the whole video.
                self.informer.highlight_message(
                    '|',
                    title=' Extracting features for every frame',
                    color=Colors.FG.lightcyan)
                features_path = os.path.join(paths.get_features_path_root(),
                                             "whole_video")
                utils.extract_features(features_path, database)
            else:
                # One extraction pass per parameter combination of the grid.
                self.informer.highlight_message(
                    self.configuration.access_grid_config.get_message_summary_parameter_grid(),
                    title=' Extracting features',
                    color=Colors.FG.lightcyan)
                for parameters in self.parameter_grid:
                    self.informer.highlight_message(
                        self.configuration.access_grid_config.get_format_message_from_parameters(parameters),
                        title='  Extracting features',
                        color=Colors.FG.lightgrey)

                    features_path = paths.get_features_path(parameters)
                    utils.extract_features(features_path, database,
                                           parameters=parameters)

            # -----------------------------------------------------------------
            self.informer.highlight_message(LINE, title='  ok',
                                            color=Colors.FG.lightcyan)
Example #9
0
class AlgorithmicUnconstrainedEvaluationProtocol(EvaluationProtocol):
    """Unconstrained algorithmic evaluation: extracts features from whole
    videos (all frames), trains the pipeline and evaluates, per database
    and protocol.
    """

    # Root path for results; left unset here.
    base_path = None

    def __init__(self, configuration):
        """Validate *configuration* and prepare informer/augmentator.

        Raises:
            TypeError: if *configuration* is not a ``Configuration``.
        """
        if not isinstance(configuration, Configuration):
            raise TypeError(
                "configuration parameter must be a object of Configuration class"
            )

        self.informer = Informer(verbose=configuration.verbose)
        self.data_augmentator = DataAugmentator(
            configuration.use_data_augmentation)

        super(AlgorithmicUnconstrainedEvaluationProtocol,
              self).__init__(configuration)

    def run(self):
        """For each database: extract (or link/skip) features, then run the
        pipeline and the evaluation for every configured protocol.
        """
        for database in self.configuration.databases_list:
            self.experiment_paths = ExperimentPaths(
                os.path.join(self.configuration.result_path, database.name()),
                self.configuration.pipeline.name)
            self.name_experiment = self.configuration.pipeline.name
            self.protocol_utils = ProtocolUtils(self.informer,
                                                self.configuration,
                                                self.experiment_paths,
                                                self.name_experiment,
                                                self.data_augmentator)
            self.informer.highlight_message(
                database.name(),
                title=evaluation_long_names[
                    self.configuration.type_evaluation],
                color=Colors.FG.green)

            # Extraction ----------------------------------------------------
            # Three mutually exclusive paths: skip everything, link to
            # already-extracted features, or extract from scratch.
            features_path = self.experiment_paths.get_features_path()
            if self.configuration.skip_scores_prediction:
                self.informer.highlight_message(
                    message='ok',
                    title=
                    '  Skipping features extraction (skip_scores_prediction=True)',
                    color=Colors.FG.lightred)
            else:
                if self.configuration.skip_features_extraction:
                    self.informer.highlight_message(
                        message='ok',
                        title='  Skipping features extraction '
                        '(skip_features_extraction=True)',
                        color=Colors.FG.lightred)

                    # Reuse features extracted elsewhere by symlinking them
                    # into this experiment's features path.
                    if self.configuration.dict_extracted_features_paths:
                        extracted_features_manager = ExtractedFeaturesManager(
                            self.configuration.dict_extracted_features_paths)
                        extracted_features_manager.create_features_path_link_to(
                            self.experiment_paths.get_features_path(),
                            database, self.configuration.type_evaluation)
                    else:
                        # NOTE(review): raising Warning aborts the whole run;
                        # confirm that is intended over warnings.warn().
                        raise Warning(
                            'dict_extracted_features_paths configuration '
                            'is not set when skip_features_extraction==True.')
                else:
                    self.informer.highlight_message(
                        'Using whole video and all frames',
                        title=' Extracting features',
                        color=Colors.FG.lightcyan)
                    self.protocol_utils.extract_features(
                        features_path, database)
            # -----------------------------------------------------------------

            for protocol in self.configuration.protocols_list:
                if protocol in database.get_protocols():
                    # Training -------------------------------------------------------
                    pipeline_path = self.experiment_paths.get_pipeline_path(
                        protocol)
                    self.protocol_utils.pipeline(database, protocol,
                                                 features_path, pipeline_path)
                    # -----------------------------------------------------------------

                    # Evaluation ------------------------------------------------------
                    self.protocol_utils.evaluation(database, protocol)
                    # -----------------------------------------------------------------
                else:
                    # NOTE(review): despite the 'Skipped' wording, this raise
                    # stops the run at the first unsupported protocol; also
                    # the message formats get_subsets() where get_protocols()
                    # looks intended -- confirm.
                    raise Warning(
                        'Skipped \'{}\' protocol. T'
                        'his protocol is not implemented in {} dataset, '
                        'try with {}'.format(protocol, database.name(),
                                             database.get_subsets()))