Example no. 1
0
def evaluator_animals_mobilenet_class_inference_initialized(test_animals_dataset_path, test_animals_mobilenet_path,
                                                            test_animals_dictionary_path):
    """Build an Evaluator wired to the single MobileNet animals model.

    The evaluator is configured for class inference: its concepts come from
    the concept dictionary file, while the concept labels shown in metrics
    come from the default (folder-derived) concepts of the dataset directory.
    """
    mobilenet_evaluator = Evaluator(
        model_path=test_animals_mobilenet_path,
        concept_dictionary_path=test_animals_dictionary_path,
        batch_size=1,
    )
    mobilenet_evaluator.data_dir = test_animals_dataset_path
    # Concepts for inference are taken from the dictionary file ...
    mobilenet_evaluator.concepts = utils.get_dictionary_concepts(test_animals_dictionary_path)
    # ... whereas the display labels come from the dataset's sub-folders.
    default_concepts = utils.get_default_concepts(mobilenet_evaluator.data_dir)
    mobilenet_evaluator.concept_labels = utils.get_concept_items(concepts=default_concepts, key='label')
    return mobilenet_evaluator
Example no. 2
0
def evaluator_animals_ensemble_class_inference_initialized(test_animals_dataset_path, test_animals_ensemble_path,
                                                           test_animals_dictionary_path):
    """Build an Evaluator wired to the animals model ensemble.

    Configured for class inference with arithmetic probability combination:
    concepts come from the concept dictionary file, while the concept labels
    shown in metrics come from the default (folder-derived) concepts of the
    dataset directory.
    """
    ensemble_evaluator = Evaluator(
        ensemble_models_dir=test_animals_ensemble_path,
        concept_dictionary_path=test_animals_dictionary_path,
        combination_mode='arithmetic',
        batch_size=1,
    )
    ensemble_evaluator.data_dir = test_animals_dataset_path
    # Concepts for inference are taken from the dictionary file ...
    ensemble_evaluator.concepts = utils.get_dictionary_concepts(test_animals_dictionary_path)
    # ... whereas the display labels come from the dataset's sub-folders.
    default_concepts = utils.get_default_concepts(ensemble_evaluator.data_dir)
    ensemble_evaluator.concept_labels = utils.get_concept_items(concepts=default_concepts, key='label')
    return ensemble_evaluator
Example no. 3
0
    def evaluate(self,
                 data_dir=None,
                 top_k=1,
                 filter_indices=None,
                 confusion_matrix=False,
                 data_augmentation=None,
                 save_confusion_matrix_path=None,
                 show_confusion_matrix_text=True):
        '''
        Evaluate a set of images. Each sub-folder under 'data_dir/' will be considered as a different class.
        E.g. 'data_dir/class_1/dog.jpg' , 'data_dir/class_2/cat.jpg

        Args:
            data_dir: Data directory to load the images from. Falls back to
                the evaluator's stored `data_dir` when omitted.
            top_k: The top-k predictions to consider. E.g. top_k = 5 is top-5 preds
            filter_indices: If given take only the predictions corresponding to that indices to compute metrics
            confusion_matrix: True/False whether to show the confusion matrix
            data_augmentation: Optional dictionary of 3 elements controlling
                test-time augmentation:
                - 'scale_sizes': 'default' (4 scales similar to Going Deeper with Convolutions work)
                  or a list of sizes. Each scaled image then will be cropped into three square parts.
                - 'transforms': list of transforms to apply to these crops in addition to not
                  applying any transform ('horizontal_flip', 'vertical_flip', 'rotate_90',
                  'rotate_180', 'rotate_270' are supported now).
                - 'crop_original': 'center_crop' mode allows to center crop the original image
                  prior to the rest of transforms, scalings + croppings.
            save_confusion_matrix_path: If path specified save confusion matrix there
            show_confusion_matrix_text: True/False whether to overlay counts on the confusion matrix

        Returns: Probabilities computed and ground truth labels associated.

        Raises:
            ValueError: if no data directory is available (neither argument nor stored).
        '''
        # Arguments take precedence over any values stored on the instance.
        self.top_k = top_k
        self.data_dir = data_dir or self.data_dir
        self.data_augmentation = data_augmentation or self.data_augmentation

        # Guard clause: nothing to evaluate without a data directory.
        if self.data_dir is None:
            raise ValueError(
                'No data directory found, please specify a valid data directory under variable `data_dir`'
            )

        # Derive concepts from the directory layout when none were provided.
        if self.concepts is None:
            self.concepts = utils.get_default_concepts(self.data_dir)

        # Labels shown alongside the metrics results.
        self.concept_labels = utils.get_concept_items(self.concepts, key='label')

        if hasattr(self, 'concept_dictionary'):
            # Inference mode: only proceed when the group/test concepts agree
            # and the dictionary has no duplicate concepts.
            dictionary_is_valid = (
                utils.compare_group_test_concepts(self.concept_labels, self.concept_dictionary)
                and utils.check_concept_unique(self.concept_dictionary)
            )
            if dictionary_is_valid:
                # Keras image generator -> raw probabilities, then map them
                # onto the inference concepts.
                self.probabilities, self.labels = self._compute_probabilities_generator(
                    data_dir=self.data_dir,
                    data_augmentation=self.data_augmentation)
                self.compute_inference_probabilities(self.probabilities)
        else:
            # Plain evaluation: Keras image generator -> probabilities.
            self.probabilities, self.labels = self._compute_probabilities_generator(
                data_dir=self.data_dir,
                data_augmentation=self.data_augmentation)

        # Compute and store the metrics for the obtained predictions.
        self.results = self.get_metrics(
            probabilities=self.probabilities,
            labels=self.labels,
            concept_labels=self.concept_labels,
            top_k=top_k,
            filter_indices=filter_indices,
            confusion_matrix=confusion_matrix,
            save_confusion_matrix_path=save_confusion_matrix_path,
            show_confusion_matrix_text=show_confusion_matrix_text)

        return self.probabilities, self.labels
Example no. 4
0
def test_default_concepts(test_catdog_dataset_path):
    """Default concepts are one {'label', 'id'} dict per dataset sub-folder."""
    expected = [{'label': 'cat', 'id': 'cat'},
                {'label': 'dog', 'id': 'dog'}]
    assert utils.get_default_concepts(test_catdog_dataset_path) == expected
Example no. 5
0
def test_get_class_dictionaries_items(test_catdog_dataset_path):
    """For default concepts, 'label' and 'id' projections are identical."""
    default_concepts = utils.get_default_concepts(test_catdog_dataset_path)
    labels = utils.get_concept_items(default_concepts, 'label')
    ids = utils.get_concept_items(default_concepts, 'id')
    assert labels == ['cat', 'dog']
    assert ids == ['cat', 'dog']