Example #1
    def plot_top_k_sensitivity_by_concept(self):
        '''Plot a per-concept sensitivity curve from the results of the last evaluation.'''
        if self.results is None:
            raise ValueError(
                'results parameter is None, please run an evaluation first')
        concepts = utils.get_concept_items(self.concepts, key='label')
        metrics = [
            item['metrics']['sensitivity']
            for item in self.results['individual']
        ]
        visualizer.plot_concept_metrics(concepts, metrics, 'Top-k',
                                        'Sensitivity')
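
A minimal usage sketch (the model path and data directory below are placeholders, not taken from the examples): since the method reads `self.results`, `evaluate` must run first or it raises `ValueError`.

# Hypothetical usage; constructor and evaluate arguments are illustrative.
evaluator = Evaluator(model_path='model.h5')
evaluator.evaluate(data_dir='test_images/', top_k=3)   # populates evaluator.results
evaluator.plot_top_k_sensitivity_by_concept()          # one sensitivity curve per concept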
Example #2
    def get_metrics(self,
                    probabilities,
                    labels,
                    top_k=1,
                    concept_labels=None,
                    filter_indices=None,
                    confusion_matrix=False,
                    save_confusion_matrix_path=None,
                    show_confusion_matrix_text=True):
        '''
        Compute metrics for an experiment given probabilities and labels.

        Args:
            probabilities: Probabilities from softmax layer
            labels: Ground truth labels
            top_k: The top-k predictions to consider. E.g. top_k = 5 computes top-5 metrics
            concept_labels: List containing the concept labels
            filter_indices: If given, compute metrics only on the predictions at those indices
            confusion_matrix: If True, show the confusion matrix
            save_confusion_matrix_path: If a path is specified, save the confusion matrix there
            show_confusion_matrix_text: If True, show per-cell values and class labels in the confusion matrix

        Returns: Dictionary with metrics for each concept

        '''
        self.combined_probabilities = utils.combine_probabilities(
            probabilities, self.combination_mode)

        concept_labels = concept_labels or utils.get_concept_items(
            self.concepts, key='label')

        if filter_indices is not None:
            self.combined_probabilities = self.combined_probabilities[
                filter_indices]
            labels = labels[filter_indices]

        y_true = labels.argmax(axis=1)

        # Compute sensitivity and precision for each k up to top_k.
        results = metrics.metrics_top_k(self.combined_probabilities,
                                        y_true,
                                        concepts=concept_labels,
                                        top_k=top_k)

        # Show metrics visualization as a confusion matrix
        if confusion_matrix:
            self.plot_confusion_matrix(
                confusion_matrix=results['average']['confusion_matrix'],
                concept_labels=concept_labels,
                save_path=save_confusion_matrix_path,
                show_text=show_confusion_matrix_text,
                show_labels=show_confusion_matrix_text)

        return results
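
Because `evaluate` stores its outputs on the instance (see Example #7), `get_metrics` can be re-run afterwards, e.g. to restrict the metrics to a subset of samples via `filter_indices`. A sketch, assuming a previously evaluated `evaluator`; the index values are illustrative:

results = evaluator.get_metrics(
    probabilities=evaluator.probabilities,
    labels=evaluator.labels,
    top_k=2,
    filter_indices=[0, 5, 9],   # only these samples enter the metrics
    confusion_matrix=True)
print(results['average'])       # aggregate metrics; per-concept entries under results['individual']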
Example #3
def evaluator_animals_mobilenet_class_inference_initialized(test_animals_dataset_path, test_animals_mobilenet_path,
                                                            test_animals_dictionary_path):
    # Build an Evaluator backed by a single MobileNet model plus a concept
    # dictionary, then point it at the animals test dataset.
    evaluator = Evaluator(
        model_path=test_animals_mobilenet_path,
        concept_dictionary_path=test_animals_dictionary_path,
        batch_size=1
    )
    evaluator.data_dir = test_animals_dataset_path
    evaluator.concepts = utils.get_dictionary_concepts(test_animals_dictionary_path)
    group_concepts = utils.get_default_concepts(evaluator.data_dir)
    evaluator.concept_labels = utils.get_concept_items(concepts=group_concepts, key='label')
    return evaluator
Example #4
def evaluator_animals_ensemble_class_inference_initialized(test_animals_dataset_path, test_animals_ensemble_path,
                                                           test_animals_dictionary_path):
    # Same pattern as Example #3, but with an ensemble of models whose
    # probabilities are combined with an arithmetic mean.
    evaluator = Evaluator(
        ensemble_models_dir=test_animals_ensemble_path,
        concept_dictionary_path=test_animals_dictionary_path,
        combination_mode='arithmetic',
        batch_size=1
    )
    evaluator.data_dir = test_animals_dataset_path
    evaluator.concepts = utils.get_dictionary_concepts(test_animals_dictionary_path)
    group_concepts = utils.get_default_concepts(evaluator.data_dir)
    evaluator.concept_labels = utils.get_concept_items(concepts=group_concepts, key='label')
    return evaluator
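
Both initializers follow the same pattern: construct an `Evaluator` (single model vs. ensemble), then attach the dataset path, the dictionary concepts, and the group concept labels. In a pytest suite they would typically be exposed as fixtures; a sketch under that assumption, where the three path arguments are themselves fixtures:

import pytest

@pytest.fixture
def animals_ensemble_evaluator(test_animals_dataset_path, test_animals_ensemble_path,
                               test_animals_dictionary_path):
    # Delegate to the initializer above so every test gets a fresh Evaluator.
    return evaluator_animals_ensemble_class_inference_initialized(
        test_animals_dataset_path, test_animals_ensemble_path, test_animals_dictionary_path)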
Example #5
    def plot_confusion_matrix(self,
                              confusion_matrix,
                              concept_labels=None,
                              save_path=None,
                              show_text=True,
                              show_labels=True):
        '''
        Plot a confusion matrix.

        Args:
            confusion_matrix: Confusion matrix to plot [n_classes, n_classes]
            concept_labels: List containing the class labels
            save_path: If a path is specified, save the confusion matrix there
            show_text: If True, show per-cell values in the plot
            show_labels: If True, show the class labels on the axes

        Returns: None. Shows the confusion matrix on the screen.

        '''
        concept_labels = concept_labels or utils.get_concept_items(
            self.concepts, key='label')
        visualizer.plot_confusion_matrix(confusion_matrix,
                                         concepts=concept_labels,
                                         save_path=save_path,
                                         show_text=show_text,
                                         show_labels=show_labels)
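
A usage sketch: `get_metrics` stores the matrix under `results['average']['confusion_matrix']` (see Example #2), so it can be re-plotted later with different display options. The save path is a placeholder; `results` is assumed to come from a previous `get_metrics` call.

evaluator.plot_confusion_matrix(
    confusion_matrix=results['average']['confusion_matrix'],
    save_path='confusion_matrix.png',   # hypothetical output file
    show_text=False,                    # hide per-cell values for large matrices
    show_labels=True)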
Example #6
    def get_image_paths_by_prediction(self,
                                      probabilities,
                                      labels,
                                      concept_labels=None,
                                      image_paths=None):
        '''
        Return the image paths of the predictions, grouped by confusion matrix cell.

        Args:
            probabilities: Probabilities given by the model [n_samples, n_classes]
            labels: Ground truth labels (categorical)
            concept_labels: List with class names (by default those of the last evaluation)
            image_paths: List with image paths (by default those of the last evaluation)

        Note: probabilities from multiple models are combined according to `self.combination_mode`:
            'maximum': predictions are obtained by choosing the maximum probability for each class
            'geometric': predictions are obtained by a geometric mean of all the probabilities
            'arithmetic': predictions are obtained by an arithmetic mean of all the probabilities
            'harmonic': predictions are obtained by a harmonic mean of all the probabilities

        Returns: A dictionary containing a list of images per confusion matrix cell (key 'ClassA_ClassB',
        i.e. true class A, predicted class B), and the predicted probabilities

        '''
        self.combined_probabilities = utils.combine_probabilities(
            probabilities, self.combination_mode)

        if image_paths is None:
            image_paths = self.image_paths

        if self.combined_probabilities.shape[0] != len(image_paths):
            raise ValueError(
                'Length of probabilities (%i) does not match the number of image paths (%i)'
                % (self.combined_probabilities.shape[0], len(image_paths)))

        concept_labels = concept_labels or utils.get_concept_items(
            self.concepts, key='label')

        predictions = np.argmax(self.combined_probabilities, axis=1)
        y_true = labels.argmax(axis=1)
        dict_image_paths_concept = {}

        # One entry per confusion matrix cell, keyed 'trueLabel_predictedLabel'.
        for name_1 in concept_labels:
            for name_2 in concept_labels:
                dict_image_paths_concept[name_1 + '_' + name_2] = {
                    'image_paths': [],
                    'probs': [],
                    'diagonal': name_1 == name_2
                }

        for i, pred in enumerate(predictions):
            predicted_label = concept_labels[pred]
            correct_label = concept_labels[y_true[i]]
            cell = dict_image_paths_concept[correct_label + '_' + predicted_label]
            cell['image_paths'].append(image_paths[i])
            cell['probs'].append(self.combined_probabilities[i])

        return dict_image_paths_concept
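
The returned dictionary has one entry per confusion-matrix cell, keyed 'trueLabel_predictedLabel'. A sketch of collecting misclassified images (the 'cat'/'dog' concept names are assumptions, and the probabilities/labels are taken from a previous evaluation):

cells = evaluator.get_image_paths_by_prediction(evaluator.probabilities,
                                                evaluator.labels)
# Off-diagonal cells ('diagonal': False) contain the misclassifications,
# e.g. images whose true class is 'cat' but which were predicted as 'dog'.
for path, probs in zip(cells['cat_dog']['image_paths'],
                       cells['cat_dog']['probs']):
    print(path, probs)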
Example #7
    def evaluate(self,
                 data_dir=None,
                 top_k=1,
                 filter_indices=None,
                 confusion_matrix=False,
                 data_augmentation=None,
                 save_confusion_matrix_path=None,
                 show_confusion_matrix_text=True):
        '''
        Evaluate a set of images. Each sub-folder under 'data_dir/' will be considered as a different class.
        E.g. 'data_dir/class_1/dog.jpg', 'data_dir/class_2/cat.jpg'

        Args:
            data_dir: Data directory to load the images from
            top_k: The top-k predictions to consider. E.g. top_k = 5 is top-5 preds
            filter_indices: If given, compute metrics only on the predictions at those indices
            confusion_matrix: If True, show the confusion matrix
            data_augmentation: Dictionary consisting of 3 elements:
                - 'scale_sizes': 'default' (4 scales similar to the Going Deeper with Convolutions work)
                  or a list of sizes. Each scaled image will then be cropped into three square parts.
                - 'transforms': list of transforms to apply to these crops in addition to not applying
                  any transform ('horizontal_flip', 'vertical_flip', 'rotate_90', 'rotate_180' and
                  'rotate_270' are supported now).
                - 'crop_original': 'center_crop' mode center-crops the original image prior to the rest
                  of the transforms, scalings + croppings.
            save_confusion_matrix_path: If a path is specified, save the confusion matrix there
            show_confusion_matrix_text: If True, show per-cell values and class labels in the confusion matrix

        Returns: Probabilities computed and ground truth labels associated.

        '''
        self.top_k = top_k
        self.data_dir = data_dir or self.data_dir
        self.data_augmentation = data_augmentation or self.data_augmentation
        if self.data_dir is None:
            raise ValueError(
                'No data directory found, please specify a valid data directory under variable `data_dir`'
            )
        else:
            # Create dictionary containing class names
            if self.concepts is None:
                self.concepts = utils.get_default_concepts(self.data_dir)

            # Obtain labels to show on the metrics results
            self.concept_labels = utils.get_concept_items(self.concepts,
                                                          key='label')

            if hasattr(self, 'concept_dictionary'):
                if utils.compare_group_test_concepts(self.concept_labels, self.concept_dictionary) \
                        and utils.check_concept_unique(self.concept_dictionary):
                    # Create Keras image generator and obtain probabilities
                    self.probabilities, self.labels = self._compute_probabilities_generator(
                        data_dir=self.data_dir,
                        data_augmentation=self.data_augmentation)
                    self.compute_inference_probabilities(self.probabilities)

            else:
                # Create Keras image generator and obtain probabilities
                self.probabilities, self.labels = self._compute_probabilities_generator(
                    data_dir=self.data_dir,
                    data_augmentation=self.data_augmentation)

            # Compute metrics
            self.results = self.get_metrics(
                probabilities=self.probabilities,
                labels=self.labels,
                concept_labels=self.concept_labels,
                top_k=top_k,
                filter_indices=filter_indices,
                confusion_matrix=confusion_matrix,
                save_confusion_matrix_path=save_confusion_matrix_path,
                show_confusion_matrix_text=show_confusion_matrix_text)

        return self.probabilities, self.labels
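
A minimal call sketch, assuming the 'data_dir/class_x/...' layout described in the docstring; the directory name is a placeholder and the `data_augmentation` dictionary uses the three documented keys:

probabilities, labels = evaluator.evaluate(
    data_dir='test_images/',            # e.g. test_images/class_1/..., test_images/class_2/...
    top_k=2,
    confusion_matrix=True,
    data_augmentation={
        'scale_sizes': 'default',
        'transforms': ['horizontal_flip', 'rotate_180'],
        'crop_original': 'center_crop'
    })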
Example #8
def test_get_class_dictionaries_items(test_catdog_dataset_path):
    concepts_by_default = utils.get_default_concepts(test_catdog_dataset_path)
    label_output = utils.get_concept_items(concepts_by_default, 'label')
    id_output = utils.get_concept_items(concepts_by_default, 'id')
    assert label_output == id_output == ['cat', 'dog']
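
The assertion implies that `get_default_concepts` builds one concept per class folder, with identical 'label' and 'id' fields taken from the folder name. A minimal sketch of that inferred structure (the exact shape is an assumption based on this test, not confirmed by the library docs):

# Inferred output of utils.get_default_concepts() for folders 'cat/' and 'dog/':
concepts = [
    {'label': 'cat', 'id': 'cat'},
    {'label': 'dog', 'id': 'dog'},
]
assert utils.get_concept_items(concepts, key='label') == ['cat', 'dog']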