Example 1
    # Assumes numpy as np, matplotlib.pyplot as plt, and the project's
    # Metrics helper class are imported in the enclosing module.
    def display_metrics(self, classifier_list: list, test_labels_all, pred: list, proba: list, label_list: list):
        """
        Function that display some metrics in regard of the training results

        :arg
            self (Trainer): instance of the class
            classifier_list (list): List with selected classifier names
            test_labels_all (numpy array): Numpy array containing target values of the testing set
            pred (list): list of the prediction. Each index of the list contains a numpy array
            proba (list): list of the probability. Each index of the list contains a numpy array
            label_list (list): list of the name of pathologies

        :return
            None
        """
        metrics = Metrics()

        plt.figure()
        plt.title('ROC Curve')
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        for i, value in enumerate(proba):
            fpr, tpr = metrics.roc_metrics(test_labels_all, value)
            plt.plot(fpr, tpr, label=classifier_list[i])

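        # With more than one classifier, also plot the ROC of the averaged
        # (soft-voting) probabilities.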
        if len(classifier_list) > 1:
            mean_proba = np.dstack(proba)
            mean_proba = np.mean(mean_proba, axis=2)
            fpr, tpr = metrics.roc_metrics(test_labels_all, mean_proba)
            plt.plot(fpr, tpr, label='Voting classifiers')

        plt.legend(loc='lower right')
        plt.show(block=False)

        plt.figure()
        plt.title('Precision-Recall Curve')
        plt.xlabel('Recall')
        plt.ylabel('Precision')
        for i, value in enumerate(proba):
            precision, recall = metrics.precision_recall(test_labels_all, value)
            plt.plot(recall, precision, label=classifier_list[i])

        if len(classifier_list) > 1:
            mean_proba = np.dstack(proba)
            mean_proba = np.mean(mean_proba, axis=2)
            precision, recall = metrics.precision_recall(test_labels_all, mean_proba)
            plt.plot(recall, precision, label='Voting classifiers')

        plt.legend(loc='lower left')
        plt.show(block=False)

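        # Soft voting: average the per-classifier predictions and threshold
        # at 0.5 to obtain a single binary prediction per label.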
        if len(classifier_list) > 1:
            mean_pred = np.dstack(pred)
            mean_pred = np.mean(mean_pred, axis=2)
            mean_pred[mean_pred >= 0.5] = 1
            mean_pred[mean_pred < 0.5] = 0
            pred = mean_pred
        else:
            pred = pred[0]

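        # Scalar metrics, each returned together with a per-class breakdown.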
        cohen_kappa_score, kappa_class = metrics.cohen_kappa_score(test_labels_all, pred)
        f1_score, f1_class = metrics.f1_score(test_labels_all, pred)
        accuracy, accuracy_class = metrics.accuracy(test_labels_all, pred)
        precision, precision_class = metrics.precision(test_labels_all, pred)
        recall, recall_class = metrics.recall(test_labels_all, pred)
        print('Cohen: {}'.format(cohen_kappa_score))
        print('F1: {}'.format(f1_score))
        print('Accuracy: {}'.format(accuracy))
        print('Precision: {}'.format(precision))
        print('Recall: {}'.format(recall))

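        # Render the per-class metrics as a fixed-width text table.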
        titles = ['Name', 'Cohen', 'F1 score', 'Accuracy', 'Precision', 'Recall']
        kappa_class_disp = ['%.4f' % elem for elem in kappa_class]
        f1_class_disp = ['%.4f' % elem for elem in f1_class]
        accuracy_class_disp = ['%.4f' % elem for elem in accuracy_class]
        precision_class_disp = ['%.4f' % elem for elem in precision_class]
        recall_class_disp = ['%.4f' % elem for elem in recall_class]

        rows = [titles] + list(
            zip(label_list, kappa_class_disp, f1_class_disp, accuracy_class_disp,
                precision_class_disp, recall_class_disp))
        for i, row in enumerate(rows):
            line = '|'.join(str(x).ljust(19) for x in row)
            print(line)
            if i == 0:
                print('-' * (len(line) - 10))
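
The `Metrics` helper is project-specific and is not shown above. As a point of reference, here is a minimal sketch of what its two curve helpers, `roc_metrics` and `precision_recall`, could look like, assuming multi-label binary targets and micro-averaged curves computed with scikit-learn. The class and method names come from the code above; the micro-averaging choice is an assumption, only the two curve helpers are sketched, and the real project implementation may differ:

    import numpy as np
    from sklearn.metrics import roc_curve, precision_recall_curve

    class Metrics:
        """Hypothetical sketch of the helper used by display_metrics."""

        def roc_metrics(self, y_true, y_proba):
            # Micro-average: flatten the (samples, labels) arrays so every
            # (sample, label) cell counts as one binary decision.
            fpr, tpr, _ = roc_curve(np.asarray(y_true).ravel(),
                                    np.asarray(y_proba).ravel())
            return fpr, tpr

        def precision_recall(self, y_true, y_proba):
            # Same micro-averaged flattening for the precision-recall curve.
            precision, recall, _ = precision_recall_curve(
                np.asarray(y_true).ravel(), np.asarray(y_proba).ravel())
            return precision, recall

A hypothetical call with two classifiers and random dummy data; `trainer`, the classifier names, and the pathology labels are illustrative only:

    import numpy as np

    rng = np.random.default_rng(0)
    y_true = rng.integers(0, 2, size=(100, 3))   # 100 samples, 3 pathologies
    rf_proba = rng.random((100, 3))              # fake probability outputs
    svm_proba = rng.random((100, 3))

    trainer.display_metrics(['Random Forest', 'SVM'],
                            y_true,
                            [(rf_proba >= 0.5).astype(int),
                             (svm_proba >= 0.5).astype(int)],
                            [rf_proba, svm_proba],
                            ['Pathology A', 'Pathology B', 'Pathology C'])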