Example #1
    def log_total_metrics(self):
        # Render a single cell as a fixed-width (20-character) centered column.
        def exp_one(arg):
            return str(arg).center(20)

        # Render every value in a row the same way.
        def exp_arg(args):
            return [exp_one(arg) for arg in args]

        log_line()
        log_head(' Result metrics values for {} IoU threshold '.format(
            self._iou_threshold))
        log_head(' Confusion matrix ')

        # Header row: one column per predicted class, plus a False Negatives column.
        sorted_gt_names = sorted(self._class_mapping.keys())
        pred_names = [
            self._class_mapping[gt_name] for gt_name in sorted_gt_names
        ]
        logger.info(''.join(exp_arg([''] + pred_names + ['False Negatives'])))

        # One row per ground-truth class: confusion-matrix counts, then the
        # count of unmatched (false-negative) ground-truth objects.
        for gt_name in sorted_gt_names:
            logger.info(''.join([exp_one(gt_name)] + exp_arg([
                self._confusion_matrix[gt_name, pred_name]
                for pred_name in pred_names
            ]) + [exp_one(self._unmatched_gt[gt_name])]))
            log_line()

        # Final row: unmatched (false-positive) predictions per predicted class.
        logger.info(''.join([exp_one('False Positives')] + exp_arg(
            [self._unmatched_pred[pred_name]
             for pred_name in pred_names]) + [exp_one('0')]))
        log_line()
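
All of the examples on this page call module-level helpers log_line and log_head and a logger object that are not shown here. The following is a minimal sketch of what such helpers might look like, assuming a plain stdout logger and an 80-character line width; both the names' implementations and the width are assumptions, not something the examples confirm.

import logging

logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__name__)  # hypothetical stand-in for the module logger

LINE_WIDTH = 80  # assumed width; the real value is not visible in the examples


def log_line(c='-'):
    # Emit a full-width separator built from the given character.
    logger.info(c * LINE_WIDTH)


def log_head(message, c='-'):
    # Emit a header message centered inside a separator line.
    logger.info(message.center(LINE_WIDTH, c))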
Example #2
    def log_total_metrics(self):
        log_line()
        log_head(' Result metrics values for {} IoU threshold '.format(self._iou_threshold))

        classes_values = self.get_metrics()
        for cls_gt, pair_values in classes_values.items():
            average_precision = pair_values[AP]
            log_line()
            log_head(' Results for pair of classes <<{} <-> {}>>  '.format(
                cls_gt, self._gt_to_pred_class_mapping[cls_gt]))
            logger.info('Average Precision (AP): {}'.format(average_precision))

        log_line()
        log_head(' Mean metrics values ')
        logger.info('Mean Average Precision (mAP): {}'.format(self.average_per_class_avg_precision(classes_values)))
        log_line()
Example #3
    def log_total_metrics(self):
        log_line()
        log_head(' Result metrics values for {} IoU threshold '.format(self._iou_threshold))

        for gt_class, values in self.get_metrics().items():
            log_line()
            log_head(' Results for pair of classes <<{} <-> {}>>  '.format(
                gt_class, self._gt_to_pred_class_mapping[gt_class]))
            logger.info('Precision: {}'.format(values[PRECISION]))
            logger.info('Recall: {}'.format(values[RECALL]))

        log_line()
        log_head(' Total metrics values ')
        total_values = self.get_total_metrics()
        logger.info('Precision: {}'.format(total_values[PRECISION]))
        logger.info('Recall: {}'.format(total_values[RECALL]))
        log_line()
Example #4
    def log_total_metrics(self):
        common_info = """
                P = condition positive (the number of real positive cases in the data)
                N = condition negative (the number of real negative cases in the data)
                TP = True Positive prediction
                TN = True Negative prediction
                FP = False Positive prediction (Type I error)
                FN = False Negative prediction (Type II error)
                Accuracy = (TP + TN)/(TP + TN + FP + FN) = TRUE/TOTAL
                Precision = TP / (TP + FP)
                Recall = TP / (TP + FN)
                F1-Measure = (2 * TP) / (2 * TP + FP + FN)
                """

        log_line()
        log_line(c='*')
        for line in common_info.split('\n'):
            line = line.strip()
            if len(line) > 0:
                logger.info(line.ljust(80))

        log_line(c='*')
        log_line()

        # Log one fixed-width line per metric for a single GT <-> prediction tag pair.
        def print_evaluation_values(tag_pair_metrics):
            labels = [
                ACCURACY, PRECISION, RECALL, F1_MEASURE, TRUE_POSITIVE,
                TRUE_NEGATIVE, FALSE_POSITIVE, FALSE_NEGATIVE
            ]
            for label in labels:
                logger.info('    {0}:   {1:2.4f}'.format(
                    label.ljust(16), tag_pair_metrics[label]))

        for i, (tag_name_gt,
                tag_metrics) in enumerate(self.get_metrics().items(), start=1):
            logger.info('{}) {} <--> {}:'.format(
                i, tag_name_gt, self._tags_mapping[tag_name_gt]))
            print_evaluation_values(tag_metrics)
            log_line()

        logger.info('Total values:')
        total_values = self.get_total_metrics()
        print_evaluation_values(total_values)
        log_line()

        log_line(c='*')
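
The common_info legend in Example #4 spells out how each metric is derived from the raw prediction counts. The snippet below is a small standalone illustration of those formulas, with hypothetical counts chosen only to make the arithmetic concrete; it is not part of the original class.

def evaluation_metrics(tp, tn, fp, fn):
    # Direct translation of the formulas listed in the common_info block.
    return {
        'accuracy': (tp + tn) / (tp + tn + fp + fn),
        'precision': tp / (tp + fp),
        'recall': tp / (tp + fn),
        'f1': (2 * tp) / (2 * tp + fp + fn),
    }


# Hypothetical counts, purely for illustration.
print(evaluation_metrics(tp=80, tn=50, fp=20, fn=10))
# accuracy = 130/160 = 0.8125, precision = 80/100 = 0.8,
# recall = 80/90 ≈ 0.889, f1 = 160/190 ≈ 0.842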