def print_evaluation_values(tag_pair_metrics):
    labels = [
        ACCURACY, PRECISION, RECALL, F1_MEASURE, TRUE_POSITIVE,
        TRUE_NEGATIVE, FALSE_POSITIVE, FALSE_NEGATIVE
    ]
    for label in labels:
        logger.info('    {0}:   {1:2.4f}'.format(
            label.ljust(16), tag_pair_metrics[label]))
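The snippets in this listing lean on a module-level `logger`, an `EventType` enum, and string metric keys (`ACCURACY`, `PRECISION`, ...) defined elsewhere in the source library. A minimal stand-in, assuming plain stdlib logging, so the excerpts can run in isolation; every name and value below is inferred from usage, not taken from the original code:

import logging
from enum import Enum

class EventType(Enum):
    # Hypothetical members inferred from the snippets below.
    METRICS = 'metrics'
    CHECKPOINT = 'checkpoint'
    TASK_VERIFIED = 'task_verified'
    DTL_APPLIED = 'dtl_applied'
    MODEL_APPLIED = 'model_applied'
    IMPORT_APPLIED = 'import_applied'
    TASK_DEPLOYED = 'task_deployed'
    TASK_FINISHED = 'task_finished'

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('examples')

# Metric keys assumed to be plain strings; AP, IOU, INTERSECTION and UNION in
# the later snippets would be defined the same way.
ACCURACY, PRECISION, RECALL, F1_MEASURE = 'accuracy', 'precision', 'recall', 'f1-measure'
TRUE_POSITIVE, TRUE_NEGATIVE = 'true-positive', 'true-negative'
FALSE_POSITIVE, FALSE_NEGATIVE = 'false-positive', 'false-negative'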
Example #2
def _report_metrics(m_type, epoch, metrics):
    logger.info('metrics',
                extra={
                    'event_type': EventType.METRICS,
                    'type': m_type,
                    'epoch': epoch,
                    'metrics': metrics
                })
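A usage sketch with made-up values. Note that the `extra` dict becomes attributes on the LogRecord, so a structured formatter (not shown here) is needed for these fields to appear in the output:

_report_metrics('train', epoch=3, metrics={'loss': 0.217, 'accuracy': 0.934})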
Example #3
def report_dtl_verification_finished(output):
    '''
    Logs a message with level INFO on the logger
    :param output: str
    '''
    logger.info('Verification finished.',
                extra={
                    'output': output,
                    'event_type': EventType.TASK_VERIFIED
                })
Example #4
def report_checkpoint_saved(checkpoint_idx, subdir, sizeb, best_now,
                            optional_data):
    logger.info('checkpoint',
                extra={
                    'event_type': EventType.CHECKPOINT,
                    'id': checkpoint_idx,
                    'subdir': subdir,
                    'sizeb': sizeb,
                    'best_now': best_now,
                    'optional': optional_data
                })
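For illustration, a call with made-up arguments; `sizeb` is evidently the checkpoint size in bytes and `optional_data` a free-form payload:

report_checkpoint_saved(checkpoint_idx=7, subdir='checkpoints/0007',
                        sizeb=104857600, best_now=True,
                        optional_data={'val_loss': 0.042})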
Example #5
    def _check_projects_compatible_structure(self):
        if self._project_gt.datasets.keys() != self._project_pred.datasets.keys():  # Keys are sorted - ok
            raise RuntimeError('Projects must contain same datasets.')
        if self._project_gt.total_items != self._project_pred.total_items:
            raise RuntimeError('Projects must contain same number of samples.')
        for ds_gt in self._project_gt.datasets:
            ds_pred = self._project_pred.datasets.get(ds_gt.name)
            for sample_name in ds_gt:
                if not ds_pred.item_exists(sample_name):
                    raise RuntimeError(
                        'Projects must contain identically named samples in respective datasets. '
                        'Ground truth project has sample {!r} in dataset {!r}, but prediction '
                        'project does not.'.format(sample_name, ds_gt.name))

        logger.info('Projects structure has been read. Samples: {} per project.'.format(self._project_gt.total_items))
Example #6
    def log_total_metrics(self):
        log_line()
        log_head(' Result metrics values for {} IoU threshold '.format(self._iou_threshold))

        classes_values = self.get_metrics()
        for cls_gt, pair_values in classes_values.items():
            average_precision = pair_values[AP]
            log_line()
            log_head(' Results for pair of classes <<{} <-> {}>>  '.format(cls_gt,
                                                                           self._gt_to_pred_class_mapping[cls_gt]))
            logger.info('Average Precision (AP): {}'.format(average_precision))

        log_line()
        log_head(' Mean metrics values ')
        logger.info('Mean Average Precision (mAP): {}'.format(self.get_total_metrics()[AP]))
        log_line()
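`log_line` and `log_head` are helpers not included in these excerpts. Judging by the `c='*'` calls in Example #10, they print horizontal rules and centered headings; a plausible sketch, not the original implementation:

def log_line(length=80, c='-'):
    # Emit a horizontal rule made of the given character.
    logger.info(c * length)

def log_head(string, length=80, c='-'):
    # Emit a heading centered inside a rule of the given character.
    logger.info(string.center(length, c))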
Example #7
    def log_total_metrics(self):
        logger.info('**************** Result IoU metric values ****************')
        logger.info('NOTE! Values for "intersection" and "union" are in pixels.')
        for i, (cls_gt, values) in enumerate(self.get_metrics().items(), start=1):
            iou_line = _iou_log_line(values[IOU], values[INTERSECTION], values[UNION])
            logger.info('{}. Classes {} <-> {}:   {}'.format(i, cls_gt, self._class_mapping[cls_gt], iou_line))

        total_values = self.get_total_metrics()
        logger.info(
            'Total:   {}'.format(_iou_log_line(total_values[IOU], total_values[INTERSECTION], total_values[UNION])))
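`_iou_log_line` is also not shown. Since intersection and union are pixel counts, a formatter along these lines would produce the logged summary (the exact layout is an assumption):

def _iou_log_line(iou, intersection, union):
    # One summary per class: the IoU ratio plus the raw pixel counts behind it.
    return 'IoU = {:.6f} (intersection = {} px, union = {} px)'.format(
        iou, intersection, union)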
Example #8
    def log_total_metrics(self):
        def exp_one(arg):
            return str(arg).center(20)

        def exp_arg(args):
            return [exp_one(arg) for arg in args]

        log_line()
        log_head(' Result metrics values for {} IoU threshold '.format(
            self._iou_threshold))
        log_head(' Confusion matrix ')

        sorted_gt_names = sorted(self._class_mapping.keys())
        pred_names = [
            self._class_mapping[gt_name] for gt_name in sorted_gt_names
        ]
        logger.info(''.join(exp_arg([''] + pred_names + ['False Negatives'])))
        for gt_name in sorted_gt_names:
            logger.info(''.join([exp_one(gt_name)] + exp_arg([
                self._confusion_matrix[gt_name, pred_name]
                for pred_name in pred_names
            ]) + [exp_one(self._unmatched_gt[gt_name])]))
            log_line()
        logger.info(''.join([exp_one('False Positives')] + exp_arg(
            [self._unmatched_pred[pred_name]
             for pred_name in pred_names]) + [exp_one('0')]))
        log_line()
Example #9
    def log_total_metrics(self):
        log_line()
        log_head(' Result metrics values for {} IoU threshold '.format(self._iou_threshold))

        for gt_class, values in self.get_metrics().items():
            log_line()
            log_head(' Results for pair of classes <<{} <-> {}>>  '.format(gt_class,
                                                                           self._gt_to_pred_class_mapping[gt_class]))
            logger.info('Precision: {}'.format(values[PRECISION]))
            logger.info('Recall: {}'.format(values[RECALL]))

        log_line()
        log_head(' Total metrics values ')
        total_values = self.get_total_metrics()
        logger.info('Precision: {}'.format(total_values[PRECISION]))
        logger.info('Recall: {}'.format(total_values[RECALL]))
        log_line()
Example #10
    def log_total_metrics(self):
        common_info = """
                P = condition positive (the number of real positive cases in the data)
                N = condition negative (the number of real negative cases in the data)
                TP = True Positive prediction
                TN = True Negative prediction
                FP = False Positive prediction (Type I error)
                FN = False Negative prediction (Type II error)
                Accuracy = (TP + TN)/(TP + TN + FP + FN) = TRUE/TOTAL
                Precision = TP / (TP + FP)
                Recall = TP / (TP + FN)
                F1-Measure = (2 * TP) / (2 * TP + FP + FN)
                """

        log_line()
        log_line(c='*')
        for line in common_info.split('\n'):
            line = line.strip()
            if len(line) > 0:
                logger.info(line.ljust(80))

        log_line(c='*')
        log_line()

        def print_evaluation_values(tag_pair_metrics):
            labels = [
                ACCURACY, PRECISION, RECALL, F1_MEASURE, TRUE_POSITIVE,
                TRUE_NEGATIVE, FALSE_POSITIVE, FALSE_NEGATIVE
            ]
            for label in labels:
                logger.info('    {0}:   {1:2.4f}'.format(
                    label.ljust(16), tag_pair_metrics[label]))

        for i, (tag_name_gt,
                tag_metrics) in enumerate(self.get_metrics().items(), start=1):
            logger.info('{}) {} <--> {}:'.format(
                i, tag_name_gt, self._tags_mapping[tag_name_gt]))
            print_evaluation_values(tag_metrics)
            log_line()

        logger.info('Total values:')
        total_values = self.get_total_metrics()
        print_evaluation_values(total_values)
        log_line()

        log_line(c='*')
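As a quick sanity check of the formulas in `common_info`, a toy computation with made-up counts:

tp, tn, fp, fn = 80, 880, 20, 20
accuracy = (tp + tn) / (tp + tn + fp + fn)   # (80 + 880) / 1000 = 0.96
precision = tp / (tp + fp)                   # 80 / 100 = 0.8
recall = tp / (tp + fn)                      # 80 / 100 = 0.8
f1 = (2 * tp) / (2 * tp + fp + fn)           # 160 / 200 = 0.8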
Example #11
def report_dtl_finished():
    '''
    Logs a message with level INFO on the logger
    '''
    logger.info('DTL finished', extra={'event_type': EventType.DTL_APPLIED})
Example #12
def report_inference_finished():
    '''
    Logs a message with level INFO on the logger
    '''
    logger.info('model applied', extra={'event_type': EventType.MODEL_APPLIED})
Example #13
def report_import_finished():
    '''
    Logs a message with level INFO on the logger
    '''
    logger.info('import finished',
                extra={'event_type': EventType.IMPORT_APPLIED})
Example #14
def report_agent_rpc_ready():
    '''
    Logs a message with level INFO on the logger
    '''
    logger.info('Ready to get events',
                extra={'event_type': EventType.TASK_DEPLOYED})
Example #15
def log_task_finished(logger):
    if logger is None:
        return
    logger.info('TASK_END', extra={'event_type': EventType.TASK_FINISHED})
    _stop_and_wait_logger(logger)
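`_stop_and_wait_logger` is not included in these excerpts. Presumably it makes sure buffered records are written out before the task exits; a minimal guess at such a helper:

def _stop_and_wait_logger(logger):
    # Illustrative only: flush and close every handler so nothing buffered
    # is lost when the process terminates.
    for handler in list(logger.handlers):
        handler.flush()
        handler.close()
        logger.removeHandler(handler)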
Example #16
def log_tree(dir_path, logger):
    out = tree(dir_path)
    logger.info("DIRECTORY_TREE", extra={'tree': out})