Example #1
File: core.py  Project: lext/deep-pipeline
def log_metrics(writer, train_loss, val_loss, conf_matrix):
    # GlobalKVS, calculate_dice and calculate_iou are helpers defined
    # elsewhere in lext/deep-pipeline; colored is termcolor's.
    kvs = GlobalKVS()

    dices = {
        'dice_{}'.format(cls): dice
        for cls, dice in enumerate(calculate_dice(conf_matrix))
    }
    ious = {
        'iou_{}'.format(cls): iou
        for cls, iou in enumerate(calculate_iou(conf_matrix))
    }
    print(colored('==> ', 'green') + 'Metrics:')
    print(colored('====> ', 'green') + 'Train loss:', train_loss)
    print(colored('====> ', 'green') + 'Val loss:', val_loss)
    print(colored('====> ', 'green') + f'Val Dice: {dices}')
    print(colored('====> ', 'green') + f'Val IoU: {ious}')
    dices_tb = {}
    for cls in range(1, len(dices)):
        dices_tb[f"Dice [{cls}]"] = dices[f"dice_{cls}"]

    ious_tb = {}
    for cls in range(1, len(ious)):
        ious_tb[f"IoU [{cls}]"] = ious[f"iou_{cls}"]

    to_log = {'train_loss': train_loss, 'val_loss': val_loss}
    # Tensorboard logging
    writer.add_scalars(f"Losses_{kvs['args'].model}", to_log, kvs['cur_epoch'])
    writer.add_scalars('Metrics/Dice', dices_tb, kvs['cur_epoch'])
    writer.add_scalars('Metrics/IoU', ious_tb, kvs['cur_epoch'])
    # KVS logging
    to_log.update({'epoch': kvs['cur_epoch']})
    val_metrics = {'epoch': kvs['cur_epoch']}
    val_metrics.update(to_log)
    val_metrics.update(dices)
    val_metrics.update({'conf_matrix': conf_matrix})

    kvs.update(f'losses_fold_[{kvs["cur_fold"]}]', to_log)
    kvs.update(f'val_metrics_fold_[{kvs["cur_fold"]}]', val_metrics)
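Example #1 treats calculate_dice and calculate_iou as functions that map a confusion matrix to one score per class. Their actual implementations live in lext/deep-pipeline and are not shown here; the following is a minimal sketch of that mapping, assuming a square confusion matrix with ground truth on the rows and predictions on the columns (the helper name dice_and_iou_from_conf is ours, not the project's):

import numpy as np

def dice_and_iou_from_conf(conf_matrix):
    # conf_matrix[i, j]: number of pixels of ground-truth class i
    # that were predicted as class j.
    conf = np.asarray(conf_matrix, dtype=np.float64)
    tp = np.diag(conf)           # true positives per class
    gt = conf.sum(axis=1)        # ground-truth pixels per class
    pr = conf.sum(axis=0)        # predicted pixels per class
    eps = 1e-8                   # guards against empty classes
    dice = 2 * tp / (gt + pr + eps)      # 2*TP / (2*TP + FP + FN)
    iou = tp / (gt + pr - tp + eps)      # TP / (TP + FP + FN)
    return dice, iou

Example #1 then enumerates these per-class scores and, when writing to TensorBoard, starts at class 1, i.e. skips the background class.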
Example #2
def evaluation_runner(args, config, save_dir):
    """
    Calculates evaluation metrics on predicted masks against target.
    :param args:
    :param config:
    :param save_dir:
    :return:
    """
    start_eval = time()

    # Evaluation arguments
    args.image_path = args.data_location / 'images'
    args.mask_path = args.data_location / 'masks'
    args.pred_path = args.data_location / 'predictions'
    args.save_dir = args.data_location / 'evaluation'
    args.save_dir.mkdir(exist_ok=True)
    args.n_labels = 2

    # Snapshots to be evaluated
    if not isinstance(save_dir, list):
        save_dir = [save_dir]

    # Iterate through snapshots
    for snap in save_dir:

        # Initialize results
        results = {'Sample': [], 'Dice': [], 'IoU': [], 'Similarity': []}

        # Loop for samples
        (args.save_dir / ('visualizations_' + snap.name)).mkdir(exist_ok=True)
        samples = os.listdir(str(args.mask_path))
        samples.sort()
        try:
            for idx, sample in enumerate(samples):

                print(
                    f'==> Processing sample {idx + 1} of {len(samples)}: {sample}'
                )

                # Load image stacks
                if config['training']['experiment'] == '3D':
                    mask, files_mask = load(str(args.mask_path / sample),
                                            axis=(0, 2, 1),
                                            rgb=False,
                                            n_jobs=args.n_threads)

                    pred, files_pred = load(str(args.pred_path / snap.name /
                                                sample),
                                            axis=(0, 2, 1),
                                            rgb=False,
                                            n_jobs=args.n_threads)
                    data, files_data = load(str(args.image_path / sample),
                                            axis=(0, 2, 1),
                                            rgb=False,
                                            n_jobs=args.n_threads)

                    # Crop to the common shape in case of inconsistency
                    crop = np.minimum(pred.shape, mask.shape)  # element-wise minimum
                    mask = mask[:crop[0], :crop[1], :crop[2]]
                    pred = pred[:crop[0], :crop[1], :crop[2]]

                else:
                    data = cv2.imread(str(args.image_path / sample))
                    mask = cv2.imread(str(args.mask_path / sample),
                                      cv2.IMREAD_GRAYSCALE)
                    pred = cv2.imread(str(args.pred_path / snap.name / sample),
                                      cv2.IMREAD_GRAYSCALE)
                    # Fall back to the .bmp extension when a file is missing
                    if pred is None:
                        sample = sample[:-4] + '.bmp'
                        pred = cv2.imread(
                            str(args.pred_path / snap.name / sample),
                            cv2.IMREAD_GRAYSCALE)
                    elif mask is None:
                        sample = sample[:-4] + '.bmp'
                        mask = cv2.imread(str(args.mask_path / sample),
                                          cv2.IMREAD_GRAYSCALE)

                    # Crop to the common shape in case of inconsistency
                    crop = np.minimum(pred.shape, mask.shape)  # element-wise minimum
                    mask = mask[:crop[0], :crop[1]]
                    pred = pred[:crop[0], :crop[1]]

                # Evaluate metrics
                # np.bool was removed in NumPy 1.24; the builtin bool works
                conf_matrix = calculate_conf(pred.astype(bool),
                                             mask.astype(bool),
                                             args.n_labels)
                dice = calculate_dice(conf_matrix)[1]
                iou = calculate_iou(conf_matrix)[1]
                sim = calculate_volumetric_similarity(conf_matrix)[1]

                print(
                    f'Sample {sample}: dice = {dice}, IoU = {iou}, similarity = {sim}'
                )

                # Save predicted full mask
                if config['training']['experiment'] == '3D':
                    print_orthogonal(
                        data,
                        invert=False,
                        res=3.2,
                        cbar=True,
                        savepath=str(args.save_dir /
                                     ('visualizations_' + snap.name) /
                                     (sample + '_input.png')),
                        scale_factor=1500)
                    print_orthogonal(
                        data,
                        mask=mask,
                        invert=False,
                        res=3.2,
                        cbar=True,
                        savepath=str(args.save_dir /
                                     ('visualizations_' + snap.name) /
                                     (sample + '_reference.png')),
                        scale_factor=1500)
                    print_orthogonal(
                        data,
                        mask=pred,
                        invert=False,
                        res=3.2,
                        cbar=True,
                        savepath=str(args.save_dir /
                                     ('visualizations_' + snap.name) /
                                     (sample + '_prediction.png')),
                        scale_factor=1500)

                # Update results
                results['Sample'].append(sample)
                results['Dice'].append(dice)
                results['IoU'].append(iou)
                results['Similarity'].append(sim)

        except AttributeError:
            print(f'Sample {sample} failed. Skipping to the next one.')
            continue

        # Append average values to the results
        results['Sample'].append('Average values')
        results['Dice'].append(np.average(results['Dice']))
        results['IoU'].append(np.average(results['IoU']))
        results['Similarity'].append(np.average(results['Similarity']))

        # Write results to Excel
        writer = pd.ExcelWriter(
            str(args.save_dir / ('metrics_' + str(snap.name))) + '.xlsx')
        df1 = pd.DataFrame(results)
        df1.to_excel(writer, sheet_name='Metrics')
        writer.close()  # closing the writer saves the workbook

        elapsed = time() - start_eval
        print(f'Metrics evaluated in {int(elapsed // 60)} minutes, '
              f'{elapsed % 60:.1f} seconds.')
Example #3
        gt_stack = read_3d_stack(masks) > 0.9

        iou_scores = {t: list() for t in thresholds}
        dice_scores = {t: list() for t in thresholds}
        vs_scores = {t: list() for t in thresholds}
        for pad in paddings:
            pbar.set_description(
                f'Processing sample [{sample_id} / pad {pad}]:')
            mask_zone = make_surf_vol(gt_stack, surf_pad=pad)
            masked_preds = pred_stack[:mask_zone.shape[0]] * mask_zone
            masked_gt = gt_stack[:mask_zone.shape[0]] * mask_zone
            for t in thresholds:
                conf_mat = calculate_confusion_matrix_from_arrays(
                    masked_preds > t, masked_gt, 2)
                # IoU
                iou = calculate_iou(conf_mat)[1]
                iou_scores[t].append(iou)
                # Dice
                dice = calculate_dice(conf_mat)[1]
                dice_scores[t].append(dice)
                # VD
                volumetric_sim = calculate_volumetric_similarity(conf_mat)[1]
                vs_scores[t].append(volumetric_sim)

        for t in thresholds:
            results[t].append([
                sample_id,
                'IoU',
            ] + iou_scores[t])
            results[t].append([
                sample_id,
                'Dice',
            ] + dice_scores[t])
            # (Snippet truncated here; an analogous append for the
            # volumetric-similarity scores presumably follows.)
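The per-threshold score lists built above are typically reduced to a single number per threshold afterwards. That reduction is not part of this snippet; a hedged sketch of one common choice, averaging Dice over all paddings and keeping the best threshold:

import numpy as np

# dice_scores maps threshold -> list of Dice values, one per padding.
mean_dice = {t: float(np.mean(scores)) for t, scores in dice_scores.items()}
best_t = max(mean_dice, key=mean_dice.get)
print(f'Best threshold: {best_t} (mean Dice = {mean_dice[best_t]:.3f})')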
Example #4
                mask, files_mask = load(str(args.mask_path / sample), axis=(0, 2, 1), rgb=False, n_jobs=args.n_threads)
            if 'subdir' in locals():
                pred, files_pred = load(str(args.prediction_path / snap / sample / subdir), rgb=False, n_jobs=args.n_threads)
            else:
                pred, files_pred = load(str(args.prediction_path / snap / sample), axis=(0, 2, 1), rgb=False, n_jobs=args.n_threads)
            data, files_data = load(str(args.image_path / sample), axis=(0, 2, 1), rgb=False, n_jobs=args.n_threads)

            # Crop to the common shape in case of inconsistency
            crop = np.minimum(pred.shape, mask.shape)  # element-wise minimum
            mask = mask[:crop[0], :crop[1], :crop[2]]
            pred = pred[:crop[0], :crop[1], :crop[2]]

            # Evaluate metrics (np.bool was removed in NumPy 1.24)
            conf_matrix = calculate_conf(pred.astype(bool), mask.astype(bool), args.n_labels)
            dice = calculate_dice(conf_matrix)[1]
            iou = calculate_iou(conf_matrix)[1]
            sim = calculate_volumetric_similarity(conf_matrix)[1]

            print(f'Sample {sample}: dice = {dice}, IoU = {iou}, similarity = {sim}')

            # Save predicted full mask
            print_orthogonal(data, invert=False, res=3.2, cbar=True,
                             savepath=str(args.save_dir / ('visualizations_' + snap) / (sample + '_input.png')),
                             scale_factor=1500)
            print_orthogonal(data, mask=mask, invert=False, res=3.2, cbar=True,
                             savepath=str(args.save_dir / ('visualizations_' + snap) / (sample + '_reference.png')),
                             scale_factor=1500)
            print_orthogonal(data, mask=pred, invert=False, res=3.2, cbar=True,
                             savepath=str(args.save_dir / ('visualizations_' + snap) / (sample + '_prediction.png')),
                             scale_factor=1500)
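calculate_volumetric_similarity is indexed with [1] in Examples #2 to #4 but its body is never shown. A common definition of volumetric similarity (see e.g. Taha & Hanbury, 2015) compares only the sizes of the two segmented volumes, not their overlap; a sketch for the foreground class of a 2x2 confusion matrix, under the same row/column assumption as the earlier sketches:

def volumetric_similarity(conf_matrix):
    # conf_matrix[i, j]: ground-truth class i predicted as class j.
    tp = conf_matrix[1, 1]
    fn = conf_matrix[1, 0]
    fp = conf_matrix[0, 1]
    denom = 2 * tp + fp + fn
    if denom == 0:
        return 1.0  # both volumes empty: treat as identical
    # VS = 1 - |FN - FP| / (2*TP + FP + FN); 1.0 means equal volumes.
    return 1.0 - abs(fn - fp) / denom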