Example no. 1
def get_scores(pred, label, vxlspacing):
    """
    pred: HxWxZ (x, y, z) of boolean
    label: HxWxZ (e.g. (512,512,75))
    vxlspacing: 3-tuple of float (spacing)

    """
    volscores = {}

    volscores['dice'] = metric.dc(pred, label)
    try:
        jaccard = metric.binary.jc(pred, label)
    except ZeroDivisionError:
        jaccard = 0.0
    volscores['jaccard'] = jaccard
    volscores['voe'] = 1. - volscores['jaccard']
    try:
        rvd = metric.ravd(label, pred)
    except Exception:
        rvd = None
    volscores['rvd'] = rvd

    if np.count_nonzero(pred) == 0 or np.count_nonzero(label) == 0:
        volscores['assd'] = 0
        volscores['msd'] = 0
    else:
        evalsurf = Surface(pred, label, physical_voxel_spacing=vxlspacing,
                           mask_offset=[0., 0., 0.], reference_offset=[0., 0., 0.])
        volscores['assd'] = evalsurf.get_average_symmetric_surface_distance()

        volscores['msd'] = metric.hd(label, pred, voxelspacing=vxlspacing)

    return volscores
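
A minimal usage sketch for get_scores, assuming the same imports as the snippet (numpy as np, from medpy import metric) plus the LiTS-style Surface helper; the shapes and spacing below are illustrative only.

import numpy as np

pred = np.zeros((64, 64, 32), dtype=bool)
label = np.zeros((64, 64, 32), dtype=bool)
pred[20:40, 20:40, 10:20] = True       # predicted structure
label[22:42, 22:42, 10:20] = True      # reference structure, slightly shifted
spacing = (1.0, 1.0, 2.5)              # voxel spacing in mm

scores = get_scores(pred, label, spacing)
for name, value in scores.items():
    print(name, value)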
Example no. 2
def scorer(pred,label,vxlspacing):
	"""

	:param pred: binary prediction volume
	:param label: binary reference volume
	:param vxlspacing: voxel spacing as a 3-tuple of floats
	:return: dict of overlap and surface-distance scores
	"""

	volscores = {}

	volscores['dice'] = metric.dc(pred,label)
	volscores['jaccard'] = metric.binary.jc(pred,label)
	volscores['voe'] = 1. - volscores['jaccard']
	volscores['rvd'] = metric.ravd(label,pred)

	if np.count_nonzero(pred) ==0 or np.count_nonzero(label)==0:
		volscores['assd'] = 0
		volscores['msd'] = 0
	else:
		evalsurf = Surface(pred,label,physical_voxel_spacing = vxlspacing,mask_offset = [0.,0.,0.], reference_offset = [0.,0.,0.])
		volscores['assd'] = evalsurf.get_average_symmetric_surface_distance()
	
		volscores['msd'] = metric.hd(label,pred,voxelspacing=vxlspacing)



	logging.info("\tDice " + str(volscores['dice']))
	logging.info("\tJaccard " + str(volscores['jaccard']))
	logging.info("\tVOE " + str(volscores['voe']))
	logging.info("\tRVD " + str(volscores['rvd']))
	logging.info("\tASSD " + str(volscores['assd']))
	logging.info("\tMSD " + str(volscores['msd']))

	return volscores
Example no. 3
def calculate_metrics(mask1, mask2):
    true_positives = metric.obj_tpr(mask1, mask2)
    false_positives = metric.obj_fpr(mask1, mask2)
    dc = metric.dc(mask1, mask2)
    hd = metric.hd(mask1, mask2)
    precision = metric.precision(mask1, mask2)
    recall = metric.recall(mask1, mask2)
    ravd = metric.ravd(mask1, mask2)
    assd = metric.assd(mask1, mask2)
    asd = metric.asd(mask1, mask2)
    return true_positives, false_positives, dc, hd, precision, recall, ravd, assd, asd
Example no. 5
def hausdorff_distance(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):

    if confusion_matrix is None:
        confusion_matrix = ConfusionMatrix(test, reference)

    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()

    if test_empty or test_full or reference_empty or reference_full:
        if nan_for_nonexisting:
            return float("NaN")
        else:
            return 0

    test, reference = confusion_matrix.test, confusion_matrix.reference

    return metric.hd(test, reference, voxel_spacing, connectivity)
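
ConfusionMatrix is provided by the surrounding evaluation module and is not shown here. A hypothetical minimal stand-in, just enough for the existence checks above (the real class typically also accumulates TP/FP/TN/FN counts):

import numpy as np

class ConfusionMatrix:
    """Hypothetical stand-in: stores the two masks and reports emptiness/fullness."""

    def __init__(self, test, reference):
        self.test = np.asarray(test, dtype=bool)
        self.reference = np.asarray(reference, dtype=bool)

    def get_existence(self):
        test_empty = not self.test.any()
        test_full = bool(self.test.all())
        reference_empty = not self.reference.any()
        reference_full = bool(self.reference.all())
        return test_empty, test_full, reference_empty, reference_full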
Example no. 6
def get_scores(pred,label,vxlspacing):
	volscores = {}

	volscores['dice'] = metric.dc(pred,label)
	volscores['jaccard'] = metric.binary.jc(pred,label)
	volscores['voe'] = 1. - volscores['jaccard']
	volscores['rvd'] = metric.ravd(label,pred)

	if np.count_nonzero(pred) ==0 or np.count_nonzero(label)==0:
		volscores['assd'] = 0
		volscores['msd'] = 0
	else:
		evalsurf = Surface(pred,label,physical_voxel_spacing = vxlspacing,mask_offset = [0.,0.,0.], reference_offset = [0.,0.,0.])
		volscores['assd'] = evalsurf.get_average_symmetric_surface_distance()

		volscores['msd'] = metric.hd(label,pred,voxelspacing=vxlspacing)

	return volscores
Example no. 7
def calculate_validation_metrics(probas_pred,
                                 image_gt,
                                 class_labels=None,
                                 num_classes=5):
    classes = np.arange(probas_pred.shape[-1])
    # determine valid classes (those that actually appear in image_gt). Some images may miss some classes
    classes = [c for c in classes if np.sum(image_gt == c) != 0]
    image_pred = probas_pred.argmax(-1)
    assert image_gt.shape == image_pred.shape
    accuracy = np.sum(image_gt == image_pred) / float(image_pred.size)
    class_metrics = {}
    y_true = convert_seg_flat_to_binary_label_indicator_array(
        image_gt.ravel(), num_classes).astype(int)[:, classes]
    y_pred = probas_pred.transpose(3, 0, 1,
                                   2).reshape(num_classes,
                                              -1).transpose(1, 0)[:, classes]
    scores = roc_auc_score(y_true, y_pred, average=None)
    for i, c in enumerate(classes):
        true_positives = metric.obj_tpr(image_gt == c, image_pred == c)
        false_positives = metric.obj_fpr(image_gt == c, image_pred == c)
        dc = metric.dc(image_gt == c, image_pred == c)
        hd = metric.hd(image_gt == c, image_pred == c)
        precision = metric.precision(image_gt == c, image_pred == c)
        recall = metric.recall(image_gt == c, image_pred == c)
        ravd = metric.ravd(image_gt == c, image_pred == c)
        assd = metric.assd(image_gt == c, image_pred == c)
        asd = metric.asd(image_gt == c, image_pred == c)
        label = c
        if class_labels is not None and c in class_labels.keys():
            label = class_labels[c]
        class_metrics[label] = {
            'true_positives': true_positives,
            'false_positives': false_positives,
            'DICE\t\t': dc,
            'Hausdorff dist': hd,
            'precision\t': precision,
            'recall\t\t': recall,
            'rel abs vol diff': ravd,
            'avg surf dist symm': assd,
            'avg surf dist\t': asd,
            'roc_auc\t\t': scores[i]
        }
    return accuracy, class_metrics
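
convert_seg_flat_to_binary_label_indicator_array is a project-specific helper not shown here; assuming it simply one-hot encodes a flat label vector, a hypothetical equivalent looks like this:

import numpy as np

def convert_seg_flat_to_binary_label_indicator_array(seg_flat, num_classes):
    # One row per voxel, one column per class; 1 where the voxel carries that label.
    indicator = np.zeros((seg_flat.shape[0], num_classes), dtype=np.uint8)
    indicator[np.arange(seg_flat.shape[0]), seg_flat.astype(int)] = 1
    return indicator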
Example no. 8
def hausdorff_distance(prediction=None,
                       reference=None,
                       confusion_matrix=None,
                       nan_for_nonexisting=True,
                       voxel_spacing=None,
                       connectivity=1,
                       **kwargs):

    if confusion_matrix is None:
        confusion_matrix = ConfusionMatrix(prediction, reference)

    prediction_empty, prediction_full, reference_empty, reference_full = confusion_matrix.get_existence(
    )

    if prediction_empty or prediction_full or reference_empty or reference_full:
        if nan_for_nonexisting:
            return float("NaN")
        else:
            return 0

    prediction, reference = confusion_matrix.prediction, confusion_matrix.reference

    return metric.hd(prediction, reference, voxel_spacing, connectivity)
Example no. 9
# =============================================================================

# SOURCE: https://loli.github.io/medpy/metric.html

import cv2 as cv
from medpy import metric as mdm   # assumed alias for the medpy distance metrics

# Obtain binary images from the images on which the contours were drawn
ret, cHull_binary = cv.threshold(cHull_justHull[:, :, 1], 0, 255, cv.THRESH_BINARY)
ret, cHull_Mbinary = cv.threshold(cHull_justMelanoma[:, :, 0], 0, 255, cv.THRESH_BINARY)


print(' ')

# =============================================================================
#     Hausdorff distance
# =============================================================================
distanceHD1 = mdm.hd(cHull_binary, cHull_Mbinary, connectivity=1)

print('Hausdorff distance', distanceHD1)



# =============================================================================
#   Average surface distance
# =============================================================================

distanceASD = mdm.asd(cHull_binary, cHull_Mbinary, connectivity=1)

print('Average surface distance', distanceASD)
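
The contour images above come from earlier processing that is not shown; the same two medpy calls can be tried on synthetic binary masks, for example:

import numpy as np
from medpy import metric as mdm

mask_a = np.zeros((100, 100), dtype=np.uint8)
mask_b = np.zeros((100, 100), dtype=np.uint8)
mask_a[20:60, 20:60] = 255     # plays the role of cHull_binary
mask_b[25:65, 25:65] = 255     # plays the role of cHull_Mbinary

print('Hausdorff distance', mdm.hd(mask_a, mask_b, connectivity=1))
print('Average surface distance', mdm.asd(mask_a, mask_b, connectivity=1))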


Example no. 10
def whd(x):
	try:
		val = hd(*x)
	except RuntimeError:
		val = numpy.inf
	return val
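
medpy's hd raises a RuntimeError when one of the masks is empty, which is what the wrapper above turns into inf. A short usage sketch, assuming hd is medpy.metric.binary.hd and numpy is imported as in the snippet:

import numpy
from medpy.metric.binary import hd

a = numpy.zeros((32, 32), dtype=bool)
b = numpy.zeros((32, 32), dtype=bool)
a[8:16, 8:16] = True           # b stays empty on purpose

print(whd((a, a)))             # 0.0 for identical masks
print(whd((a, b)))             # inf: hd raises RuntimeError for the empty mask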
Example no. 11
    # predict
    pred = []
    for idx in range(img.shape[2]):
        net1.blobs['data'].data[0, 0, ...] = img_p[..., idx]
        pred.append(net1.forward()['prob'][0, 1] > 0.5)

    pred = np.array(pred).transpose(1, 2, 0)

    # create volume instance from medpy
    v = volume.Volume(pred, lbl_p)
    # calculate metrics as in the original paper
    voe = v.get_volumetric_overlap_error()
    rvd = v.get_relative_volume_difference()
    asd = metric.asd(pred, lbl_p)
    msd = metric.hd(pred, lbl_p)
    dice = metric.dc(pred, lbl_p) * 100  # convert to percentage

    perf_metrics.append([voe, rvd, asd, msd, dice])
    print('subject %d: %s' % (idx_subject, str([voe, rvd, asd, msd, dice])))

perf_metrics = np.array(perf_metrics)
perf_metrics_mean = np.mean(perf_metrics, axis=0)

print('inference complete: mean of performance metrics')
print(perf_metrics_mean)
"""
# visualize the results
for idx in range(30, 100, 20):
    utils.imshow(img[...,idx], img_p[..., idx], lbl_p[...,idx], pred[...,idx])
"""
Example no. 12
def main(input_folder,
         output_folder,
         model_path,
         exp_config,
         do_postprocessing=False,
         gt_exists=True):

    # Get Data
    data_loader = data_switch(exp_config.data_identifier)
    data = data_loader(exp_config)

    # Make and restore vagan model
    segmenter_model = segmenter(
        exp_config=exp_config, data=data,
        fixed_batch_size=1)  # CRF model requires fixed batch size
    segmenter_model.load_weights(model_path, type='best_dice')

    total_time = 0
    total_volumes = 0

    dice_list = []
    assd_list = []
    hd_list = []

    for folder in os.listdir(input_folder):

        folder_path = os.path.join(input_folder, folder)

        if os.path.isdir(folder_path):

            infos = {}
            for line in open(os.path.join(folder_path, 'Info.cfg')):
                label, value = line.split(':')
                infos[label] = value.rstrip('\n').lstrip(' ')

            patient_id = folder.lstrip('patient')

            if not int(patient_id) % 5 == 0:
                continue

            ED_frame = int(infos['ED'])
            ES_frame = int(infos['ES'])

            for file in glob.glob(
                    os.path.join(folder_path, 'patient???_frame??.nii.gz')):

                logging.info(' ----- Doing image: -------------------------')
                logging.info('Doing: %s' % file)
                logging.info(' --------------------------------------------')

                file_base = file.split('.nii.gz')[0]

                frame = int(file_base.split('frame')[-1])
                img, img_affine, img_header = utils.load_nii(file)
                img = utils.normalise_image(img)
                zooms = img_header.get_zooms()

                if gt_exists:
                    file_mask = file_base + '_gt.nii.gz'
                    mask, mask_affine, mask_header = utils.load_nii(file_mask)

                start_time = time.time()

                if exp_config.dimensionality_mode == '2D':

                    pixel_size = (img_header.structarr['pixdim'][1],
                                  img_header.structarr['pixdim'][2])
                    scale_vector = (pixel_size[0] /
                                    exp_config.target_resolution[0],
                                    pixel_size[1] /
                                    exp_config.target_resolution[1])

                    predictions = []

                    nx, ny = exp_config.image_size

                    for zz in range(img.shape[2]):

                        slice_img = np.squeeze(img[:, :, zz])
                        slice_rescaled = transform.rescale(slice_img,
                                                           scale_vector,
                                                           order=1,
                                                           preserve_range=True,
                                                           multichannel=False,
                                                           mode='constant')

                        x, y = slice_rescaled.shape

                        x_s = (x - nx) // 2
                        y_s = (y - ny) // 2
                        x_c = (nx - x) // 2
                        y_c = (ny - y) // 2

                        # Crop section of image for prediction
                        if x > nx and y > ny:
                            slice_cropped = slice_rescaled[x_s:x_s + nx,
                                                           y_s:y_s + ny]
                        else:
                            slice_cropped = np.zeros((nx, ny))
                            if x <= nx and y > ny:
                                slice_cropped[x_c:x_c +
                                              x, :] = slice_rescaled[:,
                                                                     y_s:y_s +
                                                                     ny]
                            elif x > nx and y <= ny:
                                slice_cropped[:, y_c:y_c +
                                              y] = slice_rescaled[x_s:x_s +
                                                                  nx, :]
                            else:
                                slice_cropped[x_c:x_c + x, y_c:y_c +
                                              y] = slice_rescaled[:, :]

                        # GET PREDICTION
                        network_input = np.float32(
                            np.tile(np.reshape(slice_cropped, (nx, ny, 1)),
                                    (1, 1, 1, 1)))
                        mask_out, softmax = segmenter_model.predict(
                            network_input)

                        prediction_cropped = np.squeeze(softmax[0, ...])

                        # ASSEMBLE BACK THE SLICES
                        slice_predictions = np.zeros(
                            (x, y, exp_config.nlabels))
                        # insert cropped region into original image again
                        if x > nx and y > ny:
                            slice_predictions[x_s:x_s + nx, y_s:y_s +
                                              ny, :] = prediction_cropped
                        else:
                            if x <= nx and y > ny:
                                slice_predictions[:, y_s:y_s +
                                                  ny, :] = prediction_cropped[
                                                      x_c:x_c + x, :, :]
                            elif x > nx and y <= ny:
                                slice_predictions[
                                    x_s:x_s +
                                    nx, :, :] = prediction_cropped[:, y_c:y_c +
                                                                   y, :]
                            else:
                                slice_predictions[:, :, :] = prediction_cropped[
                                    x_c:x_c + x, y_c:y_c + y, :]

                        # RESCALING ON THE LOGITS
                        if gt_exists:
                            prediction = transform.resize(
                                slice_predictions,
                                (mask.shape[0], mask.shape[1],
                                 exp_config.nlabels),
                                order=1,
                                preserve_range=True,
                                mode='constant')
                        else:  # This can occasionally lead to wrong volume size, therefore if gt_exists
                            # we use the gt mask size for resizing.
                            prediction = transform.rescale(
                                slice_predictions, (1.0 / scale_vector[0],
                                                    1.0 / scale_vector[1], 1),
                                order=1,
                                preserve_range=True,
                                multichannel=False,
                                mode='constant')

                        prediction = np.uint8(np.argmax(prediction, axis=-1))
                        # import matplotlib.pyplot as plt
                        # fig = plt.Figure()
                        # for ii in range(3):
                        #     plt.subplot(1, 3, ii + 1)
                        #     plt.imshow(np.squeeze(prediction))
                        # plt.show()

                        predictions.append(prediction)

                    prediction_arr = np.transpose(
                        np.asarray(predictions, dtype=np.uint8), (1, 2, 0))

                elif exp_config.dimensionality_mode == '3D':

                    nx, ny, nz = exp_config.image_size

                    pixel_size = (img_header.structarr['pixdim'][1],
                                  img_header.structarr['pixdim'][2],
                                  img_header.structarr['pixdim'][3])

                    scale_vector = (pixel_size[0] /
                                    exp_config.target_resolution[0],
                                    pixel_size[1] /
                                    exp_config.target_resolution[1],
                                    pixel_size[2] /
                                    exp_config.target_resolution[2])

                    vol_scaled = transform.rescale(img,
                                                   scale_vector,
                                                   order=1,
                                                   preserve_range=True,
                                                   multichannel=False,
                                                   mode='constant')

                    nz_max = exp_config.image_size[2]
                    slice_vol = np.zeros((nx, ny, nz_max), dtype=np.float32)

                    nz_curr = vol_scaled.shape[2]
                    stack_from = (nz_max - nz_curr) // 2
                    stack_counter = stack_from

                    x, y, z = vol_scaled.shape

                    x_s = (x - nx) // 2
                    y_s = (y - ny) // 2
                    x_c = (nx - x) // 2
                    y_c = (ny - y) // 2

                    for zz in range(nz_curr):

                        slice_rescaled = vol_scaled[:, :, zz]

                        if x > nx and y > ny:
                            slice_cropped = slice_rescaled[x_s:x_s + nx,
                                                           y_s:y_s + ny]
                        else:
                            slice_cropped = np.zeros((nx, ny))
                            if x <= nx and y > ny:
                                slice_cropped[x_c:x_c +
                                              x, :] = slice_rescaled[:,
                                                                     y_s:y_s +
                                                                     ny]
                            elif x > nx and y <= ny:
                                slice_cropped[:, y_c:y_c +
                                              y] = slice_rescaled[x_s:x_s +
                                                                  nx, :]

                            else:
                                slice_cropped[x_c:x_c + x, y_c:y_c +
                                              y] = slice_rescaled[:, :]

                        slice_vol[:, :, stack_counter] = slice_cropped
                        stack_counter += 1

                    stack_to = stack_counter

                    network_input = np.float32(
                        np.reshape(slice_vol, (1, nx, ny, nz_max, 1)))
                    start_time = time.time()
                    mask_out, softmax = segmenter_model.predict(network_input)
                    logging.info('Classified 3D: %f secs' %
                                 (time.time() - start_time))

                    prediction_nzs = mask_out[0, :, :, stack_from:
                                              stack_to]  # non-zero-slices

                    if not prediction_nzs.shape[2] == nz_curr:
                        raise ValueError('sizes mismatch')

                    # ASSEMBLE BACK THE SLICES
                    prediction_scaled = np.zeros(
                        vol_scaled.shape)  # last dim is for logits classes

                    # insert cropped region into original image again
                    if x > nx and y > ny:
                        prediction_scaled[x_s:x_s + nx,
                                          y_s:y_s + ny, :] = prediction_nzs
                    else:
                        if x <= nx and y > ny:
                            prediction_scaled[:, y_s:y_s +
                                              ny, :] = prediction_nzs[x_c:x_c +
                                                                      x, :, :]
                        elif x > nx and y <= ny:
                            prediction_scaled[
                                x_s:x_s +
                                nx, :, :] = prediction_nzs[:, y_c:y_c + y, :]
                        else:
                            prediction_scaled[:, :, :] = prediction_nzs[
                                x_c:x_c + x, y_c:y_c + y, :]

                    logging.info('Prediction_scaled mean %f' %
                                 (np.mean(prediction_scaled)))

                    prediction = transform.resize(
                        prediction_scaled,
                        (mask.shape[0], mask.shape[1], mask.shape[2], 1),
                        order=1,
                        preserve_range=True,
                        mode='constant')
                    prediction = np.argmax(prediction, axis=-1)
                    prediction_arr = np.asarray(prediction, dtype=np.uint8)

                # This is the same for 2D and 3D again
                if do_postprocessing:
                    prediction_arr = utils.keep_largest_connected_components(
                        prediction_arr)

                elapsed_time = time.time() - start_time
                total_time += elapsed_time
                total_volumes += 1

                logging.info('Evaluation of volume took %f secs.' %
                             elapsed_time)

                if frame == ED_frame:
                    frame_suffix = '_ED'
                elif frame == ES_frame:
                    frame_suffix = '_ES'
                else:
                    raise ValueError(
                        'Frame doesnt correspond to ED or ES. frame = %d, ED = %d, ES = %d'
                        % (frame, ED_frame, ES_frame))

                # Save predicted mask
                out_file_name = os.path.join(
                    output_folder, 'prediction',
                    'patient' + patient_id + frame_suffix + '.nii.gz')
                if gt_exists:
                    out_affine = mask_affine
                    out_header = mask_header
                else:
                    out_affine = img_affine
                    out_header = img_header

                logging.info('saving to: %s' % out_file_name)
                utils.save_nii(out_file_name, prediction_arr, out_affine,
                               out_header)

                # Save image data to the same folder for convenience
                image_file_name = os.path.join(
                    output_folder, 'image',
                    'patient' + patient_id + frame_suffix + '.nii.gz')
                logging.info('saving to: %s' % image_file_name)
                utils.save_nii(image_file_name, img, out_affine, out_header)

                if gt_exists:

                    # Save GT image
                    gt_file_name = os.path.join(
                        output_folder, 'ground_truth',
                        'patient' + patient_id + frame_suffix + '.nii.gz')
                    logging.info('saving to: %s' % gt_file_name)
                    utils.save_nii(gt_file_name, mask, out_affine, out_header)

                    # Save difference mask between predictions and ground truth
                    difference_mask = np.where(
                        np.abs(prediction_arr - mask) > 0, [1], [0])
                    difference_mask = np.asarray(difference_mask,
                                                 dtype=np.uint8)
                    diff_file_name = os.path.join(
                        output_folder, 'difference',
                        'patient' + patient_id + frame_suffix + '.nii.gz')
                    logging.info('saving to: %s' % diff_file_name)
                    utils.save_nii(diff_file_name, difference_mask, out_affine,
                                   out_header)

                # calculate metrics
                y_ = prediction_arr
                y = mask

                per_lbl_dice = []
                per_lbl_assd = []
                per_lbl_hd = []

                for lbl in [3, 1, 2]:  #range(exp_config.nlabels):

                    binary_pred = (y_ == lbl) * 1
                    binary_gt = (y == lbl) * 1

                    if np.sum(binary_gt) == 0 and np.sum(binary_pred) == 0:
                        per_lbl_dice.append(1)
                        per_lbl_assd.append(0)
                        per_lbl_hd.append(0)
                    elif np.sum(binary_pred) > 0 and np.sum(
                            binary_gt) == 0 or np.sum(
                                binary_pred) == 0 and np.sum(binary_gt) > 0:
                        logging.warning(
                            'Structure missing in either GT (x)or prediction. ASSD and HD will not be accurate.'
                        )
                        per_lbl_dice.append(0)
                        per_lbl_assd.append(1)
                        per_lbl_hd.append(1)
                    else:
                        per_lbl_dice.append(dc(binary_pred, binary_gt))
                        per_lbl_assd.append(
                            assd(binary_pred, binary_gt, voxelspacing=zooms))
                        per_lbl_hd.append(
                            hd(binary_pred, binary_gt, voxelspacing=zooms))

                dice_list.append(per_lbl_dice)
                assd_list.append(per_lbl_assd)
                hd_list.append(per_lbl_hd)

    logging.info('Average time per volume: %f' % (total_time / total_volumes))

    dice_arr = np.asarray(dice_list)
    assd_arr = np.asarray(assd_list)
    hd_arr = np.asarray(hd_list)

    mean_per_lbl_dice = dice_arr.mean(axis=0)
    mean_per_lbl_assd = assd_arr.mean(axis=0)
    mean_per_lbl_hd = hd_arr.mean(axis=0)

    logging.info('Dice')
    logging.info(mean_per_lbl_dice)
    logging.info(np.mean(mean_per_lbl_dice))
    logging.info('ASSD')
    logging.info(mean_per_lbl_assd)
    logging.info(np.mean(mean_per_lbl_assd))
    logging.info('HD')
    logging.info(mean_per_lbl_hd)
    logging.info(np.mean(mean_per_lbl_hd))
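
The centre crop-or-pad logic above appears several times (both 2D branches and again in the 3D path); a compact sketch of the same idea as a reusable helper, with a hypothetical name and zero padding as in the example:

import numpy as np

def crop_or_pad_slice(slice_2d, nx, ny):
    # Centre-crop axes that are too large and zero-pad axes that are too small.
    x, y = slice_2d.shape
    out = np.zeros((nx, ny), dtype=slice_2d.dtype)
    x_s, y_s = max((x - nx) // 2, 0), max((y - ny) // 2, 0)   # source offsets (crop)
    x_c, y_c = max((nx - x) // 2, 0), max((ny - y) // 2, 0)   # target offsets (pad)
    dx, dy = min(x, nx), min(y, ny)
    out[x_c:x_c + dx, y_c:y_c + dy] = slice_2d[x_s:x_s + dx, y_s:y_s + dy]
    return out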
Example no. 13
    name_sp = name.split(sep='_')
    name_num = re.findall(r'\d+', name_sp[1])[0]
#    name_num = name[3:5]
    gt_nii = nb.load(os.path.join(gt_path, name_sp[0] + '_' + name_num + '.nii.gz'))
    gt = gt_nii.get_data() # tumorgt
    vxlspacing = gt_nii.header.get_zooms()
    seg = nb.load(os.path.join(seg_path, name)).get_data()
    seg = np.squeeze(seg)
#    seg = np.uint8(seg>1)
    if np.max(seg)>0:
#        gt = gt>1
#        seg = getLargestCC(seg>0)
        seg_dice = round(metric.dc(seg>0, gt), 4)
        seg_RAVD = round(metric.ravd(seg, gt), 4)   
        seg_ASSD = round(metric.binary.assd(seg, gt, vxlspacing), 4)
        seg_MSSD = round(metric.hd(seg, gt, vxlspacing), 4)
    else:
        seg_dice = 0.0
        seg_RAVD = 0.0  
        seg_ASSD = 0.0
        seg_MSSD = 0.0
    
    
    seg_measures['image'].append(name)
    seg_measures['Dice'].append(seg_dice)
    seg_measures['RAVD'].append(seg_RAVD)
    seg_measures['ASSD'].append(seg_ASSD)
    seg_measures['MSSD'].append(seg_MSSD)
    print(name, seg_dice)
    
print('Average Dice: ', sum(seg_measures['Dice'])/len(filenames))
Example no. 14
     iris_prediction = cv2.resize(
         iris_prediction, (original_shape[1], original_shape[0]),
         interpolation=cv2.INTER_LINEAR)
 _, iris_prediction = cv2.threshold(iris_prediction, 0.5, 1, 0)
 iris_prediction = keep_large_area(iris_prediction,
                                   top_n_large=1).astype(np.uint8)
 outer_prediction = (1 - flood(iris_prediction,
                               (0, 0))).astype(np.uint8)
 inner_prediction = (outer_prediction - iris_prediction).astype(
     np.uint8)
 if args.ellipse:
     inner_prediction = fit_Ellipse(inner_prediction).astype(np.uint8)
     outer_prediction = fit_Ellipse(outer_prediction).astype(np.uint8)
 Dice = compute_dice(outer_prediction - inner_prediction,
                     local_outer - local_inner)
 HD = hd(outer_prediction - inner_prediction, local_outer - local_inner)
 print(i, img_name, Dice, HD)
 name_all.append(path)
 Dice_all.append(Dice)
 HD_all.append(HD)
 inner_save = np.zeros(inner_prediction.shape)
 contours, _ = cv2.findContours(inner_prediction, cv2.RETR_TREE,
                                cv2.CHAIN_APPROX_NONE)
 cv2.drawContours(inner_save, contours, -1, 255, 1)
 outer_save = np.zeros(outer_prediction.shape)
 contours, _ = cv2.findContours(outer_prediction, cv2.RETR_TREE,
                                cv2.CHAIN_APPROX_NONE)
 cv2.drawContours(outer_save, contours, -1, 255, 1)
 io.imsave(
     os.path.join(new_dir, 'inner_predictions',
                  os.path.splitext(img_name)[0] + '.png'),
Example no. 15
    def process(self, inputs, outputs):
        for input, output in zip(inputs, outputs):
            self.count += len(output['instances'])
            gt_segm = self.annotations[input['file_name']]['segm_per_region']
            try:
                _ = output['instances'].pred_masks
            except AttributeError:
                continue
            pred_segm = downsample_points(output)
            doc_ahd = {cat: [] for cat in categories_list}
            doc_hd = {cat: [] for cat in categories_list}
            doc_hd95 = {cat: [] for cat in categories_list}
            doc_iou = {cat: [] for cat in categories_list}
            doc_acc = {cat: [] for cat in categories_list}
            for reg_type in range(len(categories_list)):
                gt, pred = gt_segm[reg_type], pred_segm[reg_type]

                # Both have points
                if len(gt) and len(pred):
                    gt_mask = np.zeros((input['height'], input['width']),
                                       dtype=np.int8)
                    for i in gt:
                        cv2.fillPoly(gt_mask,
                                     np.array([i]).astype(np.int32), 1)
                    pred_mask = np.zeros((input['height'], input['width']),
                                         dtype=np.int8)
                    for i in pred:
                        cv2.fillPoly(pred_mask,
                                     np.array([i]).astype(np.int32), 1)
                    gt_mask = gt_mask.astype(np.uint8)
                    gt_mask = (gt_mask * 255).astype(np.uint8)
                    pred_mask = pred_mask.astype(np.uint8)
                    pred_mask = (pred_mask * 255).astype(np.uint8)

                    def compute_iou_and_accuracy(arrs, edge_mask1):
                        intersection = cv2.bitwise_and(arrs, edge_mask1)
                        union = cv2.bitwise_or(arrs, edge_mask1)
                        intersection_sum = np.sum(intersection)
                        union_sum = np.sum(union)
                        iou = (intersection_sum) / (union_sum)
                        total = np.sum(arrs)
                        correct_predictions = intersection_sum
                        accuracy = correct_predictions / total
                        # print(iou, accuracy)
                        return iou, accuracy

                    res_iou, res_accuracy = compute_iou_and_accuracy(
                        pred_mask, gt_mask)
                    res_ahd, res_hd, res_hd95 = assd(pred_mask, gt_mask), hd(
                        pred_mask, gt_mask), hd95(pred_mask, gt_mask)
                    self.ahd[categories_list[reg_type]].append(res_ahd)
                    self.hd[categories_list[reg_type]].append(res_hd)
                    self.hd95[categories_list[reg_type]].append(res_hd95)
                    self.iou[categories_list[reg_type]].append(res_iou)
                    self.acc[categories_list[reg_type]].append(res_accuracy)

                    doc_ahd[categories_list[reg_type]].append(res_ahd)
                    doc_hd[categories_list[reg_type]].append(res_hd)
                    doc_hd95[categories_list[reg_type]].append(res_hd95)
                    doc_iou[categories_list[reg_type]].append(res_iou)
                    doc_acc[categories_list[reg_type]].append(res_accuracy)
                # One has points
                # elif len(gt) ^ len(pred):
                #     total_area = 0
                #     for each_pred in pred:
                #         total_area += PolyArea(each_pred[:, 0], each_pred[:, 1])
                #     hd = total_area / 100
                #     self.ahd[categories_list[reg_type]].append(hd)
                #     self.hd[categories_list[reg_type]].append(hd)
                #     self.hd95[categories_list[reg_type]].append(hd)
                # self.iou[categories_list[reg_type]].append(0)
                # self.acc[categories_list[reg_type]].append(0)
                # Both Empty
                # elif len(gt) == 0 and len(pred) != 0:

                else:
                    # self.hd[categories_list[reg_type]].append(0)
                    pass
            # print("Over for doc")
            total_ahd = list()
            for l in doc_ahd.values():
                total_ahd.extend(l)
            doc_ahd = np.mean(total_ahd)

            total_hd = list()
            for l in doc_hd.values():
                total_hd.extend(l)
            doc_hd = np.mean(total_hd)

            total_hd95 = list()
            for l in doc_hd95.values():
                total_hd95.extend(l)
            doc_hd95 = np.mean(total_hd95)

            total_iou = list()
            for l in doc_iou.values():
                total_iou.extend(l)
            doc_iou = np.mean(total_iou)

            total_acc = list()
            for l in doc_acc.values():
                total_acc.extend(l)
            doc_acc = np.mean(total_acc)

            self.doc_wise[input['file_name']] = {
                "AHD": doc_ahd,
                "IOU": doc_iou,
                "HD": doc_hd,
                "HD95": doc_hd95,
                "ACC": doc_acc
            }
Example no. 16
def main(model_path, exp_config, do_plots=False):

    # Get Data
    data_loader = data_switch(exp_config.data_identifier)
    data = data_loader(exp_config)

    # Make and restore vagan model
    segmenter_model = segmenter(exp_config=exp_config, data=data, fixed_batch_size=1)  # CRF model requires fixed batch size
    segmenter_model.load_weights(model_path, type='best_dice')

    # Run predictions in an endless loop
    dice_list = []
    assd_list = []
    hd_list = []

    for ii, batch in enumerate(data.test.iterate_batches(1)):

        if ii % 100 == 0:
            logging.info("Progress: %d" % ii)

        x, y = batch

        y_ = segmenter_model.predict(x)[0]

        per_lbl_dice = []
        per_lbl_assd = []
        per_lbl_hd = []
        per_pixel_preds = []
        per_pixel_gts = []

        if do_plots and not sys_config.running_on_gpu_host:
            fig = plt.figure()
            fig.add_subplot(131)
            plt.imshow(np.squeeze(x), cmap='gray')
            fig.add_subplot(132)
            plt.imshow(np.squeeze(y_))
            fig.add_subplot(133)
            plt.imshow(np.squeeze(y))
            plt.show()

        for lbl in range(exp_config.nlabels):

            binary_pred = (y_ == lbl) * 1
            binary_gt = (y == lbl) * 1

            if np.sum(binary_gt) == 0 and np.sum(binary_pred) == 0:
                per_lbl_dice.append(1)
                per_lbl_assd.append(0)
                per_lbl_hd.append(0)
            elif np.sum(binary_pred) > 0 and np.sum(binary_gt) == 0 or np.sum(binary_pred) == 0 and np.sum(binary_gt) > 0:
                logging.warning('Structure missing in either GT (x)or prediction. ASSD and HD will not be accurate.')
                per_lbl_dice.append(0)
                per_lbl_assd.append(1)
                per_lbl_hd.append(1)
            else:
                per_lbl_dice.append(dc(binary_pred, binary_gt))
                per_lbl_assd.append(assd(binary_pred, binary_gt))
                per_lbl_hd.append(hd(binary_pred, binary_gt))

        dice_list.append(per_lbl_dice)
        assd_list.append(per_lbl_assd)
        hd_list.append(per_lbl_hd)

        per_pixel_preds.append(y_.flatten())
        per_pixel_gts.append(y.flatten())

    dice_arr = np.asarray(dice_list)
    assd_arr = np.asarray(assd_list)
    hd_arr = np.asarray(hd_list)

    mean_per_lbl_dice = dice_arr.mean(axis=0)
    mean_per_lbl_assd = assd_arr.mean(axis=0)
    mean_per_lbl_hd = hd_arr.mean(axis=0)

    logging.info('Dice')
    logging.info(structures_dict)
    logging.info(mean_per_lbl_dice)
    logging.info(np.mean(mean_per_lbl_dice))
    logging.info('foreground mean: %f' % (np.mean(mean_per_lbl_dice[1:])))
    logging.info('ASSD')
    logging.info(structures_dict)
    logging.info(mean_per_lbl_assd)
    logging.info(np.mean(mean_per_lbl_assd))
    logging.info('HD')
    logging.info(structures_dict)
    logging.info(mean_per_lbl_hd)
    logging.info(np.mean(mean_per_lbl_hd))
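
The per-label guard used in the last two examples (Dice 1 and distances 0 when both masks are empty, Dice 0 and distances 1 when only one of them is) can be factored into a helper. A sketch with a hypothetical name, reusing the fallback values from the examples above:

import logging

import numpy as np
from medpy.metric import assd, dc, hd

def per_label_scores(binary_pred, binary_gt, voxelspacing=None):
    pred_empty = np.sum(binary_pred) == 0
    gt_empty = np.sum(binary_gt) == 0
    if pred_empty and gt_empty:
        return 1.0, 0.0, 0.0   # both empty: treat as perfect agreement
    if pred_empty != gt_empty:
        logging.warning('Structure missing in either GT or prediction; ASSD and HD are not meaningful.')
        return 0.0, 1.0, 1.0   # placeholder values, as in the examples above
    return (dc(binary_pred, binary_gt),
            assd(binary_pred, binary_gt, voxelspacing=voxelspacing),
            hd(binary_pred, binary_gt, voxelspacing=voxelspacing))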