Example #1
import numpy as np
from medpy.metric.binary import asd


def AverageSurfaceDist(pred, gt, replace_NaN=100):
    """Per-class average surface distance (ASD) for a 4-tissue brain
    segmentation (csf/gm/wm/tm). Classes missing from the prediction
    get the sentinel ``replace_NaN``, since medpy's ``asd`` raises on
    empty masks. ``ASD['avg']`` averages the four foreground classes."""
    ASD = {}
    ASD['bg'] = asd(pred == 0, gt == 0)
    for name, c in [('csf', 1), ('gm', 2), ('wm', 3), ('tm', 4)]:
        if c in np.unique(pred):
            ASD[name] = asd(pred == c, gt == c)
        else:
            ASD[name] = replace_NaN
    ASD['avg'] = (ASD['csf'] + ASD['gm'] + ASD['wm'] + ASD['tm']) / 4

    return ASD
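
A minimal usage sketch (not part of the original): tiny synthetic volumes in which only the csf class is predicted, so gm/wm/tm fall back to replace_NaN.

import numpy as np

pred = np.zeros((16, 16, 16), dtype=np.uint8)
gt = np.zeros((16, 16, 16), dtype=np.uint8)
pred[4:10, 4:10, 4:10] = 1   # predicted csf blob
gt[5:11, 5:11, 5:11] = 1     # ground-truth csf blob

scores = AverageSurfaceDist(pred, gt)
print(scores['csf'], scores['avg'])  # gm/wm/tm fell back to replace_NaN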
Example #2
import numpy as np
from medpy.metric.binary import (dc, hd, hd95, asd, assd, jc,
                                 precision, recall, sensitivity, specificity)


def update_eval_metrics(preds, labels, eval_metrics):
    # Promote a single 2D slice to a batch of one.
    if len(labels.shape) == 2:
        preds = np.expand_dims(preds, axis=0)
        labels = np.expand_dims(labels, axis=0)

    N = labels.shape[0]

    for i in range(N):
        pred = preds[i, :, :]
        label = labels[i, :, :]
        eval_metrics['dice score'].append(dc(pred, label))
        eval_metrics['precision'].append(precision(pred, label))
        eval_metrics['recall'].append(recall(pred, label))
        eval_metrics['sensitivity'].append(sensitivity(pred, label))
        eval_metrics['specificity'].append(specificity(pred, label))

        # Surface-distance metrics are undefined when either mask is empty.
        if np.sum(pred) > 0 and np.sum(label) > 0:
            eval_metrics['hausdorff'].append(hd(pred, label))
            eval_metrics['hausdorff 95%'].append(hd95(pred, label))
            eval_metrics['asd'].append(asd(pred, label))
            eval_metrics['assd'].append(assd(pred, label))
            eval_metrics['jaccard'].append(jc(pred, label))
        else:
            eval_metrics['hausdorff'].append(float('nan'))
            eval_metrics['hausdorff 95%'].append(float('nan'))
            eval_metrics['asd'].append(float('nan'))
            eval_metrics['assd'].append(float('nan'))
            eval_metrics['jaccard'].append(float('nan'))

    return eval_metrics
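
A hedged sketch of one way to set up and reduce the accumulator; defaultdict(list) and the nanmean reduction are assumptions, and pred_batch/label_batch are hypothetical placeholders. The float('nan') sentinels above are what make nanmean work here.

from collections import defaultdict
import numpy as np

eval_metrics = defaultdict(list)
eval_metrics = update_eval_metrics(pred_batch, label_batch, eval_metrics)

# nanmean skips the slices where the surface metrics were undefined
summary = {k: np.nanmean(v) for k, v in eval_metrics.items()}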
Example #3
import numpy as np
from medpy.metric.binary import dc, hd, asd


def metrics2(img_gt, img_pred, apply_hd=False, apply_asd=False):
    """
    Evaluate a model on MM-WHS data in batches.
    :param img_gt: the ground truth
    :param img_pred: the prediction
    :param apply_hd: whether to evaluate the Hausdorff Distance
    :param apply_asd: whether to evaluate the Average Surface Distance
    :return: dict mapping each class name to [dice, hd, asd]
    """
    if img_gt.ndim != img_pred.ndim:
        raise ValueError("The arrays 'img_gt' and 'img_pred' should have the "
                         "same dimension, {} against {}".format(img_gt.ndim,
                                                                img_pred.ndim))
    res = {}
    class_name = ["myo", "la", "lv", "aa"]
    # Loop over the foreground classes of the input images
    for c, cls_name in zip([1, 2, 3, 4], class_name):
        # Binarise each class; np.where leaves the inputs untouched
        gt_c_i = np.where(img_gt == c, 1, 0)
        pred_c_i = np.where(img_pred == c, 1, 0)

        # Compute the Dice
        dice = dc(gt_c_i, pred_c_i)
        h_d, a_sd = 0, 0
        if apply_hd:
            h_d = hd(gt_c_i, pred_c_i)
        if apply_asd:
            a_sd = asd(gt_c_i, pred_c_i)
        res[cls_name] = [dice, h_d, a_sd]
    return res
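
A quick sanity check with a deterministic synthetic label map; the label values 1-4 follow the myo/la/lv/aa convention of class_name.

import numpy as np

gt = np.zeros((32, 32, 32), dtype=np.uint8)
gt[4:8, 4:8, 4:8] = 1     # myo
gt[10:14, 4:8, 4:8] = 2   # la
gt[4:8, 10:14, 4:8] = 3   # lv
gt[4:8, 4:8, 10:14] = 4   # aa
pred = gt.copy()          # a perfect prediction

res = metrics2(gt, pred, apply_hd=True, apply_asd=True)
print(res["myo"])         # [1.0, 0.0, 0.0]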
Example #4
import numpy as np
import SimpleITK as sitk
from medpy.metric.binary import asd as medpy_asd


def asd(predictions, labels, num_classes):
    """Calculates the average surface distance (ASD) for each class
        between labels and predictions.

    Args:
        predictions (np.ndarray): predictions
        labels (np.ndarray): labels
        num_classes (int): number of classes to calculate the surface
            distance for

    Returns:
        np.ndarray: average surface distance per class
    """

    asd_values = np.zeros(num_classes)

    p = sitk.GetImageFromArray(predictions.astype(np.uint8))
    l = sitk.GetImageFromArray(labels.astype(np.uint8))

    for i in range(num_classes):
        # Binarise both images on the current class label i.
        lTestImage = sitk.BinaryThreshold(p, i, i, 1, 0)
        lResultImage = sitk.BinaryThreshold(l, i, i, 1, 0)

        pT = sitk.GetArrayFromImage(lTestImage)
        lT = sitk.GetArrayFromImage(lResultImage)

        asd_values[i] = medpy_asd(pT, lT)

    return asd_values.astype(np.float32)
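
A sanity check (an assumption to verify, not part of the original): since the SimpleITK round-trip only binarises each label, the wrapper should agree with calling medpy directly on boolean masks.

import numpy as np
from medpy.metric.binary import asd as medpy_asd

pred = np.zeros((24, 24), dtype=np.uint8)
pred[4:12, 4:12] = 1
gt = np.zeros((24, 24), dtype=np.uint8)
gt[6:14, 6:14] = 1

per_class = asd(pred, gt, num_classes=2)  # the wrapper defined above
assert np.isclose(per_class[1], medpy_asd(pred == 1, gt == 1))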
Example #5
import numpy as np
from medpy.metric.binary import hd, dc, asd


def metrics(img_gt, img_pred, ifhd=True, ifasd=True):
    if img_gt.ndim != img_pred.ndim:
        raise ValueError("The arrays 'img_gt' and 'img_pred' should have the "
                         "same dimension, {} against {}".format(
                             img_gt.ndim, img_pred.ndim))

    res = []
    cat = ['Myo', 'LA-blood', 'LV-blood', 'AA']  # classes 1..4, in order
    for c in range(len(cat)):
        # Binarise class c + 1; np.where leaves the inputs untouched
        gt_c_i = np.where(img_gt == c + 1, 1, 0)
        pred_c_i = np.where(img_pred == c + 1, 1, 0)

        # Compute the Dice
        dice = dc(gt_c_i, pred_c_i)
        # hd/asd raise RuntimeError on empty masks; fall back to -1
        try:
            h_d = hd(gt_c_i, pred_c_i) if ifhd else -1
        except RuntimeError:
            h_d = -1
        try:
            a_sd = asd(gt_c_i, pred_c_i) if ifasd else -1
        except RuntimeError:
            a_sd = -1
        res += [dice, h_d, a_sd]

    return res
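
The flat return value can be regrouped per class; this reshape is a usage sketch, with img_gt/img_pred as hypothetical inputs.

import numpy as np

res = metrics(img_gt, img_pred, ifhd=True, ifasd=True)
# One (dice, hd, asd) triple per class, in Myo/LA-blood/LV-blood/AA order
per_class = np.array(res).reshape(4, 3)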
Example #6
import numpy as np
from medpy.metric.binary import asd


def channelwise_asd(pred, target):
    """ calculate channel-wise average symmetric surface distance """
    # Penalty used when a channel is predicted empty but has ground truth
    max_asd = 2 * target.shape[1]
    channel_res = []
    for c_inx in range(0, len(pred)):
        # Channels with no ground-truth foreground are skipped entirely
        if np.sum(target[c_inx]) != 0:
            if np.sum(pred[c_inx]) == 0:
                res = max_asd
            else:
                res = asd(pred[c_inx], target[c_inx])

            channel_res.append(res)

    # Note: raises ZeroDivisionError if no channel has ground truth
    mean_asd = sum(channel_res) / len(channel_res)

    return mean_asd
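
A usage sketch assuming channel-first one-hot masks of shape (C, H, W); the arrays are synthetic, and the empty second channel is skipped.

import numpy as np

target = np.zeros((2, 64, 64), dtype=bool)
target[0, 10:20, 10:20] = True
pred = np.zeros_like(target)
pred[0, 12:22, 12:22] = True  # channel 1 is empty in both and is skipped

print(channelwise_asd(pred, target))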
Example #7
import numpy as np
from medpy.metric.binary import dc, hd, asd


def evaluate(img_gt, img_pred, apply_hd=False, apply_asd=False):
    """
    Function to compute the metrics between two segmentation maps given as input.
    :param img_gt: Array of the ground truth segmentation map.
    :param img_pred: Array of the predicted segmentation map.
    :param apply_hd: whether to compute the Hausdorff Distance.
    :param apply_asd: whether to compute the Average Surface Distance.
    :return: A dict mapping each class name ("myo", "lv", "rv") to [dice, hd, asd].
    """

    if img_gt.ndim != img_pred.ndim:
        raise ValueError("The arrays 'img_gt' and 'img_pred' should have the "
                         "same dimension, {} against {}".format(img_gt.ndim,
                                                                img_pred.ndim))

    res = {}
    class_name = ["myo", "lv", "rv"]
    # Loop over the foreground classes of the input images
    for c, cls_name in zip([1, 2, 3], class_name):
        # Copy the gt image so the input is not altered
        gt_c_i = np.copy(img_gt)
        gt_c_i[gt_c_i != c] = 0

        # Copy the pred image so the input is not altered
        pred_c_i = np.copy(img_pred)
        pred_c_i[pred_c_i != c] = 0

        # Clip to {0, 1} so the metrics see binary masks
        gt_c_i = np.clip(gt_c_i, 0, 1)
        pred_c_i = np.clip(pred_c_i, 0, 1)

        # Compute the Dice
        dice = dc(gt_c_i, pred_c_i)
        h_d, a_sd = 0, 0
        if apply_hd:
            h_d = hd(gt_c_i, pred_c_i)
        if apply_asd:
            a_sd = asd(gt_c_i, pred_c_i)

        res[cls_name] = [dice, h_d, a_sd]

    return res
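
A usage note: with a flag left False the corresponding slot keeps its 0 placeholder, so a 0 here means "not computed", not a perfect score. For example (gt_map/pred_map are hypothetical label maps):

res = evaluate(gt_map, pred_map, apply_hd=False, apply_asd=True)
# res["myo"][1] keeps the 0 placeholder (hd not computed); dice and asd are real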
Example #8
import numpy as np
from medpy.metric.binary import hd, dc, asd


def metrics(img_gt, img_pred, ifhd=True, ifasd=True):
    """
    Function to compute the metrics between two segmentation maps given as input.

    img_gt: Array of the ground truth segmentation map.
    img_pred: Array of the predicted segmentation map.
    Return: A list of metrics in this order, [Dice endo, HD endo, ASD endo, Dice RV, HD RV, ASD RV, Dice MYO, HD MYO, ASD MYO]
    """

    if img_gt.ndim != img_pred.ndim:
        raise ValueError("The arrays 'img_gt' and 'img_pred' should have the "
                         "same dimension, {} against {}".format(img_gt.ndim,
                                                                img_pred.ndim))
    res = []
    # cat = {500: 'endo', 600: 'rv', 200: 'myo'}
    for c in [500, 600, 200]:
        # Copy the gt image so the input is not altered
        gt_c_i = np.copy(img_gt)
        gt_c_i[gt_c_i != c] = 0

        # Copy the pred image so the input is not altered
        pred_c_i = np.copy(img_pred)
        pred_c_i[pred_c_i != c] = 0

        # Clip to {0, 1} so the metrics see binary masks
        gt_c_i = np.clip(gt_c_i, 0, 1)
        pred_c_i = np.clip(pred_c_i, 0, 1)
        # Compute the Dice
        dice = dc(gt_c_i, pred_c_i)

        h_d, a_sd = -1, -1
        if ifhd or ifasd:
            # Empty masks make the surface metrics undefined; use -1 sentinels
            if np.sum(gt_c_i) == 0 or np.sum(pred_c_i) == 0:
                dice = -1
                h_d = -1
                a_sd = -1
            else:
                h_d = hd(gt_c_i, pred_c_i) if ifhd else h_d
                a_sd = asd(gt_c_i, pred_c_i) if ifasd else a_sd
        res += [dice, h_d, a_sd]

    return res
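
Usage under the commented label convention (500 = endo, 600 = rv, 200 = myo); the array below is a synthetic placeholder.

import numpy as np

gt = np.zeros((96, 96), dtype=np.int32)
gt[20:40, 20:40] = 500   # endo
gt[50:70, 20:40] = 600   # rv
gt[20:40, 50:70] = 200   # myo

res = metrics(gt, gt)
# Flat list: [dice, hd, asd] for endo, then rv, then myo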
Example #9
import numpy as np
from medpy.metric.binary import asd


def slicewise_asd(pred, target, n_classes=3):
    """ calculate slice-wise average symmetric surface distance
        :param pred: ndarray with size [H, W], predicted segmentation
        :param target: ndarray with size [H, W], ground truth
        :param n_classes: int, # of classes
    """

    slice_res = []
    # Penalty used when a class is predicted empty but has ground truth
    max_asd = 2 * target.shape[0]
    for c_inx in range(1, n_classes):
        pred_cinx = (pred == c_inx)
        target_cinx = (target == c_inx)
        # Classes with no ground-truth foreground are skipped entirely
        if np.sum(target_cinx) != 0:
            if np.sum(pred_cinx) == 0:
                res = max_asd
            else:
                res = asd(pred_cinx, target_cinx)

            slice_res.append(res)

    # Note: raises ZeroDivisionError if no foreground class is present
    mean_res = sum(slice_res) / len(slice_res)

    return mean_res
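
A sketch showing the max_asd fallback when a class is missed entirely (synthetic 2D slice, not part of the original):

import numpy as np

target = np.zeros((128, 128), dtype=np.uint8)
target[30:60, 30:60] = 1
target[70:100, 70:100] = 2
pred = np.zeros_like(target)
pred[32:62, 30:60] = 1  # class 2 is missed, so it contributes max_asd

print(slicewise_asd(pred, target, n_classes=3))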
Example #10
from medpy.metric.binary import (dc, jc, hd, asd, assd,
                                 precision, recall, sensitivity, specificity)


def compute_scores(preds, labels):
    preds_data = preds.data.cpu().numpy()
    labels_data = labels.data.cpu().numpy()

    # Note: hd, asd and assd raise RuntimeError if either mask is empty
    dice_score = dc(preds_data, labels_data)
    jaccard_coef = jc(preds_data, labels_data)
    hausdorff_dist = hd(preds_data, labels_data)
    asd_score = asd(preds_data, labels_data)
    assd_score = assd(preds_data, labels_data)
    precision_value = precision(preds_data, labels_data)
    recall_value = recall(preds_data, labels_data)
    sensitivity_value = sensitivity(preds_data, labels_data)
    specificity_value = specificity(preds_data, labels_data)
    return {
        'dice score': dice_score,
        'jaccard': jaccard_coef,
        'hausdorff': hausdorff_dist,
        'asd': asd_score,
        'assd': assd_score,
        'precision': precision_value,
        'recall': recall_value,
        'sensitivity': sensitivity_value,
        'specificity': specificity_value
    }
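
A usage sketch on binarised torch tensors; model, images and labels are hypothetical, and the 0.5 threshold is an assumption about the model output.

import torch

logits = model(images)                          # hypothetical forward pass
preds = (torch.sigmoid(logits) > 0.5).float()   # assumed binarisation
scores = compute_scores(preds, labels)
print(scores['dice score'], scores['assd'])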
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-file', type=str, default='./logs/test1/20190506_221021.177567/checkpoint_200.pth.tar', help='Model path')
    parser.add_argument('--datasetTest', type=int, nargs='+', default=[1], help='test folder id containing image ROIs to test')
    parser.add_argument('--dataset', type=str, default='test', help='test folder id containing image ROIs to test')
    parser.add_argument('-g', '--gpu', type=int, default=0)

    parser.add_argument('--data-dir', default='../../../../Dataset/Fundus/', help='data root path')
    parser.add_argument('--out-stride', type=int, default=16, help='out-stride of deeplabv3+',)
    parser.add_argument('--sync-bn', action='store_true', help='sync-bn in deeplabv3+')
    parser.add_argument('--freeze-bn', action='store_true', help='freeze batch normalization of deeplabv3+')
    parser.add_argument('--movingbn', action='store_true', help='moving batch normalization of deeplabv3+ in the test phase')
    parser.add_argument('--test-prediction-save-path', type=str, default='./results/rebuttle-0401/', help='Path root for test image and mask')
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    model_file = args.model_file
    output_path = os.path.join(args.test_prediction_save_path, 'test' + str(args.datasetTest[0]), args.model_file.split('/')[-2])

    # 1. dataset
    composed_transforms_test = transforms.Compose([
        tr.Normalize_tf(),
        tr.ToTensor()
    ])
    db_test = DL.FundusSegmentation(base_dir=args.data_dir, phase='test', splitid=args.datasetTest,
                                    transform=composed_transforms_test, state='prediction')
    batch_size = 12
    test_loader = DataLoader(db_test, batch_size=batch_size, shuffle=False, num_workers=1)

    # 2. model
    model = DeepLab(num_classes=2, backbone='mobilenet', output_stride=args.out_stride,
                    sync_bn=args.sync_bn, freeze_bn=args.freeze_bn)

    if torch.cuda.is_available():
        model = model.cuda()
    print('==> Loading %s model file: %s' %
          (model.__class__.__name__, model_file))
    # model_data = torch.load(model_file)

    checkpoint = torch.load(model_file)
    pretrained_dict = checkpoint['model_state_dict']
    model_dict = model.state_dict()
    # 1. filter out unnecessary keys
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    # 2. overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)
    # 3. load the new state dict
    model.load_state_dict(model_dict)

    if args.movingbn:
        model.train()
    else:
        model.eval()

    val_cup_dice = 0.0
    val_disc_dice = 0.0
    total_hd_OC = 0.0
    total_hd_OD = 0.0
    total_asd_OC = 0.0
    total_asd_OD = 0.0
    timestamp_start = datetime.now(pytz.timezone('Asia/Hong_Kong'))
    total_num = 0
    OC = []
    OD = []

    for batch_idx, (sample) in tqdm.tqdm(enumerate(test_loader),total=len(test_loader),ncols=80, leave=False):
        data = sample['image']
        target = sample['label']
        img_name = sample['img_name']
        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)

        prediction, dc, sel, _ = model(data)
        prediction = torch.nn.functional.interpolate(prediction, size=(target.size()[2], target.size()[3]), mode="bilinear")
        data = torch.nn.functional.interpolate(data, size=(target.size()[2], target.size()[3]), mode="bilinear")

        target_numpy = target.data.cpu()
        imgs = data.data.cpu()
        hd_OC = 100
        asd_OC = 100
        hd_OD = 100
        asd_OD = 100
        for i in range(prediction.shape[0]):
            prediction_post = postprocessing(prediction[i], dataset=args.dataset)
            cup_dice, disc_dice = dice_coeff_2label(prediction_post, target[i])
            OC.append(cup_dice)
            OD.append(disc_dice)
            if np.sum(prediction_post[0, ...]) < 1e-4:
                hd_OC = 100
                asd_OC = 100
            else:
                hd_OC = binary.hd95(np.asarray(prediction_post[0, ...], dtype=bool),
                                    np.asarray(target_numpy[i, 0, ...], dtype=bool))
                asd_OC = binary.asd(np.asarray(prediction_post[0, ...], dtype=bool),
                                    np.asarray(target_numpy[i, 0, ...], dtype=bool))
            # Guard on the disc channel (index 1) before computing its surface metrics
            if np.sum(prediction_post[1, ...]) < 1e-4:
                hd_OD = 100
                asd_OD = 100
            else:
                hd_OD = binary.hd95(np.asarray(prediction_post[1, ...], dtype=bool),
                                    np.asarray(target_numpy[i, 1, ...], dtype=bool))
                asd_OD = binary.asd(np.asarray(prediction_post[1, ...], dtype=bool),
                                    np.asarray(target_numpy[i, 1, ...], dtype=bool))
            val_cup_dice += cup_dice
            val_disc_dice += disc_dice
            total_hd_OC += hd_OC
            total_hd_OD += hd_OD
            total_asd_OC += asd_OC
            total_asd_OD += asd_OD
            total_num += 1
            for img, lt, lp in zip([imgs[i]], [target_numpy[i]], [prediction_post]):
                img, lt = utils.untransform(img, lt)
                save_per_img(img.numpy().transpose(1, 2, 0),
                             output_path,
                             img_name[i],
                             lp, lt, mask_path=None, ext="bmp")

    print('OC:', OC)
    print('OD:', OD)
    import csv
    with open('Dice_results.csv', 'a+') as result_file:
        wr = csv.writer(result_file, dialect='excel')
        for index in range(len(OC)):
            wr.writerow([OC[index], OD[index]])

    val_cup_dice /= total_num
    val_disc_dice /= total_num
    total_hd_OC /= total_num
    total_asd_OC /= total_num
    total_hd_OD /= total_num
    total_asd_OD /= total_num

    print('''\n==>val_cup_dice : {0}'''.format(val_cup_dice))
    print('''\n==>val_disc_dice : {0}'''.format(val_disc_dice))
    print('''\n==>average_hd_OC : {0}'''.format(total_hd_OC))
    print('''\n==>average_hd_OD : {0}'''.format(total_hd_OD))
    print('''\n==>average_asd_OC : {0}'''.format(total_asd_OC))
    print('''\n==>average_asd_OD : {0}'''.format(total_asd_OD))
    with open(osp.join(output_path, '../test' + str(args.datasetTest[0]) + '_log.csv'), 'a') as f:
        elapsed_time = (
                datetime.now(pytz.timezone('Asia/Hong_Kong')) -
                timestamp_start).total_seconds()
        log = [['batch-size: '] + [batch_size] + [args.model_file] + ['cup dice coefficient: '] + \
               [val_cup_dice] + ['disc dice coefficient: '] + \
               [val_disc_dice] + ['average_hd_OC: '] + \
               [total_hd_OC] + ['average_hd_OD: '] + \
               [total_hd_OD] + ['average_asd_OC: '] + \
               [total_asd_OC] + ['average_asd_OD: '] + \
               [total_asd_OD] + [elapsed_time]]
        log = map(str, log)
        f.write(','.join(log) + '\n')
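
A plausible invocation (the flag names come from the argparse definitions above; the script name and paths are placeholders):

python test.py --model-file ./logs/test1/checkpoint_200.pth.tar --datasetTest 1 --data-dir ../../../../Dataset/Fundus/ -g 0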
Example #12
def main(_):
  if not FLAGS.dataset_dir:
    raise ValueError('You must supply the dataset directory with --dataset_dir')

  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    tf_global_step = slim.get_or_create_global_step()
    ###############################
    # Generate the tfRecord data  #
    ###############################
    datareader, Volshape, rindex, spmap, labelOrg, imgOrg = EvalSample_Gnerate(FLAGS.dataset_dir, 'Glabel.nrrd')
    Patchsize = len(rindex)
    ######################
    # Select the dataset #
    ######################

    dataset = dataset_factory.get_dataset(
        FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir,file_pattern=FLAGS.file_name,Datasize=Patchsize)

    ####################
    # Select the model #
    ####################
    #num_classes=2

    with tf.Graph().as_default():
        network_fn = nets_factory.get_network_fn(
                FLAGS.model_name,
                num_classes=(dataset.num_classes - FLAGS.labels_offset),
                is_training=False)

    ##############################################################
    # Create a dataset provider that loads data from the dataset #
    ##############################################################

    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=False,
        common_queue_capacity=2 * FLAGS.batch_size,
        common_queue_min=FLAGS.batch_size)
    [image, label] = provider.get(['image', 'label'])
    # label -= FLAGS.labels_offset

    #####################################
    # Select the preprocessing function #
    #####################################
    preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
                 preprocessing_name,
                 is_training=False)

    eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size
    image = image_preprocessing_fn(image, eval_image_size, eval_image_size)

    images, labels = tf.train.batch(
             [image, label],
             batch_size=FLAGS.batch_size,
             num_threads=FLAGS.num_preprocessing_threads,
             capacity=5 * FLAGS.batch_size)

    ####################
    # Define the model #
    ####################

    logits, end_points = network_fn(images)
    probabilities = tf.nn.softmax(logits)
    pred = tf.argmax(logits, axis=1)
    # if FLAGS.moving_average_decay:
    #   variable_averages = tf.train.ExponentialMovingAverage(
    #       FLAGS.moving_average_decay, tf_global_step)
    #   variables_to_restore = variable_averages.variables_to_restore(
    #       slim.get_model_variables())
    #   variables_to_restore[tf_global_step.op.name] = tf_global_step
    # else:
    #   variables_to_restore = slim.get_variables_to_restore()

    # #predictions = tf.argmax(logits, 1)
    # labels = tf.squeeze(labels)
    #
    # # Define the metrics:
    # names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
    #     'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
    #     'Recall@5': slim.metrics.streaming_recall_at_k(
    #         logits, labels, 5),
    # })
    #
    # # Print the summaries to screen.
    # summary_ops = []
    # for name, value in names_to_values.items():
    #   summary_name = 'eval/%s' % name
    #   op = tf.scalar_summary(summary_name, value, collections=[])
    #   op = tf.Print(op, [value], summary_name)
    #   tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
    #   summary_ops.append(op)
    # # TODO(sguada) use num_epochs=1
    # if FLAGS.max_num_batches:
    #   num_batches = FLAGS.max_num_batches
    # else:
    #   # This ensures that we make a single pass over all of the data.
    #   num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))

    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
            checkpoint_path = FLAGS.checkpoint_path

    tf.logging.info('Evaluating %s' % checkpoint_path)
    init_fn = slim.assign_from_checkpoint_fn(
                os.path.join(checkpoint_path),
                slim.get_model_variables())  # restore weights by variable name

    #Volshape=(50,61,61)
    imgshape=spmap.shape
    segResult=np.zeros(imgshape)
    groundtruth=np.zeros(imgshape)
    segPromap=np.zeros(imgshape)
    segPromapEdge=np.zeros(imgshape)
    PreMap=[]
    labellist=[]
    seglist=[]
    conv1list=[]
    conv2list=[]
    imgorglist=[]
    fclist=[]
    with tf.Session() as sess:
            # Load weights
            init_fn(sess)
           # sess.run(images.initializer, feed_dict)
            # Start input enqueue threads.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            num_iter = int(math.ceil(dataset.num_samples/float(FLAGS.batch_size)))
            step = 0

            try:
                while step < num_iter and not coord.should_stop():
                    # Run evaluation steps or whatever
                    segmentation, log, pmap, labelmap, imgpre, conv1, conv2, fc3 = sess.run(
                        [pred, logits, probabilities, labels, images,
                         end_points['conv1'], end_points['conv3'], end_points['fc3']])
                    step+=1
                    PreMap.extend(pmap)
                    conv1list.extend(conv1)
                    conv2list.extend(conv2)
                    fclist.extend(fc3)
                    imgorglist.extend(imgpre)
                    seglist.append(segmentation)
                    labellist.append(labelmap)
                    print('The No. %d/%d calculation' % (step, num_iter))
                    #Miaimshow.subplots(Pro_imgs, num=step+2, cols=8)
            except tf.errors.OutOfRangeError:
                print('Done evaluation -- epoch limit reached')
            finally:
                # When done, ask the threads to stop.
                coord.request_stop()
            PreMap = np.array(PreMap)
            np.save(os.path.join(FLAGS.dataset_dir,'liverspProbmap.npy'), PreMap)
            # PreMap=np.squeeze(PreMap,axis=1)

           # PreMap_flat = PreMap.ravel()
           # PreMap_flat=np.divide((PreMap_flat - np.amin(PreMap_flat)) * 255, (np.amax(PreMap_flat) - np.amin(PreMap_flat)))
            m = 0
            for i in range(len(rindex)):
                segResult[spmap==rindex[i]]=np.array(seglist).ravel()[i]
                segPromap[spmap==rindex[i]]=PreMap[i,1]
                segPromapEdge[spmap==rindex[i]]=PreMap[i,2]
                groundtruth[spmap==rindex[i]]=np.array(labellist).ravel()[i]

            coord.join(threads)

            fig,ax= plt.subplots(nrows=2,ncols=3)

            from skimage.segmentation import mark_boundaries
            ax[0,0].set_title('Segmentation with superpixel map')
            ax[0,0].imshow(mark_boundaries(segResult, spmap))
            ax[0,1].set_title('Segmentation with ground truth map')
            ax[0,1].imshow(segResult)
            ax[0,1].imshow(labelOrg,alpha=0.5,cmap='jet')
            ax[0,2].set_title('Reading label')
            ax[0,2].imshow(groundtruth)

            ax[1,0].set_title('liver Probabilities map')
            ax[1,0].imshow(segPromap)
            ax[1,1].set_title('edge Probabilities map')
            ax[1,1].imshow(segPromapEdge)
            ax[1,2].set_title('liver +edge Probabilities map')
            ax[1,2].imshow(segPromapEdge+segPromap)

            segthpro=segPromapEdge+segPromap
            segthpro[segthpro<0.8]=0

    from skimage.segmentation import active_contour
    from skimage.measure import find_contours
    from skimage.filters import gaussian
    from skimage import morphology

    #edg=sobel(segResult.astype(int))
    segmorp=morphology.remove_small_objects(segthpro.astype(bool),5000)
    segopen=morphology.opening(segmorp,morphology.disk(3))
    segclose=morphology.closing(segopen,morphology.disk(15))
    fig,ax=plt.subplots(1,3)
    ax=ax.ravel()
    ax[0].imshow(segmorp)
    ax[0].set_title('Removed the small objects')
    ax[1].imshow(segopen)
    ax[1].set_title('After open operation')
    ax[2].imshow(segclose)
    ax[2].imshow(labelOrg,alpha=0.5,cmap='jet')
    ax[2].set_title('After close operation')
    plt.axis('off')
    from MiaUtils import Miametrics as metric
    mt=metric.MiaMetrics(logger)
    dsc=mt.DSCMetric(segclose,labelOrg.astype(bool))
    print('The dice similarity coefficient score is {}'.format(dsc))

    voe=mt.VOEMetric(segclose,labelOrg.astype(bool))
    print('The Volume Overlap Error score is {}'.format(voe))

    rvd=mt.RVDMetric(segclose,labelOrg.astype(bool))
    print('The Relative volume difference score is {}'.format(rvd))

    from medpy.metric.binary import hd
    from medpy.metric.binary import asd
    from medpy.metric.binary import obj_fpr
    from medpy.metric.binary import obj_tpr
    Asd=asd(segclose,labelOrg.astype(bool))
    print('The ASD score is {}'.format(Asd))

    HD=hd(segclose,labelOrg.astype(bool))
    print('The Hausdorff Distance score is {}'.format(HD))
###************************************************************************
    #### superpixel graph-cuts method computation
#######********************************************************************
    from skimage import segmentation, color,filters
    from skimage.future import graph
    img=DataNormalize(imgOrg)/255
    img=np.dstack((np.dstack((img, img)), img))
    labels1 = segmentation.slic(img, compactness=5, n_segments=2000,sigma=1)
    #labels1=spmap
    out1 = color.label2rgb(labels1, img, kind='avg')
    edge_map = filters.sobel(color.rgb2gray(img))
    g = graph.rag_boundary(labels1, edge_map)
    #g = graph.rag_mean_color(img, labels1, mode='similarity')
    labels2 = graph.cut_normalized(labels1, g)
    out2 = color.label2rgb(labels2, img, kind='avg')

    fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
    ax[0].imshow(out1)
    ax[1].imshow(out2)
    for a in ax:
       a.axis('off')
    plt.tight_layout()
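
A plausible invocation, assuming the usual TF-slim flag definitions back the FLAGS referenced above (the script name and all values are placeholders inferred from the code, not confirmed by it):

python eval_sp.py --dataset_dir=/path/to/data --dataset_name=liver --dataset_split_name=test --model_name=my_net --checkpoint_path=/path/to/checkpoints --batch_size=32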