Example #1
def update_eval_metrics(preds, labels, eval_metrics):

    if len(labels.shape) == 2:
        preds = np.expand_dims(preds, axis=0)
        labels = np.expand_dims(labels, axis=0)

    N = labels.shape[0]

    for i in range(N):
        pred = preds[i, :, :]
        label = labels[i, :, :]
        eval_metrics['dice score'].append(dc(pred, label))
        eval_metrics['precision'].append(precision(pred, label))
        eval_metrics['recall'].append(recall(pred, label))
        eval_metrics['sensitivity'].append(sensitivity(pred, label))
        eval_metrics['specificity'].append(specificity(pred, label))

        if np.sum(pred) > 0 and np.sum(label) > 0:
            eval_metrics['hausdorff'].append(hd(pred, label))
            eval_metrics['hausdorff 95%'].append(hd95(pred, label))
            eval_metrics['asd'].append(asd(pred, label))
            eval_metrics['assd'].append(assd(pred, label))
            eval_metrics['jaccard'].append(jc(pred, label))
        else:
            # These metrics are undefined when either mask is empty, so record a
            # numeric NaN instead of the string 'nan' to keep the lists numeric.
            eval_metrics['hausdorff'].append(np.nan)
            eval_metrics['hausdorff 95%'].append(np.nan)
            eval_metrics['asd'].append(np.nan)
            eval_metrics['assd'].append(np.nan)
            eval_metrics['jaccard'].append(np.nan)

    return eval_metrics
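
A minimal usage sketch for this helper, assuming the metric functions (dc, hd, hd95, asd, assd, jc, precision, recall, sensitivity, specificity) come from medpy.metric.binary; the masks below are made-up examples:

import numpy as np
from medpy.metric.binary import (dc, hd, hd95, asd, assd, jc,
                                 precision, recall, sensitivity, specificity)

eval_metrics = {key: [] for key in (
    'dice score', 'precision', 'recall', 'sensitivity', 'specificity',
    'hausdorff', 'hausdorff 95%', 'asd', 'assd', 'jaccard')}

# Two overlapping square masks so every metric is well defined.
preds = np.zeros((2, 32, 32), dtype=np.uint8)
labels = np.zeros((2, 32, 32), dtype=np.uint8)
preds[:, 8:20, 8:20] = 1
labels[:, 10:22, 10:22] = 1

eval_metrics = update_eval_metrics(preds, labels, eval_metrics)
print('mean dice:', np.mean(eval_metrics['dice score']))
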
Example #2
File: evaluate.py  Project: gmaher/tcl_code
def get_outputs(seg, seg_truth):
    print(seg.shape, seg_truth.shape)
    seg_thresh = utility.threshold(seg, THRESHOLD)
    print(seg_thresh.shape)
    contour = utility.listSegToContours(seg_thresh, meta_test[1, :],
        meta_test[0, :], ISOVALUE)
    errs = utility.listAreaOverlapError(contour, contours_test)
    thresh, ts = utility.cum_error_dist(errs, DX)
    roc = roc_curve(np.ravel(seg_truth), np.ravel(seg), pos_label=1)
    pr = precision_recall_curve(np.ravel(seg_truth), np.ravel(seg), pos_label=1)
    dorf = []
    emd = []
    asdl = []
    dice = []
    prec = []
    for i in range(len(seg_truth)):
        if np.sum(seg_thresh[i, :, :]) > 0.1 and np.sum(seg_truth[i, :, :, 0]) > 0.1:
            e = hd(seg_thresh[i, :, :], seg_truth[i, :, :, 0], meta_test[0, i][0])
            dorf.append(e)

            e_asd = assd(seg_thresh[i, :, :], seg_truth[i, :, :, 0], meta_test[0, i][0])
            asdl.append(e_asd)
            # if np.sum(seg_thresh[i,:,:]) < 600 and np.sum(seg_truth[i,:,:,0]) < 600:
            #     print i,np.sum(seg_thresh[i,:,:]),np.sum(seg_truth[i,:,:,0])
            #     e_emd = utility.EMDSeg(seg_truth[i,:,:,0],seg_thresh[i,:,:], meta_test[0,i][0])
            #     emd.append(e_emd)

        edc = dc(seg_thresh[i, :, :], seg_truth[i, :, :, 0])
        dice.append(edc)
        prec.append(precision(seg_thresh[i, :, :], seg_truth[i, :, :, 0]))
    acc, mean_acc = calc_accuracy(seg, seg_truth)
    return (contour, errs, thresh, roc, pr, acc, mean_acc, dorf, dice, prec, asdl)
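
The snippet relies on names defined elsewhere in the project (utility, meta_test, contours_test, THRESHOLD, ISOVALUE, DX). Judging by their signatures, the curve and metric functions would typically be imported as below; this is an assumption, not shown in the original file:

from sklearn.metrics import roc_curve, precision_recall_curve
from medpy.metric.binary import dc, hd, assd, precision
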
Example #3
def binary_measures_numpy(result, target, binary_threshold=0.5):
    result_binary = (result > binary_threshold).astype(numpy.uint8)
    target_binary = (target > binary_threshold).astype(numpy.uint8)

    # Surface distances start at infinity and are only computed when both masks
    # contain foreground, since hd/assd are undefined for empty masks.
    result = BinaryMeasuresDto(mpm.dc(result_binary, target_binary), numpy.inf,
                               numpy.inf,
                               mpm.precision(result_binary, target_binary),
                               mpm.sensitivity(result_binary, target_binary),
                               mpm.specificity(result_binary, target_binary))

    if result_binary.any() and target_binary.any():
        result.hd = mpm.hd(result_binary, target_binary)
        result.assd = mpm.assd(result_binary, target_binary)

    return result
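
BinaryMeasuresDto is project-specific and not shown here; a hypothetical reconstruction matching the positional arguments used above might look like this (the real class may differ):

from dataclasses import dataclass

@dataclass
class BinaryMeasuresDto:
    dc: float
    hd: float
    assd: float
    precision: float
    sensitivity: float
    specificity: float
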
Example #4
def compute_scores(preds, labels):
    preds_data = preds.data.cpu().numpy()
    labels_data = labels.data.cpu().numpy()

    dice_score = dc(preds_data, labels_data)
    jaccard_coef = jc(preds_data, labels_data)
    hausdorff_dist = hd(preds_data, labels_data)
    asd_score = asd(preds_data, labels_data)
    assd_score = assd(preds_data, labels_data)
    precision_value = precision(preds_data, labels_data)
    recall_value = recall(preds_data, labels_data)
    sensitivity_value = sensitivity(preds_data, labels_data)
    specificity_value = specificity(preds_data, labels_data)
    return {
        'dice score': dice_score,
        'jaccard': jaccard_coef,
        'hausdorff': hausdorff_dist,
        'asd': asd_score,
        'assd': assd_score,
        'precision': precision_value,
        'recall': recall_value,
        'sensitivity': sensitivity_value,
        'specificity': specificity_value
    }
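
compute_scores calls hd, asd and assd unconditionally, and those raise when either mask is empty. A guarded variant in the spirit of Example #1 could look like the sketch below; it reuses the metric functions assumed to be in scope and assumes preds and labels are torch tensors as above:

import numpy as np

def compute_scores_safe(preds, labels):
    preds_data = preds.detach().cpu().numpy()
    labels_data = labels.detach().cpu().numpy()
    scores = {
        'dice score': dc(preds_data, labels_data),
        'precision': precision(preds_data, labels_data),
        'recall': recall(preds_data, labels_data),
        'sensitivity': sensitivity(preds_data, labels_data),
        'specificity': specificity(preds_data, labels_data),
    }
    if preds_data.any() and labels_data.any():
        # Distance-based metrics (and the Jaccard ratio) need foreground in both masks.
        scores['jaccard'] = jc(preds_data, labels_data)
        scores['hausdorff'] = hd(preds_data, labels_data)
        scores['asd'] = asd(preds_data, labels_data)
        scores['assd'] = assd(preds_data, labels_data)
    else:
        scores.update({k: np.nan for k in ('jaccard', 'hausdorff', 'asd', 'assd')})
    return scores
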
Example #5
    # ratio = non_zero_count / (lab.shape[0] * lab.shape[1])
    # print(ratio)

    input = Variable(
        torch.from_numpy(img).type('torch.FloatTensor').cuda(gpu_id))
    res = fcn(input).detach().cpu().numpy()
    # res =res/res.max()
    # res = (res-1)/res.max()

    ret, res = cv2.threshold(res[0, 0, :, :], 0.8, 1, cv2.THRESH_BINARY)
    # res = res[0,0,:,:]
    dice = dc(lab, res)
    jaccard = jc(lab, res)
    sens = sensitivity(lab, res)
    spec = specificity(lab, res)
    pres = precision(lab, res)
    rec = recall(lab, res)

    print('dice:', dice)
    print('jaccard:', jaccard)
    print('sensitivity:', sens)
    print('precision:', pres)

    dc_list.append(dice)
    jc_list.append(jaccard)
    sens_list.append(sens)
    spec_list.append(spec)
    pres_list.append(pres)
    recall_list.append(rec)

    print('average dice:', sum(dc_list) / len(dc_list))
Example #6
File: eval.py  Project: phantanphuc/LVTN
def calculate_precision():
    dictindex = []
    global prec
    with open('./label.txt') as f:
        content = f.readlines()
        for symbol in content:
            symbol = symbol.replace('\n', '')
            split = symbol.split(' ')
            dictindex.append(split[0])

    print('\nPrecision')
    net.eval()
    global dice_score
    global ite
    target_boxes = testset.boxes
    target_labels = testset.labels

    for batch_idx, (images, loc_targets,
                    conf_targets) in enumerate(testloader):
        if use_cuda:
            images = images.cuda()
            loc_targets = loc_targets.cuda()
            conf_targets = conf_targets.cuda()

        images = Variable(images, volatile=True)

        loc_preds, conf_preds = net(images)

        data_encoder = DataEncoder()
        conf_preds_list = []
        for i in range(batch_size):
            s_conf = F.softmax(conf_preds[i]).data
            conf_preds_list.append(s_conf)
        try:
            boxes, labels, scores = data_encoder.decodeforbatch(
                loc_preds.data, conf_preds_list)
            for b_id in range(batch_size):
                predict_res = find_boxescoreslabel(labels[b_id], boxes[b_id])
                target_res = find_boxescoreslabel(
                    target_labels[b_id + batch_idx * batch_size],
                    target_boxes[b_id + batch_idx * batch_size], False)
                # pdb.set_trace()
                print('[I %d]:' % (batch_idx))
                for box_id in range(len(predict_res)):
                    #					pdb.set_trace()
                    t_label = [item[0] for item in target_res]
                    la = predict_res[box_id][0]
                    if la in t_label:
                        predict_img = create_binaryImageforPrec(
                            predict_res[box_id][1])
                        #						pdb.set_trace()
                        #						imgg = Image.fromarray(predict_img)
                        #						imgg.show()
                        target_img = create_binaryImageforPrec(
                            target_res[t_label.index(la)][1], False)
                        #						img = Image.fromarray(target_img)
                        #						img.show()
                        #							pdb.set_trace()
                        prec[0][la] += binary.precision(
                            predict_img, target_img)
                        print('[la %d: %.5f]' % (la, prec[0][la]))
                        # print(prec[0][la])
                    else:
                        print('label %d not found in target labels' % la)
                    prec[1][la] += 1

        except Exception as err:
            # Decoding can fail on some batches; report the error instead of a bare 'err'.
            print('decode error:', err)
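
A hypothetical follow-up, assuming prec is a pair of per-class accumulators where prec[0][la] sums the per-box precision values and prec[1][la] counts the evaluated boxes of class la:

def mean_precision_per_class(prec):
    # Average accumulated precision per class, skipping classes never evaluated.
    return {la: prec[0][la] / prec[1][la]
            for la in range(len(prec[1])) if prec[1][la] > 0}
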
Example #7
    
    metricsDir = './{}/metrics/'.format(dataset)
    if not os.path.exists(metricsDir):
        os.makedirs(metricsDir)

    outputMetrics = metricsDir + '{}.csv'.format(indexName)
    filenames = glob.glob("./{}/cortadas/labels/*.tif".format(dataset))
    
    with open(outputMetrics, 'w') as csvfile:
        
        fieldNames = ['Image', 'Dice', 'Jaccard', 'Precision', 'Recall', 'Sensitivity', 'Specificity']    
        writer = csv.writer(csvfile, delimiter=";")
        writer.writerow(fieldNames)

        for maskFileName in filenames:

            fileName = maskFileName.replace('./{}/cortadas/labels/'.format(dataset), '')
            mask = cv2.imread(maskFileName, cv2.IMREAD_COLOR)
            processed = cv2.imread(processedDir + fileName, cv2.IMREAD_COLOR)

            dc = str(mbin.dc(mask, processed)).replace(".", ",")
            jc = str(mbin.jc(mask, processed)).replace(".", ",")

            precision = str(mbin.precision(processed, mask)).replace(".", ",")
            recall = str(mbin.recall(processed, mask)).replace(".", ",")
            
            sensitivity = str(mbin.sensitivity(processed, mask)).replace(".", ",")
            specificity = str(mbin.specificity(processed, mask)).replace(".", ",")

            writer.writerow([fileName, dc, jc, precision, recall, sensitivity, specificity])
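
The decimal points are replaced with commas so the ';'-separated CSV opens cleanly in locales that use ',' as the decimal mark. If the file is read back with pandas (an optional follow-up, not part of the original script), both settings have to be undone:

import pandas as pd

metrics = pd.read_csv(outputMetrics, sep=';', decimal=',')
print(metrics[['Dice', 'Jaccard']].mean())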