    def measure(self, generated, vessels, masks, num_data, iter_time, phase,
                total_time):
        # masking
        vessels_in_mask, generated_in_mask = utils.pixel_values_in_mask(
            vessels, generated, masks)

        # average processing time per image, in milliseconds
        avg_pt = (total_time / num_data) * 1000

        # evaluate Area Under the Curve of ROC and Precision-Recall
        auc_roc = utils.AUC_ROC(vessels_in_mask, generated_in_mask)
        auc_pr = utils.AUC_PR(vessels_in_mask, generated_in_mask)

        # binarize to calculate the Dice coefficient
        binarys_in_mask = utils.threshold_by_otsu(generated, masks)
        dice_coeff = utils.dice_coefficient_in_train(vessels_in_mask,
                                                     binarys_in_mask)
        acc, sensitivity, specificity = utils.misc_measures(
            vessels_in_mask, binarys_in_mask)
        score = auc_pr + auc_roc + dice_coeff + acc + sensitivity + specificity

        # auc_sum for saving the best model during training
        auc_sum = dice_coeff + acc + auc_pr

        # print information
        ord_output = collections.OrderedDict([('auc_pr', auc_pr),
                                              ('auc_roc', auc_roc),
                                              ('dice_coeff', dice_coeff),
                                              ('acc', acc),
                                              ('sensitivity', sensitivity),
                                              ('specificity', specificity),
                                              ('score', score),
                                              ('auc_sum', auc_sum),
                                              ('best_auc_sum',
                                               self.best_auc_sum),
                                              ('avg_pt', avg_pt)])
        utils.print_metrics(iter_time, ord_output)

        # write to TensorBoard in train mode only
        if phase == 'train':
            self.model.measure_assign(auc_pr, auc_roc, dice_coeff, acc,
                                      sensitivity, specificity, score,
                                      iter_time)
        elif phase == 'test':
            # save in .npy format for evaluation
            utils.save_obj(vessels_in_mask, generated_in_mask,
                           os.path.join(self.auc_out_dir, "auc_roc.npy"),
                           os.path.join(self.auc_out_dir, "auc_pr.npy"))

        return auc_sum
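The utils helpers called above are defined elsewhere in the repository and are not shown here. As a rough sketch of pixel_values_in_mask (an assumption, not the repository's actual code), it could flatten the pixels inside the field-of-view masks like this, assuming all three inputs are NumPy arrays of the same shape and the masks are binary:

import numpy as np

def pixel_values_in_mask(true_vessels, pred_vessels, masks):
    # Keep only the pixels inside the field-of-view masks and return
    # them as flat 1-D arrays, ready for metric computation.
    true_in_mask = true_vessels[masks == 1].flatten()
    pred_in_mask = pred_vessels[masks == 1].flatten()
    return true_in_mask, pred_in_mask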
Example No. 2
    def measure(self, generated, vessels, masks, num_data, iter_time, phase,
                total_time):
        vessels_in_mask, generated_in_mask = utils.pixel_values_in_mask(
            vessels, generated, masks)
        avg_pt = (total_time / num_data) * 1000  # average processing time (ms)

        # evaluation
        auc_roc = utils.AUC_ROC(vessels_in_mask, generated_in_mask)
        auc_pr = utils.AUC_PR(vessels_in_mask, generated_in_mask)

        binarys_in_mask = utils.threshold_by_otsu(generated, masks)
        dice_coeff = utils.dice_coefficient_in_train(vessels_in_mask,
                                                     binarys_in_mask)
        acc, sensitivity, specificity = utils.misc_measures(
            vessels_in_mask, binarys_in_mask)
        score = auc_pr + auc_roc + dice_coeff + acc + sensitivity + specificity

        # print information
        ord_output = collections.OrderedDict([('auc_pr', auc_pr),
                                              ('auc_roc', auc_roc),
                                              ('dice_coeff', dice_coeff),
                                              ('acc', acc),
                                              ('sensitivity', sensitivity),
                                              ('specificity', specificity),
                                              ('score', score),
                                              ('best_dice_coeff',
                                               self.best_dice_coeff),
                                              ('avg_pt', avg_pt)])

        utils.print_metrics(iter_time, ord_output)

        # write to TensorBoard in train mode
        if phase == 'train':
            self.model.measure_assign(auc_pr, auc_roc, dice_coeff, acc,
                                      sensitivity, specificity, score,
                                      iter_time)

        if phase == 'test':
            # save in .npy format for evaluation
            utils.save_obj(vessels_in_mask, generated_in_mask,
                           os.path.join(self.auc_out_dir, "auc_roc.npy"),
                           os.path.join(self.auc_out_dir, "auc_pr.npy"))

        return dice_coeff
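AUC_ROC and AUC_PR are likewise defined elsewhere. A minimal sketch, assuming they wrap scikit-learn and operate on the flattened masked pixel values:

from sklearn.metrics import auc, precision_recall_curve, roc_curve

def AUC_ROC(true_in_mask, pred_in_mask):
    # Area under the ROC curve of the masked pixel values.
    fpr, tpr, _ = roc_curve(true_in_mask, pred_in_mask)
    return auc(fpr, tpr)

def AUC_PR(true_in_mask, pred_in_mask):
    # Area under the precision-recall curve of the masked pixel values.
    precision, recall, _ = precision_recall_curve(true_in_mask, pred_in_mask)
    return auc(recall, precision)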
Example No. 3
        # evaluate the GAN on the validation set
        gan_x_test, gan_y_test = utils.input2gan(val_imgs, val_vessels, d_out_shape)
        loss, acc = gan.evaluate(gan_x_test, gan_y_test, batch_size=batch_size, verbose=0)
        utils.print_metrics(n_round + 1, acc=acc, loss=loss, type='GAN')

        # save the generator weights
        g.save_weights(os.path.join(model_out_dir, "g_{}_{}_{}.h5".format(
            n_round, FLAGS.discriminator, FLAGS.ratio_gan2seg)))

    # update step sizes and learning rates
    scheduler.update_steps(n_round)
    K.set_value(d.optimizer.lr, scheduler.get_lr())
    K.set_value(gan.optimizer.lr, scheduler.get_lr())

    # evaluate on test images
    if n_round in rounds_for_evaluation:
        generated = g.predict(test_imgs, batch_size=batch_size)
        generated = np.squeeze(generated, axis=3)
        vessels_in_mask, generated_in_mask = utils.pixel_values_in_mask(
            test_vessels, generated, test_masks)
        auc_roc = utils.AUC_ROC(vessels_in_mask, generated_in_mask,
                                os.path.join(auc_out_dir, "auc_roc_{}.npy".format(n_round)))
        auc_pr = utils.AUC_PR(vessels_in_mask, generated_in_mask,
                              os.path.join(auc_out_dir, "auc_pr_{}.npy".format(n_round)))
        binarys_in_mask = utils.threshold_by_otsu(generated, test_masks)
        dice_coeff = utils.dice_coefficient_in_train(vessels_in_mask, binarys_in_mask)
        acc, sensitivity, specificity = utils.misc_measures(vessels_in_mask, binarys_in_mask)
        utils.print_metrics(n_round + 1, auc_pr=auc_pr, auc_roc=auc_roc,
                            dice_coeff=dice_coeff, acc=acc, sensitivity=sensitivity,
                            specificity=specificity, type='TESTING')

        # save the segmented test images
        segmented_vessel = utils.remain_in_mask(generated, test_masks)
        for index in range(segmented_vessel.shape[0]):
            Image.fromarray((segmented_vessel[index, :, :] * 255).astype(np.uint8)).save(
                os.path.join(img_out_dir, "{}_{:02}_segmented.png".format(n_round, index + 1)))
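threshold_by_otsu binarizes the generated probability maps before the Dice and confusion-matrix measures. A plausible sketch, assuming it uses scikit-image's Otsu threshold on the masked pixels and returns the flattened binary values:

import numpy as np
from skimage.filters import threshold_otsu

def threshold_by_otsu(pred_vessels, masks):
    # Compute a single Otsu threshold from the pixels inside the masks,
    # binarize the probability maps with it, and return the masked
    # values flattened, matching the shape of vessels_in_mask above.
    threshold = threshold_otsu(pred_vessels[masks == 1])
    binary = pred_vessels >= threshold
    return binary[masks == 1].flatten()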
Example No. 4
                    Image.fromarray(ori_imgs[index, ...].astype(np.uint8)).save(
                        os.path.join(vessels_dir, os.path.basename(filenames[index])))

                # compare with the ground truth
                comp_dir = comparison_out.format(os.path.basename(dataset),
                                                 os.path.basename(result))
                if not os.path.isdir(comp_dir):
                    os.makedirs(comp_dir)
                for index in range(gt_vessels.shape[0]):
                    diff_map = utils.difference_map(gt_vessels[index, ...],
                                                    pred_vessels[index, ...],
                                                    masks[index, ...])
                    Image.fromarray(diff_map.astype(np.uint8)).save(
                        os.path.join(comp_dir, os.path.basename(filenames[index])))

            # skip the ground truth
            if "1st_manual" not in result:
                # print metrics
                print("-- {} --".format(os.path.basename(result)))
                print("dice coefficient : {}".format(
                    utils.dice_coefficient(gt_vessels, pred_vessels, masks)))
                print("f1 score : {}, accuracy : {}, specificity : {}, sensitivity : {}".format(
                    *utils.misc_measures(gt_vessels, pred_vessels, masks)))

                # compute points for the ROC and precision-recall curves
                method = os.path.basename(result)
                methods.append(method)
                if method == 'CRFs' or method == '2nd_manual':
                    # binary outputs yield a single operating point
                    cm = confusion_matrix(gt_vessels_in_mask, pred_vessels_in_mask)
                    fpr = 1 - 1. * cm[0, 0] / (cm[0, 1] + cm[0, 0])
                    tpr = 1. * cm[1, 1] / (cm[1, 0] + cm[1, 1])
                    prec = 1. * cm[1, 1] / (cm[0, 1] + cm[1, 1])
                    recall = tpr
                else:
                    fpr, tpr, _ = roc_curve(gt_vessels_in_mask, pred_vessels_in_mask)
                    prec, recall, _ = precision_recall_curve(gt_vessels_in_mask,
                                                             pred_vessels_in_mask)
                fprs.append(fpr)
                tprs.append(tpr)
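The three-argument dice_coefficient used here is also external to the snippet. A sketch under the assumption that it evaluates Dice = 2|A∩B| / (|A| + |B|) over the masked pixels of binary maps:

import numpy as np

def dice_coefficient(true_vessels, pred_vessels, masks):
    # Dice = 2 * |A ∩ B| / (|A| + |B|) over the pixels inside the masks.
    true_in_mask = true_vessels[masks == 1].astype(bool)
    pred_in_mask = pred_vessels[masks == 1].astype(bool)
    intersection = np.count_nonzero(true_in_mask & pred_in_mask)
    return 2. * intersection / (np.count_nonzero(true_in_mask) +
                                np.count_nonzero(pred_in_mask))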
Example No. 5
                    diff_map = utils.difference_map(gt_vessels[index, ...],
                                                    pred_vessels[index, ...],
                                                    masks[index, ...])
                    Image.fromarray(diff_map.astype(np.uint8)).save(
                        os.path.join(comp_dir,
                                     os.path.basename(filenames[index])))

            # skip the ground truth
            if "1st_manual" not in result:
                # print metrics
                print("-- {} --".format(os.path.basename(result)))
                print("dice coefficient : {0:.4f}".format(
                    utils.dice_coefficient(gt_vessels, pred_vessels, masks)))
                print(
                    "f1 score : {0:.4f}, accuracy : {1:.4f}, specificity : {2:.4f}, sensitivity : {3:.4f}"
                    .format(
                        *utils.misc_measures(gt_vessels, pred_vessels, masks)))

                # compute points for the ROC and precision-recall curves
                method = os.path.basename(result)
                methods.append(method)
                if method == 'CRFs' or method == '2nd_manual':
                    cm = confusion_matrix(gt_vessels_in_mask,
                                          pred_vessels_in_mask)
                    fpr = 1 - 1. * cm[0, 0] / (cm[0, 1] + cm[0, 0])
                    tpr = 1. * cm[1, 1] / (cm[1, 0] + cm[1, 1])
                    prec = 1. * cm[1, 1] / (cm[0, 1] + cm[1, 1])
                    recall = tpr
                else:
                    fpr, tpr, _ = roc_curve(gt_vessels_in_mask,
                                            pred_vessels_in_mask)
                    prec, recall, _ = precision_recall_curve(
                        gt_vessels_in_mask, pred_vessels_in_mask)
                fprs.append(fpr)
                tprs.append(tpr)
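difference_map is not shown either. One common convention, assumed here rather than taken from the repository, colors true positives green, false negatives blue, and false positives red:

import numpy as np

def difference_map(gt_vessel, pred_vessel, mask):
    # Build an RGB map comparing a binary prediction with the ground
    # truth inside the mask: green = true positive, blue = missed
    # vessel (false negative), red = spurious vessel (false positive).
    gt = (gt_vessel > 0) & (mask > 0)
    pred = (pred_vessel > 0) & (mask > 0)
    diff = np.zeros(gt.shape + (3,), dtype=np.uint8)
    diff[gt & pred] = (0, 255, 0)     # true positive
    diff[gt & ~pred] = (0, 0, 255)    # false negative
    diff[~gt & pred] = (255, 0, 0)    # false positive
    return diff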