def measure(self, generated, vessels, masks, num_data, iter_time, phase, total_time):
    # masking
    vessels_in_mask, generated_in_mask = utils.pixel_values_in_mask(
        vessels, generated, masks)

    # average processing time per image in milliseconds
    avg_pt = (total_time / num_data) * 1000

    # evaluate Area Under the Curve of ROC and Precision-Recall
    auc_roc = utils.AUC_ROC(vessels_in_mask, generated_in_mask)
    auc_pr = utils.AUC_PR(vessels_in_mask, generated_in_mask)

    # binarize to calculate Dice Coefficient
    binarys_in_mask = utils.threshold_by_otsu(generated, masks)
    dice_coeff = utils.dice_coefficient_in_train(vessels_in_mask, binarys_in_mask)
    acc, sensitivity, specificity = utils.misc_measures(
        vessels_in_mask, binarys_in_mask)
    score = auc_pr + auc_roc + dice_coeff + acc + sensitivity + specificity

    # auc_sum is used to select the best model during training
    # (earlier versions used auc_roc + auc_pr regardless of stage)
    auc_sum = dice_coeff + acc + auc_pr

    # print information
    ord_output = collections.OrderedDict([('auc_pr', auc_pr), ('auc_roc', auc_roc),
                                          ('dice_coeff', dice_coeff), ('acc', acc),
                                          ('sensitivity', sensitivity),
                                          ('specificity', specificity),
                                          ('score', score), ('auc_sum', auc_sum),
                                          ('best_auc_sum', self.best_auc_sum),
                                          ('avg_pt', avg_pt)])
    utils.print_metrics(iter_time, ord_output)

    # write to TensorBoard in train mode only
    if phase == 'train':
        self.model.measure_assign(auc_pr, auc_roc, dice_coeff, acc, sensitivity,
                                  specificity, score, iter_time)
    elif phase == 'test':
        # save in npy format for evaluation
        utils.save_obj(vessels_in_mask, generated_in_mask,
                       os.path.join(self.auc_out_dir, "auc_roc.npy"),
                       os.path.join(self.auc_out_dir, "auc_pr.npy"))

    return auc_sum
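# NOTE: the utils module is not shown in this excerpt. The following is a
# minimal, illustrative sketch of what utils.AUC_ROC and utils.AUC_PR could
# look like with scikit-learn, assuming vessels_in_mask holds binary labels
# and generated_in_mask holds probabilities, both flattened to 1-D over the
# in-mask pixels. Function bodies are assumptions, not the repository's code.
import numpy as np
from sklearn.metrics import roc_auc_score, precision_recall_curve, auc


def AUC_ROC(true_in_mask, pred_in_mask):
    # area under the ROC curve, computed on in-mask pixels only
    return roc_auc_score(true_in_mask.flatten(), pred_in_mask.flatten())


def AUC_PR(true_in_mask, pred_in_mask):
    # area under the precision-recall curve, computed on in-mask pixels only
    precision, recall, _ = precision_recall_curve(
        true_in_mask.flatten(), pred_in_mask.flatten())
    return auc(recall, precision)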
def measure(self, generated, vessels, masks, num_data, iter_time, phase, total_time):
    # masking
    vessels_in_mask, generated_in_mask = utils.pixel_values_in_mask(
        vessels, generated, masks)

    # average processing time per image in milliseconds
    avg_pt = (total_time / num_data) * 1000

    # evaluation
    auc_roc = utils.AUC_ROC(vessels_in_mask, generated_in_mask)
    auc_pr = utils.AUC_PR(vessels_in_mask, generated_in_mask)
    binarys_in_mask = utils.threshold_by_otsu(generated, masks)
    dice_coeff = utils.dice_coefficient_in_train(vessels_in_mask, binarys_in_mask)
    acc, sensitivity, specificity = utils.misc_measures(
        vessels_in_mask, binarys_in_mask)
    score = auc_pr + auc_roc + dice_coeff + acc + sensitivity + specificity

    # print information
    ord_output = collections.OrderedDict([('auc_pr', auc_pr), ('auc_roc', auc_roc),
                                          ('dice_coeff', dice_coeff), ('acc', acc),
                                          ('sensitivity', sensitivity),
                                          ('specificity', specificity),
                                          ('score', score),
                                          ('best_dice_coeff', self.best_dice_coeff),
                                          ('avg_pt', avg_pt)])
    utils.print_metrics(iter_time, ord_output)

    # write to TensorBoard in train mode only
    if phase == 'train':
        self.model.measure_assign(auc_pr, auc_roc, dice_coeff, acc, sensitivity,
                                  specificity, score, iter_time)
    elif phase == 'test':
        # save in npy format for evaluation
        utils.save_obj(vessels_in_mask, generated_in_mask,
                       os.path.join(self.auc_out_dir, "auc_roc.npy"),
                       os.path.join(self.auc_out_dir, "auc_pr.npy"))

    return dice_coeff
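# NOTE: a minimal sketch of the masking and Otsu-binarization helpers called
# above, assuming masks are field-of-view masks (nonzero inside the FOV) with
# the same spatial shape as the predictions. Hypothetical bodies; the actual
# utils implementation may handle shapes and flattening differently.
import numpy as np
from skimage.filters import threshold_otsu


def pixel_values_in_mask(true_vessels, pred_vessels, masks):
    # keep only pixels inside the field-of-view mask, flattened to 1-D
    inside = masks > 0
    return true_vessels[inside].flatten(), pred_vessels[inside].flatten()


def threshold_by_otsu(pred_vessels, masks, flatten=True):
    # pick one global Otsu threshold from the in-mask probabilities,
    # then binarize the whole prediction map with it
    threshold = threshold_otsu(pred_vessels[masks > 0])
    binarized = np.zeros(pred_vessels.shape, dtype=np.uint8)
    binarized[pred_vessels >= threshold] = 1
    if flatten:
        return binarized[masks > 0].flatten()
    return binarized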
# visualize results
if "V-GAN" in result or "DRIU" in result or "1st_manual" in result:
    test_dir = testdata.format(os.path.basename(dataset))
    ori_imgs = utils.load_images_under_dir(test_dir)
    vessels_dir = vessels_out.format(os.path.basename(dataset),
                                     os.path.basename(result))
    filenames = utils.all_files_under(result)
    if not os.path.isdir(vessels_dir):
        os.makedirs(vessels_dir)

    for index in range(gt_vessels.shape[0]):
        # binarize the prediction for a single image with Otsu thresholding
        # (an F1-maximizing threshold, utils.threshold_by_f1, was used previously)
        thresholded_vessel = utils.threshold_by_otsu(
            np.expand_dims(pred_vessels[index, ...], axis=0),
            np.expand_dims(masks[index, ...], axis=0),
            flatten=False) * 255
        # black out non-vessel pixels in the original image
        ori_imgs[index, ...][np.squeeze(thresholded_vessel, axis=0) == 0] = (0, 0, 0)
        Image.fromarray(ori_imgs[index, ...].astype(np.uint8)).save(
            os.path.join(vessels_dir, os.path.basename(filenames[index])))

    # compare with the ground truth
    comp_dir = comparison_out.format(os.path.basename(dataset),
                                     os.path.basename(result))
    if not os.path.isdir(comp_dir):
        os.makedirs(comp_dir)

    dice_list = []
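# NOTE: a minimal sketch of the Dice and confusion-matrix style measures used
# throughout this code, assuming both inputs are flattened binary arrays over
# the in-mask pixels. Illustrative only; the repository's utils functions of
# the same names may be implemented differently.
import numpy as np


def dice_coefficient_in_train(true_in_mask, binary_in_mask):
    # Dice = 2 * |A intersect B| / (|A| + |B|)
    intersection = np.sum(true_in_mask * binary_in_mask)
    denom = np.sum(true_in_mask) + np.sum(binary_in_mask)
    return 2.0 * intersection / denom if denom > 0 else 1.0


def misc_measures(true_in_mask, binary_in_mask):
    # accuracy, sensitivity (recall on vessels), specificity (recall on background)
    tp = np.sum((true_in_mask == 1) & (binary_in_mask == 1))
    tn = np.sum((true_in_mask == 0) & (binary_in_mask == 0))
    fp = np.sum((true_in_mask == 0) & (binary_in_mask == 1))
    fn = np.sum((true_in_mask == 1) & (binary_in_mask == 0))
    acc = (tp + tn) / float(tp + tn + fp + fn)
    sensitivity = tp / float(tp + fn) if (tp + fn) > 0 else 0.0
    specificity = tn / float(tn + fp) if (tn + fp) > 0 else 0.0
    return acc, sensitivity, specificity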
# G
gan_x_test, gan_y_test = utils.input2gan(val_imgs, val_vessels, d_out_shape)
loss, acc = gan.evaluate(gan_x_test, gan_y_test, batch_size=batch_size, verbose=0)
utils.print_metrics(n_round + 1, acc=acc, loss=loss, type='GAN')

# save the weights
g.save_weights(os.path.join(model_out_dir, "g_{}_{}_{}.h5".format(
    n_round, FLAGS.discriminator, FLAGS.ratio_gan2seg)))

# update step sizes, learning rates
scheduler.update_steps(n_round)
K.set_value(d.optimizer.lr, scheduler.get_lr())
K.set_value(gan.optimizer.lr, scheduler.get_lr())

# evaluate on test images
if n_round in rounds_for_evaluation:
    generated = g.predict(test_imgs, batch_size=batch_size)
    generated = np.squeeze(generated, axis=3)
    vessels_in_mask, generated_in_mask = utils.pixel_values_in_mask(
        test_vessels, generated, test_masks)
    auc_roc = utils.AUC_ROC(vessels_in_mask, generated_in_mask,
                            os.path.join(auc_out_dir, "auc_roc_{}.npy".format(n_round)))
    auc_pr = utils.AUC_PR(vessels_in_mask, generated_in_mask,
                          os.path.join(auc_out_dir, "auc_pr_{}.npy".format(n_round)))
    binarys_in_mask = utils.threshold_by_otsu(generated, test_masks)
    dice_coeff = utils.dice_coefficient_in_train(vessels_in_mask, binarys_in_mask)
    acc, sensitivity, specificity = utils.misc_measures(vessels_in_mask, binarys_in_mask)
    utils.print_metrics(n_round + 1, auc_pr=auc_pr, auc_roc=auc_roc,
                        dice_coeff=dice_coeff, acc=acc, sensitivity=sensitivity,
                        specificity=specificity, type='TESTING')

    # save test images
    segmented_vessel = utils.remain_in_mask(generated, test_masks)
    for index in range(segmented_vessel.shape[0]):
        Image.fromarray((segmented_vessel[index, :, :] * 255).astype(np.uint8)).save(
            os.path.join(img_out_dir, str(n_round) + "_{:02}_segmented.png".format(index + 1)))
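# NOTE: the scheduler object is not defined in this excerpt. Below is a
# minimal, hypothetical step-decay scheduler that is compatible with the
# scheduler.update_steps(n_round) / scheduler.get_lr() calls above; the class
# name, constructor arguments, and decay schedule are assumptions made for
# illustration only.
class Scheduler:
    def __init__(self, schedules, init_lr=2e-4):
        # schedules maps a round index to the learning rate that takes effect
        # from that round on, e.g. {0: 2e-4, 50: 2e-5, 100: 2e-6}
        self.schedules = schedules
        self.lr = init_lr

    def update_steps(self, n_round):
        # switch the learning rate when a scheduled round is reached
        if n_round in self.schedules:
            self.lr = self.schedules[n_round]

    def get_lr(self):
        return self.lr

# With such an object, the training loop pushes the current learning rate into
# the Keras optimizers exactly as above, via K.set_value(d.optimizer.lr, ...)
# and K.set_value(gan.optimizer.lr, ...) at the end of each round.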