Example #1
def loader(img_path, mask_path):
    # 'nslice' is taken from the enclosing scope of the original script
    image, gt = utils.load_vol_brats(img_path, slicen=nslice)
    return image, gt
Example #2
def loader(img_path, mask_path):
    # 'nslice', 'seq_map' and 'seq' are taken from the enclosing scope;
    # only the requested MR sequence is returned, as a single-channel image
    image, gt = utils.load_vol_brats(img_path, slicen=nslice)
    return image[:, :, seq_map[seq]][:, :, None], gt
Example #3
infoclasses['ET'] = (3,)
infoclasses['CT'] = (1, 3)

for layer_name in layer_names:
    try:
        dissector = spatial.Dissector(model=model,
                                      layer_name=layer_name,
                                      seq='all')
    except Exception:
        # skip layers that cannot be dissected
        continue

    threshold_maps = dissector.get_threshold_maps(dataset_path=data_root_path,
                                                  save_path='results_ET/Dissection/simnet/threshold_maps/',
                                                  percentile=85,
                                                  loader=dataloader())

    image, gt = utils.load_vol_brats('../sample_vol/brats/Brats18_CBICA_AOP_1', slicen=105)

    mask_path = '../sample_vol/brats/Brats18_CBICA_AOP_1/mask.nii.gz'
    ROI = sitk.GetArrayFromImage(sitk.ReadImage(mask_path))[105, :, :]

    dissector.apply_threshold(image, threshold_maps,
                              nfeatures=25,
                              save_path='results_ET/Dissection/simnet/feature_maps/',
                              ROI=ROI)

    dissector.quantify_gt_features(image, gt,
                                   threshold_maps,
                                   nclasses=infoclasses,
                                   nfeatures=None,
                                   save_path='results_ET/Dissection/simnet/csv/',
                                   save_fmaps=False,
                                   ROI=ROI)
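
The `dataloader()` call above is not defined in this fragment. A minimal sketch, assuming it is a small factory that returns a loader callback of the same shape as the ones in Examples #1 and #2 (its body and the fixed slice index are assumptions):

def dataloader(nslice=105):
    # return a callback matching the loaders shown in Examples #1 and #2
    def loader(img_path, mask_path):
        image, gt = utils.load_vol_brats(img_path, slicen=nslice)
        return image, gt
    return loader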
Example #4
    # tail of a load_model(...) call; its opening arguments are truncated in this snippet
    custom_objects={
        'gen_dice_loss': gen_dice_loss,
        'dice_whole_metric': dice_whole_metric,
        'dice_core_metric': dice_core_metric,
        'dice_en_metric': dice_en_metric
    })

model.load_weights(
    '/home/pi/Projects/BioExp/trained_models/densedrop/model_lrsch.hdf5',
    by_name=True)

if __name__ == '__main__':
    list_ = []
    for volume in [32, 20, 24, 53, 12, 14]:
        for slice_ in range(20, 140, 5):
            test_image, gt = load_vol_brats(test_path[volume], slice_, pad=0)

            D = uncertainty(test_image)

            # for aleatoric
            mean, var = D.aleatoric(model, iterations=50)

            # for epistemic
            mean, var = D.epistemic(model, iterations=50)

            # for combined
            mean, var = D.combined(model, iterations=50)

            # only the variance from the combined pass is printed and recorded
            print(np.mean(var, axis=(0, 1, 2)))
            list_.append(np.mean(var, axis=(0, 1, 2)))
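
The loop only accumulates one uncertainty summary per slice in `list_`; the fragment does not show how these values are used afterwards. A minimal follow-up sketch, assuming one simply wants the average over all sampled slices:

summary = np.stack(list_)       # one summary (scalar or per-channel vector) per slice
print(summary.mean(axis=0))     # average uncertainty over all sampled slices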
Example #5
                   # tail of a load_model(...) call; its opening arguments are truncated in this snippet
                   custom_objects={
                       'gen_dice_loss': gen_dice_loss,
                       'dice_whole_metric': dice_whole_metric,
                       'dice_core_metric': dice_core_metric,
                       'dice_en_metric': dice_en_metric
                   })

weights_path = '/media/balaji/CamelyonProject/parth/checkpoints/double_headed_autoencoder/autoencoder_double_headed_no_skip_weights.hdf5'
# snapshot the current weights so they can be restored before ablating each layer
model.save_weights(weights_path)

for layer_name in layers_to_consider:
    # for i, file in enumerate(glob(data_root_path + '*')[5:5 + num_images]):

    model.load_weights(weights_path)
    image, gt = utils.load_vol_brats(
        '/media/balaji/CamelyonProject/parth/brats_2018/val/Brats18_2013_3_1',
        slicen=78)

    A = ablation.Ablate(model,
                        weights_path,
                        metric,
                        layer_name,
                        image[None, ...],
                        gt,
                        classes=infoclasses,
                        image_name=str(i))  # 'i' comes from the loop that is commented out above
    path = 'results/'
    os.makedirs(path, exist_ok=True)

    df1 = A.ablate_filters(save_path='./results/', step=2)
    # if i == 0:
Example #6
data_root_path = '../sample_vol/'

model_path = '../trained_models/U_resnet/ResUnet.h5'
weights_path = '../trained_models/U_resnet/ResUnet.40_0.559.hdf5'


model = load_model(model_path, custom_objects={'gen_dice_loss': gen_dice_loss,
                                               'dice_whole_metric': dice_whole_metric,
                                               'dice_core_metric': dice_core_metric,
                                               'dice_en_metric': dice_en_metric})
model.load_weights(weights_path)

infoclasses = {}
for i in range(1): infoclasses['class_'+str(i)] = (i,)
infoclasses['whole'] = (1,2,3)

layer_name = 'conv2d_3'
test_image, gt = utils.load_vol_brats('../sample_vol/Brats18_CBICA_ARZ_1', slicen=78)

A = spatial.Ablation(model=model,
                     weights_pth=weights_path,
                     metric=dice_label_coef,
                     layer_name=layer_name,
                     test_image=test_image,
                     gt=gt,
                     classes=infoclasses,
                     nclasses=4)

df = A.ablate_filter(step=1)
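
Nothing in this fragment shows what happens to `df`. Assuming `ablate_filter` returns a pandas DataFrame of per-filter scores, as `ablate_filters` does in Example #8, it could be written out for later comparison (the output filename is an assumption):

df.to_csv(layer_name + '_ablation.csv', index=False)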

Example #7
        # tail of a load_model(...) call; its opening (including custom_objects={) is truncated in this snippet
        'gen_dice_loss': gen_dice_loss,
        'dice_whole_metric': dice_whole_metric,
        'dice_core_metric': dice_core_metric,
        'dice_en_metric': dice_en_metric
    })

model_no_drop.load_weights(
    '/home/parth/Interpretable_ML/saved_models/densenet/densenet.55_0.522.hdf5',
    by_name=True)

if __name__ == '__main__':
    list_ = []
    for volume in [10]:
        for slice_ in range(78, 79):
            test_image, gt = load_vol_brats(glob(test_path)[volume],
                                            slice_,
                                            pad=8)

            D = uncertainty(test_image)

            # for aleatoric
            mean, var = D.aleatoric(model_no_drop, iterations=50)

            D.save(mean, var, gt)

            # for epistemic
            mean, var = D.epistemic(model, iterations=50)

            D.save(mean, var, gt)

            # # for combined
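
The snippet is cut off at the combined-uncertainty step. Based on the identical pattern in Example #4 and the `D.save` calls above, a sketch of how that block would presumably continue:

            # for combined
            mean, var = D.combined(model, iterations=50)

            D.save(mean, var, gt)
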
Example #8
for i in range(4): infoclasses['class_'+str(i)] = (i,)
infoclasses['whole'] = (1,2,3,)
infoclasses['ET'] = (3,)
infoclasses['CT'] = (1,3,)
num_images = 5

model = load_model(model_path, compile=False,
                   custom_objects={'gen_dice_loss': gen_dice_loss,
                                   'dice_whole_metric': dice_whole_metric,
                                   'dice_core_metric': dice_core_metric,
                                   'dice_en_metric': dice_en_metric})


for layer_name in layers_to_consider:
    for i, file in tqdm(enumerate(glob(data_root_path + '*')[5:5 + num_images])):

        model.load_weights(weights_path)
        image, gt = utils.load_vol_brats(file, slicen=78)
        image = image[None, ...]

        A = ablation.Ablate(model, weights_path, dice_label_coef, layer_name,
                            image, gt, classes=infoclasses, image_name=str(i))
        path = 'results/'
        os.makedirs(path, exist_ok=True)

        df1 = A.ablate_filters(save_path='./results/', step=4)
        if i == 0:
            df = df1
        else:
            df.iloc[:, 1:] += df1.iloc[:, 1:]

    # average the per-filter scores over the sampled volumes
    df.iloc[:, 1:] = df.iloc[:, 1:] / (1. * num_images)
    print(df)