import itertools
import os
from threading import Thread

import nibabel as nib
import numpy as np
import torch

# Project-internal helpers (printProgressBar, ThreshSizeBinarizer, NIC_Image,
# compute_segmentation_metrics, compute_avg_std_metrics_list,
# get_instructions_from_centers, zeropad_sample, remove_zeropad_volume,
# ElapsedTimeEstimator, process_sample_metrics) are assumed to be imported from
# the repository's own modules.


def thresh_size_search_single(result_set, images, thresholds, lesion_sizes, compute_lesion_metrics=False):
    # Collect the ground-truth label volumes and the corresponding predicted
    # probability volumes (None when a sample has no prediction in result_set)
    true_vols, prob_vols = [], []
    for img in images:
        true_vols.append(img.labels[0])
        prob_vols.append(result_set[img.id] if img.id in result_set else None)

    metrics_list = list()
    metrics_names = list()
    for n, (thresh, lesion_size) in enumerate(itertools.product(thresholds, lesion_sizes)):
        printProgressBar(n, len(thresholds) * len(lesion_sizes), suffix=" parameters evaluated")

        # Binarize each probability volume and evaluate it against its ground truth
        metrics_iter = list()
        for lesion_probs, true_vol in zip(prob_vols, true_vols):
            if lesion_probs is None:
                continue
            rec_vol = ThreshSizeBinarizer(thresh, lesion_size).binarize(lesion_probs)
            metrics_iter.append(
                compute_segmentation_metrics(true_vol, rec_vol, lesion_metrics=compute_lesion_metrics))

        # Average the metrics for this (threshold, lesion size) pair and store them
        m_avg_std = compute_avg_std_metrics_list(metrics_iter)
        metrics_list.append(m_avg_std)
        metrics_names.append("th={}_ls={}".format(thresh, lesion_size))

    printProgressBar(len(thresholds) * len(lesion_sizes), len(thresholds) * len(lesion_sizes),
                     suffix=" parameters evaluated")
    return metrics_list, metrics_names
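
# Usage sketch (illustrative, not part of the original code): evaluating a small
# grid of probability thresholds and minimum lesion sizes with the single-threaded
# search. The grid values below are arbitrary examples; `result_set` is assumed to
# map sample ids to predicted probability volumes and `images` to be a list of
# NIC_Image samples, as in the function above.
def example_thresh_size_search_single(result_set, images):
    thresholds = [0.3, 0.4, 0.5, 0.6]
    lesion_sizes = [10, 20, 50]
    metrics_list, metrics_names = thresh_size_search_single(
        result_set, images, thresholds, lesion_sizes, compute_lesion_metrics=True)
    for name, metrics in zip(metrics_names, metrics_list):
        print(name, metrics)
    return metrics_list, metrics_names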
def __init__(self, path, modalities=('t1w',), nvols=229):
    super().__init__()
    print("Loading ATLAS dataset...")

    dataset_path = os.path.expanduser(path)
    mni_brain_mask = nib.load(os.path.join(
        os.path.expanduser('~/atlases/'), 'atlas_brain_mask.nii.gz')).get_data()
    # One-off snippet originally used to generate the brain mask file above:
    # img = nib.Nifti1Image(mni_brain_mask.astype('uint8'), np.eye(4))
    # img.to_filename('atlas_brain_mask.nii.gz')

    loaded_samples = 0
    for root, subdirs, files in os.walk(dataset_path):
        if not any('t1w' in filename for filename in files):
            continue

        printProgressBar(loaded_samples, nvols, suffix='samples loaded')
        loaded_samples += 1

        t1_path = [os.path.join(root, filename) for filename in files if 't1w' in filename][0]
        lesion_paths = [os.path.join(root, filename) for filename in files if 'LesionSmooth' in filename]

        # Build a sample id from the site, timepoint and subject parts of the path
        sample_id = '{}_{}_{}'.format(
            t1_path[t1_path.find('Site') + 4],
            t1_path[t1_path.find('/t0') + 3],
            os.path.basename(t1_path).split('_')[0])

        # Load the volume to check its dimensions (not the same for all train samples)
        nib_file = nib.load(t1_path)
        vol = nib_file.get_data()

        data = np.zeros((len(modalities),) + vol.shape, dtype='float32')
        labels = np.zeros((1,) + vol.shape, dtype='float32')

        # DATA: T1 masked with the MNI-space brain mask
        data[0] = vol * mni_brain_mask
        foreground_mask = mni_brain_mask

        # LABELS: union of all (possibly several) smoothed lesion masks
        for lesion_file in lesion_paths:
            labels = np.logical_or(nib.load(lesion_file).get_data() > 0, labels)

        sample = NIC_Image(sample_id, nib_file, data, foreground_mask, labels)
        self.train.append(sample)

        if loaded_samples >= nvols:  # Stop once nvols samples have been loaded
            break

    printProgressBar(nvols, nvols, suffix='samples loaded')
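
# Usage sketch (illustrative, not part of the original code): the __init__ above
# belongs to a dataset class whose name is not shown in this snippet, so the class
# is passed in here as `dataset_cls`; the ATLAS path is a hypothetical example.
def example_load_atlas(dataset_cls, path='~/data/ATLAS/'):
    dataset = dataset_cls(path, modalities=('t1w',), nvols=229)
    for sample in dataset.train:
        print(sample.id, sample.data.shape, sample.labels.shape)
    return dataset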
def generate_instructions(self, images):
    assert isinstance(images, list) and all(
        [isinstance(image, NIC_Image) for image in images])

    set_instructions = []
    for idx, image in enumerate(images):
        printProgressBar(idx, len(images), suffix='samples processed')

        centers = self.sampler.get_centers(image)
        if isinstance(centers, tuple):
            # Samplers that return two sets of centers: lesion-centered and uniform
            pos_centers, unif_centers = centers
            lesion_instructions = get_instructions_from_centers(
                image.id, pos_centers, self.in_shape, self.out_shape,
                augment_to=self.augment_positives, autoencoder=self.autoencoder)
            unif_instructions = get_instructions_from_centers(
                image.id, unif_centers, self.in_shape, self.out_shape,
                augment_to=None, autoencoder=self.autoencoder)
            image_instructions = lesion_instructions + unif_instructions
        else:
            image_instructions = get_instructions_from_centers(
                image.id, centers, self.in_shape, self.out_shape,
                augment_to=self.augment_positives, autoencoder=self.autoencoder)

        set_instructions += image_instructions

    printProgressBar(len(images), len(images), suffix='samples processed')
    return set_instructions
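
# Usage sketch (illustrative, not part of the original code): the object owning
# generate_instructions above is referred to as `instruction_generator` here; its
# class name is not shown in this snippet. Each returned instruction encodes which
# patch to extract from which image, with optional augmentation of the positive
# (lesion-centered) patches.
def example_generate_instructions(instruction_generator, train_images):
    instructions = instruction_generator.generate_instructions(train_images)
    print("Generated {} patch instructions from {} images".format(
        len(instructions), len(train_images)))
    return instructions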
def thresh_size_search(result_set, images, thresholds, lesion_sizes, compute_lesion_metrics=False):
    # Threaded version of thresh_size_search_single (roughly 6x faster)
    true_vols, prob_vols = [], []
    for img in images:
        true_vols.append(img.labels[0])
        prob_vols.append(result_set[img.id] if img.id in result_set else None)

    metrics_list = list()
    metrics_names = list()
    for n, (thresh, lesion_size) in enumerate(itertools.product(thresholds, lesion_sizes)):
        printProgressBar(n, len(thresholds) * len(lesion_sizes), suffix=" parameters evaluated")

        # Binarize and evaluate each volume in its own thread, writing the result
        # into a pre-allocated slot so threads never collide
        threads = []
        metrics_iter = [None] * len(prob_vols)
        for sample_idx, (lesion_probs, true_vol) in enumerate(zip(prob_vols, true_vols)):
            if lesion_probs is None:
                continue
            process = Thread(target=process_sample_metrics,
                             args=[true_vol, lesion_probs, thresh, lesion_size,
                                   compute_lesion_metrics, metrics_iter, sample_idx])
            process.start()
            threads.append(process)

        # Wait until every volume has been processed, then drop the None entries
        # left by samples without a prediction (in case of an incomplete prob set)
        for process in threads:
            process.join()
        metrics_iter = [m for m in metrics_iter if m is not None]

        # Average the metrics for this (threshold, lesion size) pair and store them
        m_avg_std = compute_avg_std_metrics_list(metrics_iter)
        metrics_list.append(m_avg_std)
        metrics_names.append("th={}_ls={}".format(thresh, lesion_size))

    printProgressBar(len(thresholds) * len(lesion_sizes), len(thresholds) * len(lesion_sizes),
                     suffix=" parameters evaluated")
    return metrics_list, metrics_names
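
# The worker process_sample_metrics is referenced above but not defined in this
# snippet. A minimal sketch consistent with the call site (argument order assumed
# from the Thread args), not the repository's actual implementation, could be:
def process_sample_metrics_sketch(true_vol, lesion_probs, thresh, lesion_size,
                                  compute_lesion_metrics, metrics_iter, sample_idx):
    # Binarize the probability volume and store the metrics in this sample's slot,
    # so concurrent threads never write to the same index
    rec_vol = ThreshSizeBinarizer(thresh, lesion_size).binarize(lesion_probs)
    metrics_iter[sample_idx] = compute_segmentation_metrics(
        true_vol, rec_vol, lesion_metrics=compute_lesion_metrics)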
def predict_sample(self, model, sample_in):
    assert isinstance(sample_in, NIC_Image)
    print("Predicting sample with id: {}".format(sample_in.id))

    sample = zeropad_sample(sample_in, self.zeropad_shape)
    batch_size = self.instr_gen.bs
    sample_generator, instructions = self.instr_gen.build_patch_generator(
        sample, return_instructions=True)

    # Accumulators for the overlapping patch predictions
    voting_img = np.zeros((self.num_classes,) + sample.data[0].shape, dtype=np.float32)
    counting_img = np.zeros_like(voting_img)

    model.eval()
    model.to(self.device)

    if self.uncertainty_passes > 1:
        try:
            model.activate_dropout_testing(
                p_out=self.uncertainty_dropout, dotype=self.uncertainty_dotype)
            print("Activated uncertainty dropout with p={}".format(self.uncertainty_dropout))
        except AttributeError as ae:
            print(str(ae), "Dropout at test time not configured for this model")
            self.uncertainty_passes = 1

    with torch.no_grad():  # Turn off autograd (faster execution)
        eta = ElapsedTimeEstimator(total_iters=len(sample_generator))
        for batch_idx, (x, y) in enumerate(sample_generator):
            printProgressBar(batch_idx, len(sample_generator),
                             suffix=' patches predicted - ETA {}'.format(eta.update(batch_idx + 1)))

            # Send the generated x, y batch to the GPU
            if isinstance(x, list):
                for i in range(len(x)):
                    x[i] = x[i].to(self.device)
            else:
                x = x.to(self.device)
            if isinstance(y, list):
                for i in range(len(y)):
                    y[i] = y[i].to(self.device)
            else:
                y = y.to(self.device)

            y_pred = model(x)
            if self.uncertainty_passes > 1:
                # Average several stochastic forward passes (dropout at test time)
                for i in range(1, self.uncertainty_passes):
                    y_pred = y_pred + model(x)
                y_pred = y_pred / self.uncertainty_passes
            y_pred = y_pred.cpu().numpy()

            if len(y_pred.shape) == 4:  # Add a third spatial dimension to 2D patches
                y_pred = np.expand_dims(y_pred, axis=-1)

            batch_slice = slice(batch_idx * batch_size, (batch_idx + 1) * batch_size)
            batch_instructions = instructions[batch_slice]
            assert len(y_pred) == len(batch_instructions)

            # Accumulate each patch prediction at its location in the volume
            for patch_pred, patch_instruction in zip(y_pred, batch_instructions):
                voting_img[patch_instruction.data_patch_slice] += patch_pred
                counting_img[patch_instruction.data_patch_slice] += np.ones_like(patch_pred)

    printProgressBar(len(sample_generator), len(sample_generator),
                     suffix=' patches predicted - {} s.'.format(eta.get_elapsed_time()))

    if self.uncertainty_passes > 1:
        model.deactivate_dropout_testing()

    counting_img[counting_img == 0.0] = 1.0  # Avoid division by zero
    volume_probs = np.divide(voting_img, counting_img)

    # Keep only the lesion class probabilities (or squeeze single-class outputs)
    if self.lesion_class is not None:
        volume_probs = volume_probs[self.lesion_class]
    else:
        volume_probs = np.squeeze(volume_probs, axis=0)

    volume_probs = remove_zeropad_volume(volume_probs, self.zeropad_shape)
    assert np.array_equal(volume_probs.shape, sample_in.foreground.shape), \
        (volume_probs.shape, sample_in.foreground.shape)
    return volume_probs
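
# Usage sketch (illustrative, not part of the original code): the object owning
# predict_sample above is referred to as `tester` here; its class name is not shown
# in this snippet. The threshold and minimum lesion size are arbitrary examples.
def example_predict_and_binarize(tester, model, sample, thresh=0.5, lesion_size=20):
    volume_probs = tester.predict_sample(model, sample)
    # Turn the averaged probability map into a binary lesion mask
    rec_vol = ThreshSizeBinarizer(thresh, lesion_size).binarize(volume_probs)
    return volume_probs, rec_vol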