def ukbb_sax_transform(self):
    """Train/valid augmentation pipelines for UKBB short-axis data.

    Returns:
        dict: {'train': ..., 'valid': ...} torchsample Compose pipelines.
    """
    def _head():
        # Deterministic preprocessing shared by both splits (fresh
        # instances per pipeline).
        return [
            ts.PadNumpy(size=self.scale_size),
            ts.ToTensor(),
            ts.ChannelsFirst(),
            ts.TypeCast(['float', 'float']),
        ]

    train_transform = ts.Compose(_head() + [
        ts.RandomFlip(h=True, v=True, p=self.random_flip_prob),
        ts.RandomAffine(rotation_range=self.rotate_val,
                        translation_range=self.shift_val,
                        zoom_range=self.scale_val,
                        interp=('bilinear', 'nearest')),
        ts.NormalizeMedicPercentile(norm_flag=(True, False)),
        ts.RandomCrop(size=self.patch_size),
        ts.TypeCast(['float', 'long']),  # labels need long dtype for the loss
    ])
    valid_transform = ts.Compose(_head() + [
        ts.NormalizeMedicPercentile(norm_flag=(True, False)),
        ts.SpecialCrop(size=self.patch_size, crop_type=0),  # deterministic center crop
        ts.TypeCast(['float', 'long']),
    ])
    return {'train': train_transform, 'valid': valid_transform}
def ct_82_transform(self):
    """Train/valid pipelines for the CT-82 dataset.

    Returns:
        dict: {'train': ..., 'valid': ...} torchsample Compose pipelines.
    """
    def _pipeline(middle):
        # Shared preprocessing with the split-specific steps spliced in
        # between normalization/padding and the channel-add step.
        return ts.Compose([
            ts.ToTensor(),
            ts.RangeNormalize(0, 1),
            ts.Pad(size=self.scale_size),
            ts.TypeCast(['float', 'float']),
        ] + middle + [
            ts.AddChannel(axis=0),
        ])

    train_transform = _pipeline([
        RandomFlip3D(z=True, x=True, y=True, p=self.random_flip_prob),
        RandomAffine3D(degrees=self.rotate_val, translate=self.shift_val,
                       scale=self.scale_val, resample=Image.NEAREST),
        CustomCrop3D(size=self.patch_size, crop_type="random"),
    ])
    valid_transform = _pipeline([
        CustomCrop3D(size=self.patch_size, crop_type="center"),
    ])
    return {'train': train_transform, 'valid': valid_transform}
def get_multi_scale_crop_test_set(data_root, idx_file, scale_sizes, crop_size,
                                  aug_type='forty_crop', seg_root=None,
                                  mixture=False):
    """Build test-time-augmentation datasets: one dataset per
    (scale, crop position, flip) combination.

    Args:
        data_root: image root directory.
        idx_file: index file listing the samples.
        scale_sizes: iterable of rescale sizes to enumerate.
        crop_size: side length of the square crop.
        aug_type: only 'forty_crop' is handled (5 crops x 2 flips x scales);
            any other value returns an empty list.
        seg_root: segmentation root directory, used when ``mixture`` is True.
        mixture: also build a segmentation transform and pass seg data through.

    Returns:
        list of MyImageFolder datasets.
    """
    dsets = []
    if aug_type == 'forty_crop':
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])
        # crop_type: 0 center, 1 top-left, 2 top-right,
        #            3 bottom-right, 4 bottom-left
        # flip:      0 none, 1 horizontal
        for scale_size in scale_sizes:
            for crop_type in [0, 1, 2, 3, 4]:
                for flip in [0, 1]:
                    # BUGFIX: the original indexed ``flips[j]`` and
                    # ``crop_types[i]`` with the loop *values* j/i — correct
                    # only because each list equaled its own indices. Use the
                    # values directly so a change to either list can't break
                    # the indexing.
                    data_transform = transforms.Compose([
                        transforms.Scale(scale_size),
                        transforms.ToTensor(),
                        RandomFlip(flip),
                        SpecialCrop((crop_size, crop_size),
                                    crop_type=crop_type),
                        normalize,
                    ])
                    if mixture:
                        # Segmentation maps get the same flip but are scaled
                        # straight to crop size (no positional crop).
                        seg_transform = transforms.Compose([
                            transforms.Scale(crop_size),
                            transforms.ToTensor(),
                            RandomFlip(flip),
                            normalize,
                        ])
                        dsets.append(
                            MyImageFolder(root=data_root, idx_file=idx_file,
                                          transform=data_transform,
                                          seg_transform=seg_transform,
                                          seg_root=seg_root))
                    else:
                        dsets.append(
                            MyImageFolder(root=data_root, idx_file=idx_file,
                                          transform=data_transform))
    return dsets
def gsd_pCT_train_transform(self, seed=None):
    """Training augmentation pipeline for the Geneva Stroke dataset (pCT maps).

    Args:
        seed: integer seed shared by all random transforms so image and label
            receive the same spatial augmentation; drawn at random when
            omitted (torch requires an integer seed).
    """
    if seed is None:
        # seed must be an integer for torch
        seed = np.random.randint(0, 9999)

    random_steps = [
        RandomFlipTransform(axes=self.flip_axis, p=self.random_flip_prob,
                            seed=seed,
                            max_output_channels=self.max_output_channels),
        RandomElasticTransform(seed=seed, p=self.random_elastic_prob,
                               image_interpolation=Interpolation.BSPLINE,
                               max_displacement=self.max_deform,
                               num_control_points=self.elastic_control_points,
                               max_output_channels=self.max_output_channels),
        RandomAffineTransform(scales=self.scale_val, degrees=self.rotate_val,
                              isotropic=True, default_pad_value=0,
                              image_interpolation=Interpolation.BSPLINE,
                              seed=seed, p=self.random_affine_prob,
                              max_output_channels=self.max_output_channels),
        RandomNoiseTransform(p=self.random_noise_prob, std=self.noise_std,
                             seed=seed,
                             max_output_channels=self.max_output_channels),
    ]
    return ts.Compose([
        ts.ToTensor(),
        ts.Pad(size=self.scale_size),
        ts.TypeCast(['float', 'float']),
    ] + random_steps + [
        ts.ChannelsFirst(),
        # Todo apply channel wise normalisation
        ts.NormalizeMedic(norm_flag=(True, False)),
        # Todo eventually add random crop augmentation
        # (fork torchsample and fix the Random Crop bug)
        ts.TypeCast(['float', 'long']),
    ])
def gsd_pCT_train_transform(self, seed=None):
    """Training augmentation for the Geneva Stroke dataset (pCT maps):
    flip / elastic / affine deformation, per-channel standardization and
    additive noise.

    Args:
        seed: integer seed shared by the random transforms; a random one is
            drawn when omitted (torch requires an integer seed).
    """
    if seed is None:
        seed = np.random.randint(0, 9999)

    # Keyword arguments common to every custom random transform.
    shared = dict(seed=seed, max_output_channels=self.max_output_channels,
                  prudent=self.prudent)

    train_transform = ts.Compose([
        ts.ToTensor(),
        ts.Pad(size=self.scale_size),
        ts.TypeCast(['float', 'float']),
        RandomFlipTransform(axes=self.flip_axis,
                            flip_probability=self.flip_prob_per_axis,
                            p=self.random_flip_prob, **shared),
        RandomElasticTransform(max_displacement=self.max_deform,
                               num_control_points=self.elastic_control_points,
                               image_interpolation='bspline',
                               p=self.random_elastic_prob,
                               verbose=self.verbose, **shared),
        RandomAffineTransform(scales=self.scale_val, degrees=self.rotate_val,
                              translation=self.shift_val, isotropic=True,
                              default_pad_value=0,
                              image_interpolation='bspline',
                              p=self.random_affine_prob,
                              verbose=self.verbose, **shared),
        # Standardize the image channels only; leave the label channel as-is.
        StandardizeImage(norm_flag=[True, True, True, False]),
        RandomNoiseTransform(mean=self.noise_mean, std=self.noise_std,
                             p=self.random_noise_prob, **shared),
        # Todo eventually add random crop augmentation
        # (fork torchsample and fix the Random Crop bug)
        ts.ChannelsFirst(),
        ts.TypeCast(['float', 'float']),
    ])
    return train_transform
def isles2018_valid_transform(self, seed=None):
    """Validation pipeline for ISLES 2018: tensor conversion, padding,
    channel reordering and dtype casting only.

    ``seed`` is accepted for signature parity with the train transform but
    is unused (no random steps here).
    """
    steps = [
        ts.ToTensor(),
        ts.Pad(size=self.scale_size),
        ts.ChannelsFirst(),
        ts.TypeCast(['float', 'long']),  # image -> float, label -> long
    ]
    return ts.Compose(steps)
def isles2018_train_transform(self, seed=None):
    """Training pipeline for ISLES 2018 with random horizontal/vertical
    flips as the only augmentation.

    ``seed`` is accepted for signature parity but unused here.
    """
    steps = [
        ts.ToTensor(),
        ts.Pad(size=self.scale_size),
        ts.TypeCast(['float', 'float']),
        ts.RandomFlip(h=True, v=True, p=self.random_flip_prob),
        ts.ChannelsFirst(),
        ts.TypeCast(['float', 'long']),  # image -> float, label -> long
    ]
    return ts.Compose(steps)
def gsd_pCT_valid_transform(self, seed=None):
    """Validation pipeline for the Geneva Stroke dataset (pCT maps):
    no augmentation — padding, standardization and casting only.

    ``seed`` is accepted for signature parity but unused here.
    """
    steps = [
        ts.ToTensor(),
        ts.Pad(size=self.scale_size),
        ts.TypeCast(['float', 'float']),
        # Standardize the image channels only; leave the label channel as-is.
        StandardizeImage(norm_flag=[True, True, True, False]),
        ts.ChannelsFirst(),
        ts.TypeCast(['float', 'float']),
    ]
    return ts.Compose(steps)
def ultrasound_transform(self):
    """Train/valid augmentation pipelines for the ultrasound dataset.

    Returns:
        dict: {'train': ..., 'valid': ...} torchsample Compose pipelines.
    """
    def _head():
        # Preprocessing shared by both splits (fresh instances per pipeline).
        return [
            ts.ToTensor(),
            ts.TypeCast(['float']),
            ts.AddChannel(axis=0),
            ts.SpecialCrop(self.patch_size, 0),  # crop_type 0 = center crop
        ]

    train_transform = ts.Compose(_head() + [
        ts.RandomFlip(h=True, v=False, p=self.random_flip_prob),
        ts.RandomAffine(rotation_range=self.rotate_val,
                        translation_range=self.shift_val,
                        zoom_range=self.scale_val,
                        interp=('bilinear')),
        ts.StdNormalize(),
    ])
    valid_transform = ts.Compose(_head() + [
        ts.StdNormalize(),
    ])
    return {'train': train_transform, 'valid': valid_transform}
def test_3d_sax_transform(self):
    """Test-time pipeline for 3D short-axis volumes: pad to a multiple of
    the network's division factor, normalize, and add a leading channel.

    Returns:
        dict: {'test': Compose pipeline}.
    """
    steps = [
        ts.PadFactorNumpy(factor=self.division_factor),
        ts.ToTensor(),
        ts.ChannelsFirst(),
        ts.TypeCast(['float']),
        # NormalizeMedic replaced the percentile-based variant here.
        ts.NormalizeMedic(norm_flag=True),
        ts.ChannelsLast(),
        ts.AddChannel(axis=0),
    ]
    return {'test': ts.Compose(steps)}
def gsd_pCT_valid_transform(self, seed=None):
    """Validation pipeline for the Geneva Stroke dataset: pad, normalize
    and cast — no random augmentation.

    ``seed`` is accepted for signature parity but unused here.
    """
    steps = [
        ts.ToTensor(),
        ts.Pad(size=self.scale_size),
        ts.ChannelsFirst(),
        ts.TypeCast(['float', 'float']),
        # NormalizeMedic replaced the percentile-based variant here;
        # the SpecialCrop step remains disabled, matching training.
        ts.NormalizeMedic(norm_flag=(True, False)),
        ts.TypeCast(['float', 'long']),  # image -> float, label -> long
    ]
    return ts.Compose(steps)
def hms_sax_transform(self):
    """Placeholder train/valid pipelines for HMS short-axis data.

    2D stack input -> 3D high-resolution output segmentation. The intended
    steps (not yet implemented) are:
      * pad to a fixed size
      * convert to torch tensor, channels first
      * joint affine transformation
      * in-plane respiratory motion artefacts (translation and rotation)
      * random crop
      * intensity-range normalisation

    Returns:
        dict: {'train': ..., 'valid': ...}; both are currently identity
        (empty) Compose pipelines.
    """
    # BUGFIX: 'valid' was returned as a bare list while 'train' was a
    # ts.Compose — callers applying the transform would fail on the list.
    # Return the same callable type for both splits.
    train_transform = ts.Compose([])
    valid_transform = ts.Compose([])
    return {'train': train_transform, 'valid': valid_transform}
# Script fragment: split patients into train/test cohorts and build the
# augmentation pipelines for the Beijing dataset.
# NOTE(review): relies on names defined earlier in the file (c, random,
# patient_id_G/H sets, tr, Beijing_dataset, root_dir).
print("random seed: {}".format(c))
# Hold out a fixed-size test set per cohort (G: 20 patients, H: 200).
patient_id_G_test = random.sample(patient_id_G, 20)
patient_id_H_test = random.sample(patient_id_H, 200)
# Remaining patients form the training split (patient_id_* are sets).
patient_id_G_train = list(patient_id_G.difference(patient_id_G_test))
patient_id_H_train = list(patient_id_H.difference(patient_id_H_test))
#transformed_images = Beijing_dataset(root_dir,patient_id_G_train,patient_id_H_train)
# Training pipeline: brightness/gamma jitter, random flips and a small
# random affine, applied after tensor conversion and casting.
transform_pipeline_train = tr.Compose([
    tr.ToTensor(),
    tr.AddChannel(axis=0),
    tr.TypeCast('float'),
    # tr.RangeNormalize(0,1),
    tr.RandomBrightness(-.2, .2),
    tr.RandomGamma(.8, 1.2),
    tr.RandomFlip(),
    tr.RandomAffine(rotation_range=5, translation_range=0.2,
                    zoom_range=(0.9, 1.1))
])
# Test pipeline: deterministic tensor conversion and casting only.
transform_pipeline_test = tr.Compose([
    tr.ToTensor(),
    tr.AddChannel(axis=0),
    tr.TypeCast('float')
    # tr.RangeNormalize(0, 1)
])
# NOTE(review): this call continues past the visible chunk — the remaining
# arguments are truncated here.
transformed_images = Beijing_dataset(root_dir, patient_id_G_train,
def __init__(self, root_dir, split, transform=None, preload_data=True,
             modalities=['7T_T2']):
    """Inference dataset: loads one or more modalities per patient and
    stacks them along the channel axis.

    Args:
        root_dir: dataset root; expects ``<root>/<split>/image``.
        split: subset name ('train', 'test', ...).
        transform: unused — a fixed ToTensor+TypeCast transform is installed.
        preload_data: when True, read all NIfTI volumes into RAM now.
        modalities: modality name substrings used to match files.
            NOTE: mutable default argument — safe here only because it is
            never mutated.
    """
    super(CMR3DDataset_MultiClass_MultiProj_infer, self).__init__()

    # Type of modalities
    self.TypeOfModal = modalities

    # For now we assume all projections are axial - no coronal projections
    image_dir = join(root_dir, split, 'image')

    self.image_filenames = []
    for mod in self.TypeOfModal:
        # Coronal T2 files share the plain '7T_T2' name pattern.
        if mod == '7T_T2_cor':
            mod = '7T_T2'
        tmp_filenames = [
            join(image_dir, x) for x in listdir(image_dir)
            if (is_image_file(x) and x.find(mod) != -1)
        ]
        self.image_filenames.append(sorted(tmp_filenames))

        # BUGFIX: was ``if mod == '7T_T2' or '3T':`` — always truthy because
        # the literal '3T' is a non-empty string, so patient_len was set for
        # *every* modality (last one wins). Only the reference scans should
        # define the patient count.
        if mod in ('7T_T2', '3T'):
            # This is the reference scan (all patients must have it) — use
            # it to determine how many patients we have.
            self.patient_len = len(tmp_filenames)

    # Assume we always start from 7T_T2 Axial scans
    tmp_data = load_nifti_img(self.image_filenames[0][0], dtype=np.int16)
    self.image_dims = tmp_data[0].shape

    # report the number of images in the dataset
    print('Number of {0} images: {1} Patients'.format(split, self.__len__()))

    # data augmentation
    # NOTE: in this case, disable the add dimension transform!
    #self.transform = transform
    self.transform = ts.Compose(
        [ts.ToTensor(), ts.TypeCast(['float', 'long'])])

    # data load into the ram memory
    self.t2_headers = []
    self.preload_data = preload_data
    if self.preload_data:
        print('Preloading the {0} dataset ...'.format(split))
        # Concatenate the raw data along the channels dimension.
        self.raw_images = []
        # Per each patient (jj), go over all modalities (ii).
        for jj in range(len(self.image_filenames[0])):
            internal_cntr = 0
            for ii in range(len(self.image_filenames)):
                if internal_cntr == 0:
                    # First time - should always be T2 axial.
                    q_dat, tmp_header, _ = load_nifti_img(
                        self.image_filenames[ii][jj], dtype=np.float32)
                    # normalize values to [0,1] range
                    tmp_data = np.expand_dims(
                        q_dat / np.max(q_dat.reshape(-1)), axis=0)
                    # For the header file — identification in the multi-GPU
                    # case.
                    tmp_name = self.image_filenames[ii][jj]
                else:
                    # Concatenate additional channels.
                    q_dat, _, _ = load_nifti_img(
                        self.image_filenames[ii][jj], dtype=np.float32)
                    # normalize values to [0,1] range
                    concat_data = np.expand_dims(
                        q_dat / np.max(q_dat.reshape(-1)), axis=0)
                    tmp_data = np.concatenate((tmp_data, concat_data), axis=0)
                internal_cntr += 1
            # Add the concatenated multichannel data to the list.
            self.raw_images.append(tmp_data)
            tmp_header['db_name'] = re.search(
                '_P(.*).nii.gz', tmp_name).group(1)  # Data identifier
            self.t2_headers.append(tmp_header)
        print('Loading is done\n')
def __init__(self, root_dir, split, transform=None, preload_data=False,
             rank=0, world_size=1):
    """Multi-projection dataset (V2): one 7T_T2 volume per projection
    (axial + coronal) per patient, sharded across distributed ranks.

    Args:
        root_dir: dataset root; expects ``<root>/<split>/image`` and
            ``<root>/<split>/label``.
        split: subset name.
        transform: unused — a fixed ToTensor+TypeCast transform is installed.
        preload_data: when True, read all volumes/labels into RAM now.
        rank, world_size: shard the patient list for multi-GPU training.
    """
    super(CMR3DDataset_MultiClass_MultiProj_V2, self).__init__()

    # TODO: make this an external parameter?
    internal_hist_augmentation_flag = 0

    # TODO: make this an external parameter?
    #self.TypeOfModal = ['7T_T2', '7T_T1', '7T_DTI_FA']
    self.TypeOfModal = ['7T_T2']
    self.TypeOfProj = ['Axial', 'Coronal']

    image_dir = join(root_dir, split, 'image')
    target_dir = join(root_dir, split, 'label')

    # image_filenames[modality][projection] -> sorted list of file paths
    self.image_filenames = []
    for mod in self.TypeOfModal:
        tmp_list = []
        for prj in self.TypeOfProj:
            tmp_str = [
                join(image_dir, x) for x in listdir(image_dir)
                if (is_image_file(x) and x.find(mod) != -1
                    and x.find(prj) != -1)
            ]
            tmp_list.append(sorted(tmp_str))
        self.image_filenames.append(tmp_list)

    # Assume we always start from 7T_T2 Axial scans
    tmp_data = load_nifti_img(self.image_filenames[0][0][0], dtype=np.int16)
    self.image_dims = tmp_data[0].shape

    self.target_filenames = sorted([
        join(target_dir, x) for x in listdir(target_dir) if is_image_file(x)
    ])

    # Divide data to each rank
    grp_size = math.ceil(len(self.target_filenames) / world_size)
    self.image_filenames[0][0] = \
        self.image_filenames[0][0][rank * grp_size:(rank + 1) * grp_size]
    self.image_filenames[0][1] = \
        self.image_filenames[0][1][rank * grp_size:(rank + 1) * grp_size]
    self.target_filenames = \
        self.target_filenames[rank * grp_size:(rank + 1) * grp_size]
    self.patient_len = len(self.target_filenames)

    # report the number of images in the dataset
    print('Number of {0} images: {1} Patients'.format(split, self.__len__()))

    # data augmentation
    # NOTE: in this case, disable the add dimension transform!
    #self.transform = transform
    self.transform = ts.Compose(
        [ts.ToTensor(), ts.TypeCast(['float', 'long'])])

    # data load into the ram memory
    self.preload_data = preload_data
    if self.preload_data:
        print('Preloading the {0} dataset ...'.format(split))
        # raw_images[patient][projection] -> (channels, ...) array
        self.raw_images = []
        for jj in range(len(self.image_filenames[0][0])):  # patients
            tmp_data_list = []
            for kk in range(len(self.image_filenames[0])):  # projections
                internal_cntr = 0
                for ii in range(len(self.image_filenames)):  # modalities
                    # NOTE: Coronal data is already permuted in the correct
                    # directions.
                    if self.image_filenames[ii][kk] != []:  # data exists?
                        if internal_cntr == 0:  # First time
                            q_dat = load_nifti_img(
                                self.image_filenames[ii][kk][jj],
                                dtype=np.float32)[0]
                            # normalize values to [0,1] range
                            q_dat = q_dat / np.max(q_dat.ravel())
                            # Optional image histogram augmentation.
                            if internal_hist_augmentation_flag == 1:
                                q_dat = adaptive_hist_aug(q_dat)
                            tmp_data = np.expand_dims(q_dat, axis=0)
                        else:  # Concatenate additional channels
                            q_dat = load_nifti_img(
                                self.image_filenames[ii][kk][jj],
                                dtype=np.float32)[0]
                            # BUGFIX: the normalized value was computed and
                            # discarded (bare ``q_dat / np.max(...)``), so
                            # additional channels were concatenated without
                            # [0,1] normalization. Assign the result.
                            q_dat = q_dat / np.max(q_dat.ravel())
                            if internal_hist_augmentation_flag == 1:
                                q_dat = adaptive_hist_aug(q_dat)
                            concat_data = np.expand_dims(q_dat, axis=0)
                            tmp_data = np.concatenate(
                                (tmp_data, concat_data), axis=0)
                        internal_cntr += 1
                # Append for all modalities per same projection.
                tmp_data_list.append(tmp_data)
            # [0] - Axial, [1] - Coronal
            self.raw_images.append(tmp_data_list)

        self.raw_labels = [
            load_nifti_img(ii, dtype=np.uint8)[0]
            for ii in self.target_filenames
        ]
        print('Loading is done\n')
def __init__(self, root_dir, split, transform=None, preload_data=True): super(CMR3DDataset_MultiClass_MultiProj_unreg, self).__init__() # TODO: make this a parameter self.TypeOfModal = ['7T_T2', '7T_T1', '7T_DTI_FA'] #self.TypeOfModal = ['7T_T2'] # For now we assume all projections are axial - no coronal projections image_dir = join(root_dir, split, 'image') target_dir = join(root_dir, split, 'label') self.image_filenames = [] for mod in self.TypeOfModal: tmp_filenames = [ join(image_dir, x) for x in listdir(image_dir) if (is_image_file(x) and x.find(mod) != -1) ] self.image_filenames.append(sorted(tmp_filenames)) # TODO: if mod == '7T_T2' if mod == '7T_T2': # This is the reference scan (all patients must have it) - use it to determine how many patients we have self.patient_len = len(tmp_filenames) self.target_filenames = sorted([ join(target_dir, x) for x in listdir(target_dir) if is_image_file(x) ]) #assert len(self.image_filenames) == len(self.target_filenames) # Assume we always start from 7T_T2 Axial scans tmp_data, meta = load_nifti_img(self.image_filenames[0][0], dtype=np.int16) #self.image_dims = tmp_data[0].shape self.image_dims = tmp_data.shape # report the number of images in the dataset print('Number of {0} images: {1} Patients'.format( split, self.__len__())) # data augmentation # NOTE: in this case, disable the add dimension transform! 
#self.transform = transform self.transform = ts.Compose( [ts.ToTensor(), ts.TypeCast(['float', 'long'])]) # data load into the ram memory self.preload_data = preload_data if self.preload_data: print('Preloading the {0} dataset ...'.format(split)) #self.raw_images = [load_nifti_img(ii, dtype=np.int16)[0] for ii in self.image_filenames] # Output is a list # Concatenate the raw data along the channels dimension self.raw_images = [] for jj in range(len(self.image_filenames[0]) ): # Per each patient, go over all modalities tmp_data = [] for ii in range(len( self.image_filenames)): # Go over all patients #print('File: {}'.format(self.image_filenames[ii][jj])) # Only for DEBUG q_dat = load_nifti_img( self.image_filenames[ii][jj], dtype=np.float32)[0] # normalize values to [0,1] range tmp_data.append( np.expand_dims(q_dat / np.max(q_dat.reshape(-1)), axis=0)) # Add the concatenated multichannel data to the list self.raw_images.append(tmp_data) self.raw_labels = [ load_nifti_img(ii, dtype=np.uint8)[0] for ii in self.target_filenames ] print('Loading is done\n')
def __init__(self, root_dir, split, transform=None, preload_data=True,
             modalities=['7T_T2'], rank=0):
    """Multi-modality dataset: concatenates all modalities of each patient
    along the channel axis and optionally preloads everything into RAM.

    Args:
        root_dir: dataset root; expects ``<root>/<split>/image`` and
            ``<root>/<split>/label``.
        split: subset name.
        transform: unused — a fixed ToTensor+TypeCast transform is installed.
        preload_data: when True, read all volumes/labels into RAM now.
        modalities: modality name substrings used to match files.
            NOTE(review): mutable default argument — only read here, never
            mutated, so it is currently safe.
        rank: distributed rank; only rank 0 prints progress.
    """
    super(CMR3DDataset_MultiClass_MultiProj, self).__init__()
    # TODO: make this a parameter
    #self.TypeOfModal = ['7T_DTI_B0', '7T_DTI_FA'] # If we use B0 as well for the Thalamus seg
    #self.TypeOfModal = ['7T_T2'] # For T2 axial
    #self.TypeOfModal = ['7T_T2_cor'] # For T2 coronal
    #self.TypeOfModal = ['7T_SWI']
    self.TypeOfModal = modalities
    if rank == 0:
        print("Modalities: {}".format(self.TypeOfModal))
    # For now we assume all projections are axial - no coronal projections
    image_dir = join(root_dir, split, 'image')
    target_dir = join(root_dir, split, 'label')
    self.image_filenames = []
    for mod in self.TypeOfModal:
        # Coronal T2 files share the plain '7T_T2' name pattern.
        if mod == '7T_T2_cor':
            mod = '7T_T2'
        tmp_filenames = [
            join(image_dir, x) for x in listdir(image_dir)
            if (is_image_file(x) and x.find(mod) != -1)
        ]
        self.image_filenames.append(sorted(tmp_filenames))
        # TODO: if mod == '7T_T2'
        # NOTE(review): every branch below assigns the same value — the
        # chain only encodes a priority order of reference modalities.
        if mod == '7T_T2' or mod == '7T_SWI':
            # This is the reference scan (all patients must have it) - use
            # it to determine how many patients we have.
            self.patient_len = len(tmp_filenames)
        elif mod == '7T_T1':  # Secondary priority
            self.patient_len = len(tmp_filenames)
        elif mod == '7T_DTI_B0':  # Tertiary priority
            self.patient_len = len(tmp_filenames)
        elif mod == '3T_T2':  # Fourth priority
            self.patient_len = len(tmp_filenames)
    self.target_filenames = sorted([
        join(target_dir, x) for x in listdir(target_dir)
        if is_image_file(x)
    ])
    #assert len(self.image_filenames) == len(self.target_filenames)
    if rank == 0:
        print("\n".join(self.target_filenames))
    # Assume we always start from 7T_T2 Axial scans
    tmp_data = load_nifti_img(self.image_filenames[0][0], dtype=np.int16)
    self.image_dims = tmp_data[0].shape
    # report the number of images in the dataset
    if rank == 0:
        print('Number of {0} images: {1} Patients'.format(
            split, self.__len__()))
    # data augmentation
    # NOTE: in this case, disable the add dimension transform!
    #self.transform = transform
    self.transform = ts.Compose(
        [ts.ToTensor(), ts.TypeCast(['float', 'long'])])
    # data load into the ram memory
    self.t2_headers = []
    self.preload_data = preload_data
    if self.preload_data:
        if rank == 0:
            print('Preloading the {0} dataset ...'.format(split))
        #self.raw_images = [load_nifti_img(ii, dtype=np.int16)[0] for ii in self.image_filenames] # Output is a list
        # Concatenate the raw data along the channels dimension.
        self.raw_images = []
        # Per each patient (jj), go over all modalities (ii).
        for jj in range(len(self.image_filenames[0])):
            internal_cntr = 0
            for ii in range(len(self.image_filenames)):
                #print('File: {}'.format(self.image_filenames[ii][jj])) # Only for DEBUG
                if internal_cntr == 0:  # First time
                    q_dat, tmp_header, _ = load_nifti_img(
                        self.image_filenames[ii][jj], dtype=np.float32
                    )  # normalize values to [0,1] range
                    tmp_data = np.expand_dims(q_dat /
                                              np.max(q_dat.reshape(-1)),
                                              axis=0)
                    # For the header file - identification in the multi GPU
                    # case.
                    tmp_name = self.image_filenames[ii][jj]
                else:  # Concatenate additional channels
                    q_dat = load_nifti_img(
                        self.image_filenames[ii][jj], dtype=np.float32)[
                            0]  # normalize values to [0,1] range
                    concat_data = np.expand_dims(q_dat /
                                                 np.max(q_dat.reshape(-1)),
                                                 axis=0)
                    tmp_data = np.concatenate((tmp_data, concat_data),
                                              axis=0)
                internal_cntr += 1
            # Add the concatenated multichannel data to the list.
            self.raw_images.append(tmp_data)
            tmp_header['db_name'] = re.search(
                '_P(.*).nii.gz', tmp_name).group(1)  # Data identifier
            self.t2_headers.append(tmp_header)
        # Load labels
        #self.raw_labels = [load_nifti_img(ii, dtype=np.uint8)[0] for ii in self.target_filenames]
        self.raw_labels = []
        for ii in self.target_filenames:
            label_tmp = load_nifti_img(ii, dtype=np.uint8)[0]
            self.raw_labels.append(label_tmp)
        if rank == 0:
            print('Loading is done\n')
# NOTE(review): this chunk begins inside a function whose ``def`` line is
# not visible here — d, MAP_B, MAP_R, B_MAP, R_MAP come from that enclosing
# scope; the loop and the return below belong to it.
for i in range(0, d.shape[0]):
    # Reference channel for this sample; consider only nonzero pixels.
    ref = d[i, 0, :, :]
    ind = np.where(ref != 0)
    # 2-means clustering of the nonzero intensities gives a bright/dark
    # split; TH1/TH2 are the high/low cluster centers.
    km = KMeans(n_clusters=2, random_state=0).fit((ref[ind]).reshape(-1, 1))
    TH1 = np.amax(km.cluster_centers_)
    TH2 = np.amin(km.cluster_centers_)
    # Both thresholds sit halfway between the two cluster centers.
    coff1 = 0.5
    coff2 = 0.5
    # NOTE(review): mask_R is derived from MAP_B and mask_B from MAP_R —
    # presumably intentional (cross-map thresholding), but worth confirming.
    mask_R = (MAP_B[i, 0, :, :] > (TH1 - (TH1 - TH2) * coff1)) * (MAP_B[i, 0, :, :] != 0)
    mask_B = (MAP_R[i, 0, :, :] < (TH2 + (TH1 - TH2) * coff2)) * (MAP_R[i, 0, :, :] != 0)
    B_MAP[i, 0, :, :] = mask_B
    R_MAP[i, 0, :, :] = mask_R
return B_MAP.float(), R_MAP.float()

# Random flips applied independently to the two image streams.
data_transform1 = tensor_tf.Compose([
    tensor_tf.RandomFlip(h=True, v=True, p=0.75),
])
data_transform2 = tensor_tf.Compose([
    tensor_tf.RandomFlip(h=True, v=True, p=0.75),
])
# Axis-aligned 90-degree rotations and a small random translation.
affine_transform1 = tensor_tf.RandomChoiceRotate([0, 90, 180, 270])
affine_transform2 = tensor_tf.RandomChoiceRotate([0, 90, 180, 270])
affine_transform3 = tensor_tf.RandomTranslate([20 / 255., 20 / 255.])
#data augmentation and reflection image processing for simulation,such as blurring and ghost effects
# NOTE(review): this call continues past the visible chunk — the remaining
# arguments are truncated here.
train_set = cd.CustomDataset(img_list, img2_list,
                             data_transform1=data_transform1,
                             data_transform2=data_transform2,
                             affine_transform1=affine_transform1,
                             affine_transform2=affine_transform2,
def gsd_pCT_transform(self):
    """Data augmentation transformations for the Geneva Stroke dataset
    (pCT maps).

    Returns:
        dict: {'train': ..., 'valid': ...} torchsample Compose pipelines.
    """
    train_steps = [
        ts.ToTensor(),
        ts.Pad(size=self.scale_size),
        ts.TypeCast(['float', 'float']),
        ts.RandomFlip(h=True, v=True, p=self.random_flip_prob),
        # Todo Random Affine doesn't support channels --> try newer version
        # of torchsample or torchvision before re-enabling it here.
        ts.ChannelsFirst(),
        # Todo apply channel wise normalisation (percentile variant is
        # currently disabled in favour of NormalizeMedic).
        ts.NormalizeMedic(norm_flag=(True, False)),
        # Todo fork torchsample and fix the Random Crop bug before adding
        # a ts.RandomCrop(size=self.patch_size) step here.
        ts.TypeCast(['float', 'long']),
    ]
    valid_steps = [
        ts.ToTensor(),
        ts.Pad(size=self.scale_size),
        ts.ChannelsFirst(),
        ts.TypeCast(['float', 'float']),
        ts.NormalizeMedic(norm_flag=(True, False)),
        # SpecialCrop stays disabled, matching the training pipeline.
        ts.TypeCast(['float', 'long']),
    ]
    return {'train': ts.Compose(train_steps),
            'valid': ts.Compose(valid_steps)}
def __init__(self, root_dir, split, transform=None, preload_data=False):
    """T2 registration dataset: per patient, loads the axial and coronal
    7T_T2 volumes (no segmentation labels for this project).

    Args:
        root_dir: dataset root; expects ``<root>/<split>/image``.
        split: subset name.
        transform: unused — a fixed ToTensor+TypeCast transform is installed.
        preload_data: when True, read all volumes into RAM now.
    """
    super(CMR3DDataset_t2_reg, self).__init__()

    # TODO: make this a parameter
    self.TypeOfModal = ['7T_T2']
    self.TypeOfProj = ['Axial', 'Coronal']

    # For now we assume all projections are axial - no coronal projections
    image_dir = join(root_dir, split, 'image')
    target_dir = join(root_dir, split, 'label')

    # image_filenames[modality][projection] -> sorted list of file paths
    self.image_filenames = []
    for mod in self.TypeOfModal:
        tmp_list = []
        for prj in self.TypeOfProj:
            tmp_str = [
                join(image_dir, x) for x in listdir(image_dir)
                if (is_image_file(x) and x.find(mod) != -1
                    and x.find(prj) != -1)
            ]
            tmp_list.append(sorted(tmp_str))
            if mod == '7T_T2' and prj == 'Axial':
                # The reference scan determines the patient count.
                self.patient_len = len(tmp_str)
        self.image_filenames.append(tmp_list)

    # Assume we always start from 7T_T2 Axial scans
    tmp_data = load_nifti_img(self.image_filenames[0][0][0], dtype=np.int16)
    self.image_dims = tmp_data[0].shape

    self.target_filenames = []  # No labels for this project

    # report the number of images in the dataset
    print('Number of {0} images: {1} Patients'.format(split, self.__len__()))

    # data augmentation
    # NOTE: in this case, disable the add dimension transform!
    #self.transform = transform
    self.transform = ts.Compose(
        [ts.ToTensor(), ts.TypeCast(['float', 'long'])])

    # data load into the ram memory
    self.preload_data = preload_data
    if self.preload_data:
        print('Preloading the {0} dataset ...'.format(split))
        # raw_images[patient][projection] -> (1, ...) zero-padded array
        self.raw_images = []
        # BUGFIX: a leftover DEBUG loop (``for jj in range(len([0, 1]))``)
        # preloaded only the first two patients while the real loop was
        # commented out; iterate over all patients again.
        for jj in range(len(self.image_filenames[0][0])):
            tmp_data_list = []
            for kk in range(len(self.image_filenames[0])):  # projections
                internal_cntr = 0
                for ii in range(len(self.image_filenames)):  # modalities
                    # NOTE: Coronal data is already permuted in the correct
                    # directions.
                    if self.image_filenames[ii][kk] != []:  # data exists?
                        if internal_cntr == 0:  # First time
                            q_dat = load_nifti_img(
                                self.image_filenames[ii][kk][jj],
                                dtype=np.float32)[0]
                            # normalize values to [0,1] range
                            q_dat = q_dat / np.max(q_dat.reshape(-1))
                            q_dat = self.zero_pad(q_dat)  # zero pad
                            tmp_data = np.expand_dims(q_dat, axis=0)
                        else:  # Concatenate additional channels
                            q_dat = load_nifti_img(
                                self.image_filenames[ii][kk][jj],
                                dtype=np.float32)[0]
                            q_dat = q_dat / np.max(q_dat.reshape(-1))
                            q_dat = self.zero_pad(q_dat)
                            concat_data = np.expand_dims(q_dat, axis=0)
                            tmp_data = np.concatenate(
                                (tmp_data, concat_data), axis=0)
                        internal_cntr += 1
                # Append for all modalities per same projection.
                tmp_data_list.append(tmp_data)
            # [0] - Axial, [1] - Coronal
            self.raw_images.append(tmp_data_list)

        # Dummy: no labels for this project
        self.raw_labels = [f for f in range(len(self.image_filenames) + 1)]
        print('Loading is done\n')
#plt.scatter(inc_angle_tr, target) #plt.xlim(0, 48) #plt.hist(inc_angle_test, bins=100) #plt.hist(inc_angle_tr, bins=100) plt.show() del data full_img_tr = np.stack([band_1_tr, band_2_tr], axis=1) my_transforms = transforms.RandomAffine(rotation_range=180, translation_range=0.2, shear_range=None, zoom_range=(0.8, 1.2)) my_transforms = transforms.Compose(my_transforms.transforms) test_imgs = torch.from_numpy(full_img_tr).float().cuda() test_dataset = TensorDataset(test_imgs, input_transform=my_transforms) test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False) print("loader len:", len(test_loader)) while (True): #index = np.random.randint(0, len(data), 1) index = 0 for _data in tqdm(test_loader, total=len(test_loader)): print(index) plot_sample(_data.squeeze_().cpu()) index += 1 best_gmm = fit_gmm(band_1_tr[index])
# Script fragment: per-cohort train/test split plus augmentation pipelines
# (variant with multiplicative and additive Gaussian noise).
# NOTE(review): depends on names defined earlier in the file (comment,
# random, patient_id_G/H sets, AddGaussian, tr).
print(comment)
# Hold out a fixed-size test set per cohort (G: 20 patients, H: 200).
patient_id_G_test = random.sample(patient_id_G, 20)
patient_id_H_test = random.sample(patient_id_H, 200)
# Remaining patients form the training split (patient_id_* are sets).
patient_id_G_train = list(patient_id_G.difference(patient_id_G_test))
patient_id_H_train = list(patient_id_H.difference(patient_id_H_test))
# Training pipeline: Gaussian noise (multiplicative, then additive) applied
# before tensor conversion, followed by intensity and geometric jitter.
transform_pipeline_train = tr.Compose([
    AddGaussian(),
    AddGaussian(ismulti=False),
    tr.ToTensor(),
    tr.AddChannel(axis=0),
    tr.TypeCast('float'),
    # Attenuation((-.001, .1)),
    # tr.RangeNormalize(0,1),
    tr.RandomBrightness(-.2, .2),
    tr.RandomGamma(.9, 1.1),
    tr.RandomFlip(),
    tr.RandomAffine(rotation_range=5, translation_range=0.2,
                    zoom_range=(0.9, 1.1))
])
# Test pipeline: deterministic tensor conversion and casting only.
transform_pipeline_test = tr.Compose([
    tr.ToTensor(),
    tr.AddChannel(axis=0),
    tr.TypeCast('float')
    # tr.RangeNormalize(0, 1)
])
from utils import ScalarEncoder, accuracy, AverageMeter, make_dataset, save_model, print_metrics
from logger import Logger
from sklearn.model_selection import KFold

# Load the iceberg training data and reshape the flattened 75x75 radar
# bands into 2D arrays.
data = pd.read_json("data/train.json")
data["band_1"] = data["band_1"].apply(lambda x: np.array(x).reshape(75, 75))
data["band_2"] = data["band_2"].apply(lambda x: np.array(x).reshape(75, 75))
# Coerce non-numeric incidence-angle entries to NaN.
data["inc_angle"] = pd.to_numeric(data["inc_angle"], errors="coerce")

# Augmentation
# NOTE(review): affine_transforms is built but not included in
# my_transforms below — presumably intentional, but worth confirming.
affine_transforms = transforms.RandomAffine(rotation_range=None,
                                            translation_range=0.1,
                                            zoom_range=(0.95, 1.05))
rand_flip = transforms.RandomFlip(h=True, v=False)
std_normalize = transforms.StdNormalize()
my_transforms = transforms.Compose([rand_flip, std_normalize])

# scalar encoder for incident angles
encoder = ScalarEncoder(100, 30, 45)

# using folding to create 5 train-validation sets to train 5 networks
kf = KFold(n_splits=5, shuffle=True, random_state=100)
kfold_datasets = []
networks = []
optimizers = []
for train_index, val_index in kf.split(data):
    train_dataset = make_dataset(data.iloc[train_index], encoder,
                                 my_transforms)
    val_dataset = make_dataset(data.iloc[val_index], encoder, my_transforms)
    kfold_datasets.append({"train": train_dataset, "val": val_dataset})
    # A new net for each train-validation dataset
    networks.append(Net().cuda())
    optimizers.append(Adam(networks[-1].parameters(), lr=0.0005,
                           weight_decay=0.0002))