Example #1
    def __call__(self, kspace, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.Array): k-space measurements
            target (numpy.Array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object
            fname (pathlib.Path): Path to the input file
            slice (int): Serial number of the slice
        Returns:
            (tuple): tuple containing:
                image (torch.Tensor): Normalized zero-filled input image
                mean (float): Mean of the zero-filled image
                std (float): Standard deviation of the zero-filled image
                fname (pathlib.Path): Path to the input file
                slice (int): Serial number of the slice
        """
        kspace = transforms.to_tensor(kspace)
        if self.mask_func is not None:
            seed = tuple(map(ord, fname))
            mask = transforms.get_mask(kspace, self.mask_func, seed)
            masked_kspace = mask * kspace
        else:
            masked_kspace = kspace
        # Inverse Fourier Transform to get zero filled solution
        image = transforms.ifft2(masked_kspace)
        # Crop input image
        image = transforms.complex_center_crop(image, (self.resolution, self.resolution))
        # Absolute value
        image = transforms.complex_abs(image)
        # Apply Root-Sum-of-Squares if multicoil data
        if self.which_challenge == 'multicoil':
            image = transforms.root_sum_of_squares(image)
        # Normalize input
        image, mean, std = transforms.normalize_instance(image)
        image = image.clamp(-6, 6)
        return image, mean, std, fname, slice
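
The transform above depends on the project's transforms module. As a rough, self-contained sketch (not the repo's API), the same zero-filled pipeline (mask, inverse FFT, center crop, magnitude, instance normalization, clamp) can be written with plain torch.fft; the helper below assumes single-coil k-space given as a complex array plus a precomputed binary mask.

import numpy as np
import torch

def zero_filled_recon(kspace_np, mask_np, resolution=320):
    # Illustrative only: kspace_np is a complex (rows, cols) array, mask_np a
    # binary mask broadcastable to it.
    kspace = torch.from_numpy(np.complex64(kspace_np))
    masked = kspace * torch.from_numpy(mask_np.astype(np.float32))
    # Centered inverse FFT: ifftshift -> ifft2 -> fftshift, orthonormal scaling.
    image = torch.fft.fftshift(
        torch.fft.ifft2(torch.fft.ifftshift(masked, dim=(-2, -1)), norm="ortho"),
        dim=(-2, -1))
    # Center crop to a square region and take the magnitude.
    h, w = image.shape[-2:]
    top, left = (h - resolution) // 2, (w - resolution) // 2
    image = image[..., top:top + resolution, left:left + resolution].abs()
    # Instance normalization and clamping, mirroring the transform above.
    mean, std = image.mean(), image.std()
    image = ((image - mean) / (std + 1e-11)).clamp(-6, 6)
    return image, mean, std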
Example #2
    def __call__(self, kspace, target, attrs, fname, slice):
        kspace_square = transforms.to_tensor(kspace)  ##kspace
        image_square = ifft_c3(kspace_square)

        seed = None if not self.use_seed else tuple(map(ord, fname))
        masked_kspace_square, mask = transforms.apply_mask2(
            kspace_square, self.mask_func, seed)  ##ZF square kspace
        image_square_us = ifft_c3(masked_kspace_square)  ## US square complex image
        sz = kspace_square.shape
        stacked_kspace_square = kspace_square.permute(
            (0, 3, 1, 2)).reshape((-1, sz[-3], sz[-2]))
        stacked_masked_kspace_square = masked_kspace_square.permute(
            (0, 3, 1, 2)).reshape((-1, sz[-3], sz[-2]))

        stacked_image_square = image_square.permute(
            (0, 3, 1, 2)).reshape((-1, sz[-3], sz[-2]))
        us_image_square_rss = torch.sqrt(
            torch.sum(image_square_us**2, dim=(0, -1)))
        image_square_rss = torch.sqrt(torch.sum(image_square**2, dim=(0, -1)))

        return (stacked_kspace_square, stacked_masked_kspace_square,
                stacked_image_square, us_image_square_rss, image_square_rss)
Example #3
mode = 'validation'
data_list = load_traindata_path(dataset_dir, name)
save_dir = '/media/htic/NewVolume3/Balamurali/knee_mri_vsnet/coronal_dp_h5/{}'.format(
    mode)

#center_fract = 0.08
#acc   = 4
#shape = [1,640,368,2]

#mask_func = MaskFunc(center_fractions=[center_fract], accelerations=[acc])
#mask = mask_func(shape) ##
#mask = T.ifftshift(mask)

mask_path = '/media/htic/NewVolume3/Balamurali/knee_mri_vsnet/coronal_pd/masks/random4_masks_640_368.mat'
mask = loadmat(mask_path)['mask']
maskT = T.to_tensor(mask)

for rawdata_name, coil_name, folder_name, file_name in tqdm(data_list[mode]):

    rawdata = np.complex64(loadmat(rawdata_name)['rawdata']).transpose(2, 0, 1)
    sensitivity = np.complex64(loadmat(coil_name)['sensitivities'])

    rawdata2 = T.to_tensor(rawdata)
    sensitivity2 = T.to_tensor(sensitivity.transpose(2, 0, 1))

    img_und, img_gt, img_und_kspace, rawdata_und, masks, sensitivity = data_for_training(
        rawdata2, sensitivity2, maskT)

    #print (img_und.shape,img_gt.shape,img_und_kspace.shape,rawdata_und.shape,masks.shape,sensitivity.shape)

    img_und_np = img_und.numpy()
Example #4
    def __call__(self, kspace, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
                data or (rows, cols, 2) for single coil data.
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                image (torch.Tensor): Normalized zero-filled input image.
                target_train (torch.Tensor): Target used for training (renormalized and/or complex).
                mean (float): Mean value used for normalization.
                std (float): Standard deviation value used for normalization.
                norm (float): L2 norm of the entire volume.
                target (torch.Tensor): Unnormalized target image.
                mean_abs (float): Mean of the magnitude of the zero-filled image.
                std_abs (float): Standard deviation of the magnitude of the zero-filled image.
                kspace (torch.Tensor): Normalized fully sampled k-space.
                masked_kspace (torch.Tensor): Normalized undersampled k-space.
                image_precrop (torch.Tensor): Zero-filled complex image before cropping.
        """
        kspace = transforms.to_tensor(kspace)
        # Apply mask
        seed = None if not self.use_seed else tuple(map(ord, fname))
        mask = transforms.get_mask(kspace, self.mask_func, seed)
        masked_kspace = mask * kspace

        # Inverse Fourier Transform to get zero filled solution
        image_precrop = transforms.ifft2(masked_kspace)
        masked_kspace, _, _ = transforms.normalize_instance_complex(
            masked_kspace, eps=1e-11)
        kspace, _, _ = transforms.normalize_instance_complex(kspace, eps=1e-11)
        # Crop input image
        image = transforms.complex_center_crop(
            image_precrop, (self.resolution, self.resolution))

        image_abs = transforms.complex_abs(image)
        _, mean_abs, std_abs = transforms.normalize_instance(image_abs,
                                                             eps=1e-11)
        # Normalize input
        image, mean, std = transforms.normalize_instance_complex(image,
                                                                 eps=1e-11)
        image = image.clamp(-6, 6)

        target = transforms.to_tensor(target)
        target_train = target
        if not TRAIN_COMPLEX:

            # Normalize target
            if RENORM:
                target_train = transforms.normalize(target,
                                                    mean_abs,
                                                    std_abs,
                                                    eps=1e-11)
                target_train = target_train.clamp(
                    -6, 6
                )  # Return target (for viz) and target_clamped (for training)
        else:
            target_train = transforms.ifft2(kspace)
            target_train = transforms.complex_center_crop(
                target_train, (self.resolution, self.resolution))

            if RENORM:
                target_train = transforms.normalize(target_train,
                                                    mean,
                                                    std,
                                                    eps=1e-11)
        return (image, target_train, mean, std,
                attrs['norm'].astype(np.float32), target, mean_abs, std_abs,
                kspace, masked_kspace, image_precrop)
Example #5
def find_unmask(mask):
    unmask = np.where(mask == 1.0, 0.0, 1.0)
    unmask = transforms.to_tensor(unmask)
    unmask = unmask.float()
    return unmask
Example #6
    def apply(self, ksp_torch):
        ksp_npy = ksp_torch[:, :, 0].numpy() + 1j * ksp_torch[:, :, 1].numpy()

        return transforms.to_tensor(randomflip(ksp_npy) *
                                    self.translation()).float()
Example #7
    def __call__(self, kspace, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
                data or (rows, cols, 2) for single coil data.
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                masked_kspace_square (torch.Tensor): Undersampled square k-space, channels first.
                image_square_us (torch.Tensor): Zero-filled square complex image, channels first.
                target (torch.Tensor): Fully sampled square complex image, channels first.
                mean (float): Mean value used for normalization.
                std (float): Standard deviation value used for normalization.
                norm (float): L2 norm of the entire volume.
        """
        kspace_rect = transforms.to_tensor(kspace)  ##rectangular kspace

        image_rect = transforms.ifft2(kspace_rect)  ##rectangular FS image
        image_square = transforms.complex_center_crop(
            image_rect,
            (self.resolution, self.resolution))  ##cropped to FS square image
        kspace_square = transforms.fft2(image_square)  ##kspace of square image

        if self.augmentation:
            kspace_square = self.augmentation.apply(kspace_square)
            image_square = transforms.ifft2(kspace_square)

        # Apply mask
        seed = None if not self.use_seed else tuple(map(ord, fname))
        masked_kspace_square, mask = transforms.apply_mask(
            kspace_square, self.mask_func, seed)  ##ZF square kspace

        # Inverse Fourier Transform to get zero filled solution
        # image = transforms.ifft2(masked_kspace)
        image_square_us = transforms.ifft2(
            masked_kspace_square)  ## US square complex image

        # Crop input image
        # image = transforms.complex_center_crop(image, (self.resolution, self.resolution))
        # Absolute value
        # image = transforms.complex_abs(image)
        image_square_abs = transforms.complex_abs(
            image_square_us)  ## US square real image

        # Apply Root-Sum-of-Squares if multicoil data
        # if self.which_challenge == 'multicoil':
        #     image = transforms.root_sum_of_squares(image)
        # Normalize input
        # image, mean, std = transforms.normalize_instance(image, eps=1e-11)
        _, mean, std = transforms.normalize_instance(image_square_abs,
                                                     eps=1e-11)
        # image = image.clamp(-6, 6)

        # target = transforms.to_tensor(target)
        target = image_square.permute(2, 0, 1)
        # Normalize target
        # target = transforms.normalize(target, mean, std, eps=1e-11)
        # target = target.clamp(-6, 6)
        # return image, target, mean, std, attrs['norm'].astype(np.float32)

        # return masked_kspace_square.permute((2,0,1)), image, image_square.permute(2,0,1), mean, std, attrs['norm'].astype(np.float32)

        # ksp, zf, target, me, st, nor
        return (masked_kspace_square.permute((2, 0, 1)),
                image_square_us.permute((2, 0, 1)), target,
                mean, std, attrs['norm'].astype(np.float32))
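
Several of these transforms first go back to image space, center-crop to a square, and FFT back to obtain square k-space. A minimal sketch of that step with plain torch.fft (the helper names below are mine, not the repo's transforms API):

import torch

def centered_ifft2(k):
    return torch.fft.fftshift(
        torch.fft.ifft2(torch.fft.ifftshift(k, dim=(-2, -1)), norm="ortho"),
        dim=(-2, -1))

def centered_fft2(x):
    return torch.fft.fftshift(
        torch.fft.fft2(torch.fft.ifftshift(x, dim=(-2, -1)), norm="ortho"),
        dim=(-2, -1))

def square_kspace(kspace_rect, resolution):
    img = centered_ifft2(kspace_rect)  # fully sampled rectangular complex image
    h, w = img.shape[-2:]
    top, left = (h - resolution) // 2, (w - resolution) // 2
    img_sq = img[..., top:top + resolution, left:left + resolution]
    return centered_fft2(img_sq)  # k-space of the square image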
Example #8
    def __call__(self, kspace, target, attrs, fname, slice):
        kspace_rect = transforms.to_tensor(kspace)  ##rectangular kspace

        image_rect = transforms.ifft2(kspace_rect)  ##rectangular FS image
        image_square = transforms.complex_center_crop(
            image_rect,
            (self.resolution, self.resolution))  ##cropped to FS square image

        kspace_square = self.c3object.apply(
            transforms.fft2(image_square)) * 10000  ##kspace of square image
        image_square2 = ifft_c3(kspace_square)  ##for training domain_transform

        if self.augmentation:
            kspace_square = self.augmentation.apply(kspace_square)

        # image_square = ifft_c3(kspace_square)

        # Apply mask
        seed = None if not self.use_seed else tuple(map(ord, fname))
        masked_kspace_square, mask = transforms.apply_mask(
            kspace_square, self.mask_func, seed)  ##ZF square kspace

        # Inverse Fourier Transform to get zero filled solution
        # image = transforms.ifft2(masked_kspace)
        us_image_square = ifft_c3(
            masked_kspace_square)  ## US square complex image

        # Crop input image
        # image = transforms.complex_center_crop(image, (self.resolution, self.resolution))
        # Absolute value
        # image = transforms.complex_abs(image)
        us_image_square_abs = transforms.complex_abs(
            us_image_square)  ## US square real image
        us_image_square_rss = transforms.root_sum_of_squares(
            us_image_square_abs, dim=0)

        stacked_kspace_square = []
        for i in (range(len(kspace_square[:, 0, 0, 0]))):
            stacked_kspace_square.append(kspace_square[i, :, :, 0])
            stacked_kspace_square.append(kspace_square[i, :, :, 1])

        stacked_kspace_square = torch.stack(stacked_kspace_square)

        stacked_masked_kspace_square = []
        # masked_kspace_square = transforms.to_tensor(masked_kspace_square)
        # for i in range(len(masked_kspace_square[:,0,0,0])):
        # stacked_masked_kspace_square.stack(masked_kspace_square[i,:,:,0],masked_kspace_square[i,:,:,1])

        for i in (range(len(masked_kspace_square[:, 0, 0, 0]))):
            stacked_masked_kspace_square.append(masked_kspace_square[i, :, :, 0])
            stacked_masked_kspace_square.append(masked_kspace_square[i, :, :, 1])

        stacked_masked_kspace_square = torch.stack(stacked_masked_kspace_square)

        stacked_image_square = []
        for i in (range(len(image_square[:, 0, 0, 0]))):
            stacked_image_square.append(image_square2[i, :, :, 0])
            stacked_image_square.append(image_square2[i, :, :, 1])

        stacked_image_square = torch.stack(stacked_image_square)




        return (stacked_kspace_square, stacked_masked_kspace_square,
                stacked_image_square, us_image_square_rss, target * 10000)
        # mean, std, attrs['norm'].astype(np.float32)
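
The per-coil loops above interleave the real and imaginary parts of a (coils, H, W, 2) tensor into a (2 * coils, H, W) channel stack. A shorter, equivalent formulation (a sketch, not the repo's code) is a single permute/reshape, which is essentially what Example #2 does:

import torch

def stack_real_imag(x):
    # (coils, H, W, 2) -> (coils, 2, H, W) -> (2 * coils, H, W),
    # ordered coil0_real, coil0_imag, coil1_real, coil1_imag, ...
    coils, h, w, _ = x.shape
    return x.permute(0, 3, 1, 2).reshape(coils * 2, h, w)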
Example #9

datadir = '../multicoil_val/multicoil_val/'
outdir = datadir+'/../multicoil_val2/'

completed = []
for fi in glob.glob(datadir+'/*.h5'):
    with h5py.File(fi,'r') as h5:
        print(fi)
        volume_ksp = h5['kspace']
        print(volume_ksp.shape,volume_ksp.dtype)
        nslice,nch, ht, wd = volume_ksp.shape
        
        if wd < shp[1]:
            continue

        for sl in range(2,nslice-2):
            ksp = T.to_tensor(volume_ksp[sl])
            sq = tosquare(ksp,shp)
            
            with h5py.File('%s/%s-%.2d.h5' % (outdir,os.path.basename(fi)[:-3],sl),'w') as hw:
                hw['kspace']=sq.numpy()
        completed.append(fi)

    if len(completed)%3==0:    
        for rfi in completed:
            if os.path.exists(rfi):
                os.unlink(rfi)
#         break        

Example #10
    def __call__(self, ksp_cmplx, fname, sensitivity, acceleration):
        """
        Args:
            ksp_cmplx (numpy.array): Input k-space of the multi-coil data
            fname (str): File name
            sensitivity (numpy.array): ENLIVE sensitivity maps
            acceleration (int): Whether to train on 5x or 10x undersampled k-space

        """

        sens_t = T.to_tensor(sensitivity)

        ksp_t = T.to_tensor(ksp_cmplx)
        ksp_t = ksp_t.permute(2, 0, 1, 3)
        img_gt = T.ifft2(ksp_t)
        img_gt_sens = T.combine_all_coils(img_gt, sens_t)

        img_gt_np = T.zero_filled_reconstruction(ksp_cmplx)

        if acceleration == 5:

            if ksp_t.shape[2] == 170:
                sp_r5 = np.load(
                    "/media/student1/NewVolume/MR_Reconstruction/midl/MC-MRRec-challenge/Data/poisson_sampling/R5_218x170.npy"
                )
            elif ksp_t.shape[2] == 174:
                sp_r5 = np.load(
                    "/media/student1/NewVolume/MR_Reconstruction/midl/MC-MRRec-challenge/Data/poisson_sampling/R5_218x174.npy"
                )
            elif ksp_t.shape[2] == 180:
                sp_r5 = np.load(
                    "/media/student1/NewVolume/MR_Reconstruction/midl/MC-MRRec-challenge/Data/poisson_sampling/R5_218x180.npy"
                )

        elif acceleration == 10:

            if ksp_t.shape[2] == 170:
                sp_r5 = np.load(
                    "/media/student1/NewVolume/MR_Reconstruction/midl/MC-MRRec-challenge/Data/poisson_sampling/R10_218x170.npy"
                )
            elif ksp_t.shape[2] == 174:
                sp_r5 = np.load(
                    "/media/student1/NewVolume/MR_Reconstruction/midl/MC-MRRec-challenge/Data/poisson_sampling/R10_218x174.npy"
                )
            elif ksp_t.shape[2] == 180:
                sp_r5 = np.load(
                    "/media/student1/NewVolume/MR_Reconstruction/midl/MC-MRRec-challenge/Data/poisson_sampling/R10_218x180.npy"
                )

        randint = random.randint(0, 99)  # to get a random mask every time
        mask = sp_r5[randint]
        mask = torch.from_numpy(mask)
        mask = (torch.stack((mask, mask), dim=-1)).float()

        ksp_us = torch.where(mask == 0, torch.Tensor([0]), ksp_t)

        img_us = T.ifft2(ksp_us)
        img_us_sens = T.combine_all_coils(img_us, sens_t)

        ksp_us_np = ksp_us.numpy()
        ksp_us_cmplx = ksp_us_np[:, :, :, 0] + 1j * ksp_us_np[:, :, :, 1]
        ksp_us_cmplx = ksp_us_cmplx.transpose(1, 2, 0)

        img_us_np = T.zero_filled_reconstruction(ksp_us_cmplx)

        pha_gt = T.phase(img_gt_sens)
        pha_us = T.phase(img_us_sens)

        pha_gt = pha_gt + 3.1415927410125732
        pha_us = pha_us + 3.1415927410125732

        mag_gt = T.complex_abs(img_gt_sens)
        mag_us = T.complex_abs(img_us_sens)

        mag_gt_pad = T.pad(mag_gt, [256, 256])
        mag_us_pad = T.pad(mag_us, [256, 256])

        pha_gt_pad = T.pad(pha_gt, [256, 256])
        pha_us_pad = T.pad(pha_us, [256, 256])

        scale = mag_us_pad.max()
        scale_np = img_us_np.max()
        return (mag_us_pad / scale, mag_gt_pad / scale, pha_us_pad, pha_gt_pad,
                ksp_us / scale, img_us_sens / scale, img_gt_sens / scale,
                img_us_np / scale_np, img_gt_np / scale_np,
                sens_t, mask, scale_np, fname)
Example #11
file = '../fastMRIData/singlecoil_val/file1000000.h5'
hf = h5py.File(file, 'r')

print('Keys:', list(hf.keys()))
print('Attrs:', dict(hf.attrs))

volume_kspace = hf['kspace'][()]
print(volume_kspace.dtype)
print(volume_kspace.shape)

slice_kspace = volume_kspace  # Use the full k-space volume (all slices)
show_slices(np.log(np.abs(slice_kspace) + 1e-9), [0, 5, 10])

# In[9]:

slice_kspace2 = T.to_tensor(
    slice_kspace)  # Convert from numpy array to pytorch tensor
slice_image = T.ifft2(
    slice_kspace2)  # Apply Inverse Fourier Transform to get the complex image
slice_image_abs = T.complex_abs(
    slice_image)  # Compute absolute value to get a real image

# In[10]:

show_slices(slice_image_abs, [0, 5, 10], cmap='gray')

# As we can see, each coil in a multi-coil MRI scan focuses on a different region of the image. The coil images can be combined into the full image using the Root-Sum-of-Squares (RSS) transform.

# In[11]:

slice_image_rss = T.root_sum_of_squares(slice_image_abs, dim=0)
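
For reference, a minimal sketch of what a Root-Sum-of-Squares reduction over the leading (coil) dimension computes, assuming real-valued per-coil magnitude images:

import torch

def rss(x, dim=0):
    # Root-Sum-of-Squares combination along dim (the coil dimension in fastMRI).
    return torch.sqrt((x ** 2).sum(dim=dim))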
Example #12
def get_attack_loss_new(model, ori_target, loss_f=torch.nn.MSELoss(reduction='none'), 
    xs=np.random.randint(low=100, high=320-100, size=(16,)), 
    ys=np.random.randint(low=100, high=320-100, size=(16,)), 
    shape=(320, 320), n_pixel_range=(10, 11), train=False, optimizer=None):
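    # NOTE: the np.random.randint defaults above are evaluated once, when the
    # function is defined, so the same 16 (x, y) positions are reused on every
    # call unless xs and ys are passed in explicitly.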
    
    input_o = ori_target.unsqueeze(1).to(args.device)
    input_o = input_o.clone()
    
    #input_o = transforms.complex_abs(ori_input.clone())
    #input_o, mean, std = transforms.normalize_instance(ori_target.unsqueeze(1).clone())
    #input_o = torch.clamp(input_o, -6, 6)

    #perturb_noise = perturb_noise_init(x=x, y=y, shape=shape, n_pixel_range=n_pixel_range)
    p_max = input_o.max().cpu()
    #p_min = (p_max - input.min()) / 2
    #p_min = (p_max - input_o.min())
    p_min = input_o.min().cpu()
    perturb_noise = [perturb_noise_init(x=x, y=y, shape=shape, n_pixel_range=n_pixel_range, pixel_value_range=(p_min, p_max)) for x, y in zip(xs, ys)]
    perturb_noise = np.stack(perturb_noise)
            
    # perturb the target to get the perturbed image
    #perturb_noise = np.expand_dims(perturb_noise, axis=0)
    #perturb_noise = np.stack((perturb_noise,)*ori_target.shape(0), -1)

    seed = np.random.randint(999999999)
    
    
    perturb_noise = transforms.to_tensor(perturb_noise).unsqueeze(1).to(args.device)
    
    if not args.fnaf_eval_control:
        input_o += perturb_noise
    target = input_o.clone()
    
    #print(input_o.shape)
    input_o = np.complex64(input_o.cpu().numpy())
    input_o = transforms.to_tensor(input_o)
    input_o = transforms.fft2(input_o)
    input_o, mask = transforms.apply_mask(input_o, mask_f, seed)
    input_o = transforms.ifft2(input_o)
    
    image = transforms.complex_abs(input_o).to(args.device)
    image, mean, std = transforms.normalize_instance(image, eps=1e-11)
    image = image.clamp(-6, 6)
    
    target = transforms.normalize(target, mean, std, eps=1e-11)
    target = target.clamp(-6, 6)

    #information_loss = loss_f(og_image.squeeze(1), image.squeeze(1)).mean(-1).mean(-1).cpu().numpy()
    #information_loss = np.array([0]*len(xs))

    # apply the perturbed image to the model to get the loss
    if train:
        output = model(image).squeeze(1)
    else:
        with torch.no_grad():
            output = model(image).squeeze(1)
            
    #perturb_noise_tensor = transforms.to_tensor(perturb_noise).to(args.device, dtype=torch.double)
    perturb_noise = perturb_noise.squeeze(1)
    perturb_noise_tensor = perturb_noise
    
    perturb_noise = perturb_noise.cpu().numpy()
        
    mask = adjusted_mask((perturb_noise > 0).astype(np.double))
    #mask = (perturb_noise > 0).astype(np.double)
    

        
    target = target.squeeze(1)
    mask_0 = transforms.to_tensor(mask).to(args.device)

    loss = loss_f((output*mask_0), (target*mask_0))

    if train:
        b_loss = loss.sum() / mask_0.sum() * 1 + loss_f(output, target).mean()
        b_loss.backward()
        optimizer.step()
        loss = loss.detach()

        loss = loss.mean(-1).mean(-1).cpu().numpy()
    #loss = loss.mean(-1).mean(-1).numpy()

    # information_loss_list.append(information_loss)
    # xs_list.append(xs)
    # ys_list.append(ys)
    
    
    return loss
Example #13
    def __call__(self, kspace, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
                data or (rows, cols, 2) for single coil data.
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                image (torch.Tensor): Zero-filled input image.
                target (torch.Tensor): Target image converted to a torch Tensor.
                mean (float): Mean value used for normalization.
                std (float): Standard deviation value used for normalization.
                norm (float): L2 norm of the entire volume.
        """
        target = transforms.to_tensor(target)
        kspace = transforms.to_tensor(kspace)
        # Apply mask
        seed = None if not self.use_seed else tuple(map(ord, fname))
        fn_image = 0
        if args.fn_train:
            #fn_image_kspace = kspace.clone()
            fn_image = transforms.ifft2(kspace).clone()
            # Crop input image to given resolution if larger
            smallest_width = min(min(args.resolution, fn_image.shape[-2]), target.shape[-1])
            smallest_height = min(min(args.resolution, fn_image.shape[-3]), target.shape[-2])
            crop_size = (smallest_height, smallest_width)
            fn_image = transforms.complex_center_crop(fn_image, crop_size)

            # Absolute value
            fn_image = transforms.complex_abs(fn_image)
            # Apply Root-Sum-of-Squares if multicoil data
            if self.which_challenge == 'multicoil':
                fn_image = transforms.root_sum_of_squares(fn_image)



        masked_kspace, mask = transforms.apply_mask(kspace, self.mask_func, seed)
        # Inverse Fourier Transform to get zero filled solution
        image = transforms.ifft2(masked_kspace)
        # Crop input image to given resolution if larger
        smallest_width = min(min(args.resolution, image.shape[-2]), target.shape[-1])
        smallest_height = min(min(args.resolution, image.shape[-3]), target.shape[-2])
        crop_size = (smallest_height, smallest_width)
        image = transforms.complex_center_crop(image, crop_size)
        target = transforms.center_crop(target, crop_size)

        # Absolute value
        image = transforms.complex_abs(image)
        # Apply Root-Sum-of-Squares if multicoil data
        if self.which_challenge == 'multicoil':
            image = transforms.root_sum_of_squares(image)
        # Normalize input
        image, mean, std = transforms.normalize_instance(image, eps=1e-11)
        image = image.clamp(-6, 6)

        # Normalize target
        target = transforms.normalize(target, mean, std, eps=1e-11)
        target = target.clamp(-6, 6)
        item = image, target, mean, std, attrs['norm'].astype(np.float32)
        if args.fn_train:
            item = image, target, mean, std, attrs['norm'].astype(np.float32), fn_image
        return item
Example #14
def to_k_space(image):
    #image = image.numpy()
    image = np.complex64(image)
    image = transforms.to_tensor(image)
    return transforms.fft2(image)
Example #15
    def __call__(self, kspace, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
                data or (rows, cols, 2) for single coil data.
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                image (torch.Tensor): Zero-filled input image.
                target_train (torch.Tensor): Target image, normalized if self.normalize is set.
                mean (float): Mean value used for normalization (-1.0 if not normalized).
                std (float): Standard deviation used for normalization (-1.0 if not normalized).
                norm (float): L2 norm of the entire volume (-1.0 if no target).
                target (torch.Tensor): Unnormalized target image (empty list if no target).
                image_updated (torch.Tensor): Previously reconstructed slice for this file, if one exists.
        """
        kspace = transforms.to_tensor(kspace)
        # Apply mask
        seed = None if not self.use_seed else tuple(map(ord, fname))
        if self.use_mask:
            mask = transforms.get_mask(kspace, self.mask_func, seed)
            masked_kspace = mask * kspace
        else:
            masked_kspace = kspace

        # Inverse Fourier Transform to get zero filled solution
        image = transforms.ifft2(masked_kspace)
        # Crop input image
        image = transforms.complex_center_crop(
            image, (self.resolution, self.resolution))
        # Absolute value
        image = transforms.complex_abs(image)
        # Apply Root-Sum-of-Squares if multicoil data
        if self.which_challenge == 'multicoil':
            image = transforms.root_sum_of_squares(image)

        # Normalize input
        if self.normalize:
            image, mean, std = transforms.normalize_instance(image, eps=1e-11)
            if CLAMP:
                image = image.clamp(-6, 6)
        else:
            mean = -1.0
            std = -1.0

        # Normalize target
        if target is not None:
            target = transforms.to_tensor(target)
            target_train = target
            if self.normalize:
                target_train = transforms.normalize(target,
                                                    mean,
                                                    std,
                                                    eps=1e-11)
                if CLAMP:
                    target_train = target_train.clamp(
                        -6, 6
                    )  # Return target (for viz) and target_clamped (for training)
            norm = attrs['norm'].astype(np.float32)
        else:
            target_train = []
            target = []
            norm = -1.0
        image_updated = []
        recon_root = '/home/manivasagam/code/fastMRIPrivate/models/unet_volumes/'
        for split in ('reconstructions_train', 'reconstructions_val',
                      'reconstructions_test'):
            updated_fname = recon_root + split + '/' + fname
            if os.path.exists(updated_fname):
                with h5py.File(updated_fname, 'r') as data:
                    image_updated = transforms.to_tensor(
                        data['reconstruction'][slice])
                break

        return image, target_train, mean, std, norm, target, image_updated
Example #16
    def __call__(self, kspace, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
                data or (rows, cols, 2) for single coil data.
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                image (torch.Tensor): Zero-filled input image as a 2-channel (real, imaginary) tensor.
                target (torch.Tensor): Fully sampled target image as a 2-channel (real, imaginary) tensor.
        """
        kspace = transforms.to_tensor(kspace)
        gt = transforms.ifft2(kspace)
        gt = transforms.complex_center_crop(gt, (self.resolution, self.resolution))
        kspace = transforms.fft2(gt)

        # Apply mask
        seed = None if not self.use_seed else tuple(map(ord, fname))
        masked_kspace, mask = transforms.apply_mask(kspace, self.mask_func, seed)
        # Inverse Fourier Transform to get zero filled solution
        image = transforms.ifft2(masked_kspace)
        masked_kspace = transforms.fft2_nshift(image)
        # Crop input image
        image = transforms.complex_center_crop(image, (self.resolution, self.resolution))
        # Absolute value
        image_mod = transforms.complex_abs(image).max()
        image_r = image[:, :, 0]*6.0/image_mod
        image_i = image[:, :, 1]*6.0/image_mod
        # image_r = image[:, :, 0]
        # image_i = image[:, :, 1]
        # Apply Root-Sum-of-Squares if multicoil data
        if self.which_challenge == 'multicoil':
            image = transforms.root_sum_of_squares(image)
        # Normalize input

        image = np.stack((image_r, image_i), axis=-1)
        image = image.transpose((2, 0, 1))
        image = transforms.to_tensor(image)

        target = transforms.ifft2(kspace)
        target = transforms.complex_center_crop(target, (self.resolution, self.resolution))
        # Normalize target
        target_r = target[:, :, 0]*6.0/image_mod
        target_i = target[:, :, 1]*6.0/image_mod
        # target_r = target[:, :, 0]
        # target_i = target[:, :, 1]

        target = np.stack((target_r, target_i), axis=-1)
        target = target.transpose((2, 0, 1))
        target = transforms.to_tensor(target)

        image_mod = np.stack((image_mod, image_mod), axis=0)
        image_mod = transforms.to_tensor(image_mod)

        norm = attrs['norm'].astype(np.float32)
        norm = np.stack((norm, norm), axis=-1)
        norm = transforms.to_tensor(norm)

        mask = mask.expand(kspace.shape)
        mask = mask.transpose(0, 2).transpose(1, 2)
        mask = transforms.ifftshift(mask)

        masked_kspace = masked_kspace.transpose(0, 2).transpose(1, 2)

        return image, target