Example #1
    def __call__(self, kspace, target, attrs, file_name, slice_num):
        """
        Args:
            kspace (numpy.array): k-space measurements.
            target (numpy.array): Target image. Unused in this transform.
            attrs (dict): Acquisition related information stored in the HDF5 object.
            file_name (str): File name.
            slice_num (int): Serial number of the slice.
        Returns:
            data (torch.Tensor): Masked k-space in CHW format, zero-padded along the width
                axis so that the width is divisible by self.divisor.
        """
        kspace = to_tensor(kspace)
        if self.mask_func is not None:  # Validation set
            seed = tuple(map(ord, file_name))
            masked_kspace, _ = apply_mask(kspace, self.mask_func, seed)
        else:  # Test set
            masked_kspace = kspace

        data = k_slice_to_chw(masked_kspace)
        margin = data.shape[-1] % self.divisor
        if margin > 0:
            pad = [(self.divisor - margin) // 2, (1 + self.divisor - margin) // 2]
        else:  # Prevents padding by half the divisor when the width is already divisible.
            pad = [0, 0]
        data = F.pad(data, pad=pad, value=0)  # F.pad pads the last dimension of the tensor.
        return data
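
The width-padding arithmetic above recurs throughout these transforms. As a self-contained sanity check (the divisor and tensor shapes below are made-up examples, not values taken from the repository), the following sketch confirms that the asymmetric split pads the width up to the next multiple of the divisor:

import torch
import torch.nn.functional as F

def pad_width_to_divisor(data: torch.Tensor, divisor: int) -> torch.Tensor:
    """Zero-pad the last (width) dimension until it is divisible by `divisor`."""
    margin = data.shape[-1] % divisor
    if margin == 0:
        return data  # Already divisible; no padding needed.
    left = (divisor - margin) // 2
    right = (1 + divisor - margin) // 2  # Takes the extra column when the total padding is odd.
    return F.pad(data, pad=[left, right], value=0)

# Hypothetical CHW slice of width 322 with divisor 16 (e.g. 2**4 pooling layers).
x = torch.zeros(2, 640, 322)
y = pad_width_to_divisor(x, divisor=16)
assert y.shape[-1] == 336 and y.shape[-1] % 16 == 0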
Example #2
    def __call__(self, k_slice, target, attrs, file_name, slice_num):
        if k_slice.ndim == 2:  # For singlecoil. Makes data processing later on much easier.
            k_slice = np.expand_dims(k_slice, axis=0)
        elif k_slice.ndim != 3:  # Prevents possible errors.
            raise TypeError('Invalid slice dimensions.')

        # non_blocking=True only makes this host-to-device copy asynchronous when the
        # source tensor lives in pinned (page-locked) memory; otherwise it is synchronous.
        kspace_target = to_tensor(k_slice).to(device=self.device,
                                              non_blocking=True)

        # Necessary because None cannot be handled by the default collate function.
        # The target must be sent to the GPU so it can be compared with the model outputs.
        target = 0 if target is None else to_tensor(target).to(
            device=self.device, non_blocking=True)

        return kspace_target, target, attrs, file_name, slice_num
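
Regarding the asynchronous copy mentioned above: a minimal sketch (the dataset and DataLoader settings are assumptions for illustration, not the repository's actual configuration) of how pinned memory is what lets a non_blocking copy overlap with GPU work:

import torch
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical stand-in for a batch of fastMRI k-space slices.
dataset = TensorDataset(torch.randn(8, 15, 640, 368, 2))

# pin_memory=True collates batches into page-locked memory, so the
# .to(..., non_blocking=True) call below can actually be asynchronous.
loader = DataLoader(dataset, batch_size=1, num_workers=0, pin_memory=True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

for (batch,) in loader:
    batch = batch.to(device, non_blocking=True)
    # ... forward pass would run here while later copies are queued ...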
Example #3
    def __call__(self, k_slice, target, attrs, file_name, slice_num):
        """
        Args:
            k_slice (numpy.array): Input k-space of shape (num_coils, height, width) for multi-coil
                data or (rows, cols) for single coil data.
            target (numpy.array): Target (320x320) image. May be None.
            attrs (dict): Acquisition related information stored in the HDF5 object.
            file_name (str): File name
            slice_num (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                data (torch.Tensor): kspace data converted to CHW format for CNNs, where C=(2*num_coils).
                    Also has padding in the width axis for auto-encoders, which have down-sampling regions.
                    This requires the data to be divisible by some number (usually 2**num_pooling_layers).
                    Otherwise, concatenation will not work in the decoder due to different sizes.
                    Only the width dimension is padded in this case due to the nature of the dataset.
                    The height is fixed at 640, while the width is variable.
                target_slice (torch.Tensor): Coil-wise ground-truth magnitude images. Shape=(num_coils, H, W).
        """
        assert np.iscomplexobj(k_slice), 'kspace must be complex.'
        assert k_slice.shape[-1] % 2 == 0, 'k-space data width must be even.'

        if k_slice.ndim == 2:  # For singlecoil. Makes data processing later on much easier.
            k_slice = np.expand_dims(k_slice, axis=0)
        elif k_slice.ndim != 3:  # Prevents possible errors.
            raise TypeError('Invalid slice dimensions.')

        with torch.no_grad():  # Remove unnecessary gradient calculations.
            # Now a Tensor of (num_coils, height, width, 2), where 2 is (real, imag).
            # The data is moved to the GPU and amplified by the amplification factor.
            k_slice = to_tensor(k_slice).to(device=self.device) * self.amp_fac
            target_slice = complex_abs(ifft2(k_slice))  # Coil-wise magnitude images, computed on the GPU.
            # Apply mask
            seed = None if not self.use_seed else tuple(map(ord, file_name))
            masked_kspace, mask = apply_mask(k_slice, self.mask_func, seed)

            data_slice = k_slice_to_chw(masked_kspace)

            margin = data_slice.shape[-1] % self.divisor

            if margin > 0:
                pad = [(self.divisor - margin) // 2,
                       (1 + self.divisor - margin) // 2]
            else:  # Prevents padding by half the divisor when margin == 0.
                pad = [0, 0]

            data_slice = F.pad(data_slice, pad=pad, value=0)  # F.pad pads the last dimension of the tensor.

            # Using the data acquisition method (fat suppression) may be useful later on.
        return data_slice, target_slice
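
Because only the width is padded while target_slice keeps the original width, any network output produced at the padded width has to be cropped back before it is compared with the target. A small sketch of that inverse step (the shapes are made-up; 368 padded up to 384 with divisor 32), assuming the model preserves the padded width:

import torch

def crop_width_to_target(output: torch.Tensor, target_width: int) -> torch.Tensor:
    """Undo the symmetric width padding so the output lines up with the unpadded target."""
    total_pad = output.shape[-1] - target_width
    if total_pad <= 0:
        return output
    left = total_pad // 2           # Mirrors pad = [(divisor - margin) // 2, ...]
    right = total_pad - left        # Mirrors pad = [..., (1 + divisor - margin) // 2]
    return output[..., left:output.shape[-1] - right]

padded = torch.zeros(30, 640, 384)
cropped = crop_width_to_target(padded, target_width=368)
assert cropped.shape[-1] == 368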
Example #4
    def __call__(self, k_slice, target, attrs, file_name, slice_num):
        assert np.iscomplexobj(k_slice), 'kspace must be complex.'

        if k_slice.ndim == 2:  # For singlecoil. Makes data processing later on much easier.
            k_slice = np.expand_dims(k_slice, axis=0)
        elif k_slice.ndim != 3:  # Prevents possible errors.
            raise RuntimeError(
                'Invalid slice shape. Please check input shape.')

        with torch.no_grad():  # Remove unnecessary gradient calculations.
            # Now a Tensor of (num_coils, height, width, 2), where 2 is (real, imag).
            kspace_target = to_tensor(k_slice).to(device=self.device)

            # Apply mask
            seed = None if not self.use_seed else tuple(map(ord, file_name))
            masked_kspace, mask = apply_mask(kspace_target, self.mask_func,
                                             seed)
            # Multiplying the whole tensor by 1/k_scale is faster than dividing the whole tensor by k_scale.
            k_scale = torch.std(masked_kspace)  # Pseudo-standard deviation for normalization.
            masked_kspace *= (torch.as_tensor(1) / k_scale)  # Standardization of CNN inputs.

            # Log weighting produces smoother inputs. It can be applied before padding since 0 maps to 0 under the weighting.
            masked_kspace = log_weighting(k_slice_to_chw(masked_kspace),
                                          scale=self.log_amp_scale)

            margin = masked_kspace.size(-1) % self.divisor

            if margin > 0:
                pad = [(self.divisor - margin) // 2,
                       (1 + self.divisor - margin) // 2]
            else:  # This is a temporary fix to prevent padding by half the divisor when margin=0.
                pad = [0, 0]

            # This pads at the last dimension of a tensor with 0.
            masked_kspace = F.pad(masked_kspace, pad=pad, value=0)

        return masked_kspace, kspace_target, (k_scale, mask)
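
log_weighting is defined elsewhere in the repository and its exact implementation is not shown here. As a purely hypothetical illustration of the property the comment relies on (a sign-preserving compression under which 0 stays 0), such a weighting and its inverse could look like this sketch:

import torch

def log_weighting_sketch(tensor: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
    """Hypothetical sign-preserving log compression; NOT the repository's log_weighting."""
    # log1p(0) == 0, so zero-padded regions stay exactly zero after weighting.
    return torch.sign(tensor) * torch.log1p(scale * tensor.abs()) / scale

def exp_unweighting_sketch(tensor: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
    """Inverse of the sketch above, for mapping outputs back to the k-space scale."""
    return torch.sign(tensor) * torch.expm1(scale * tensor.abs()) / scale

x = torch.randn(30, 640, 368)
assert torch.allclose(exp_unweighting_sketch(log_weighting_sketch(x, 10.0), 10.0), x, atol=1e-4)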
Example #5
    def __call__(self, k_slice, target, attrs, file_name, slice_num):
        assert np.iscomplexobj(k_slice), 'kspace must be complex.'

        if k_slice.ndim == 2:  # For singlecoil. Makes data processing later on much easier.
            k_slice = np.expand_dims(k_slice, axis=0)
        elif k_slice.ndim != 3:  # Prevents possible errors.
            raise RuntimeError(
                'Invalid slice shape. Please check input shape.')

        with torch.no_grad():  # Remove unnecessary gradient calculations.
            # Now a Tensor of (num_coils, height, width, 2), where 2 is (real, imag).
            k_slice = to_tensor(k_slice).to(device=self.device)
            scaling = torch.std(
                k_slice)  # Pseudo-standard deviation for normalization.
            target_slice = complex_abs(
                ifft2(k_slice))  # Labels are not standardized.
            k_slice *= (torch.ones(()) / scaling)  # Standardization of CNN inputs.
            # Multiplying the whole tensor by 1/scaling is faster than dividing the whole tensor by scaling.

            # Apply mask
            seed = None if not self.use_seed else tuple(map(ord, file_name))
            masked_kspace, mask = apply_mask(k_slice, self.mask_func, seed)

            data_slice = k_slice_to_chw(masked_kspace)

            margin = data_slice.size(-1) % self.divisor

            if margin > 0:
                pad = [(self.divisor - margin) // 2,
                       (1 + self.divisor - margin) // 2]
            else:  # This is a temporary fix to prevent padding by half the divisor when margin=0.
                pad = [0, 0]

            data_slice = F.pad(
                data_slice, pad=pad,
                value=0)  # This pads at the last dimension of a tensor with 0.

        return data_slice, target_slice, scaling  # This has a different output API.
Example #6
    def __call__(self, k_slice, target, attrs, file_name, slice_num):
        assert np.iscomplexobj(k_slice), 'kspace must be complex.'

        if k_slice.ndim == 2:  # For singlecoil. Makes data processing later on much easier.
            k_slice = np.expand_dims(k_slice, axis=0)
        elif k_slice.ndim != 3:  # Prevents possible errors.
            raise RuntimeError(
                'Invalid slice shape. Please check input shape.')

        with torch.no_grad():  # Remove unnecessary gradient calculations.
            # Now a Tensor of (num_coils, height, width, 2), where 2 is (real, imag).
            kspace_target = to_tensor(k_slice).to(device=self.device)
            c_img_target = k_slice_to_chw(
                ifft2(kspace_target))  # Complex-valued target; assumes only complex-to-complex (C2C) reconstruction.

            # Apply mask
            seed = None if not self.use_seed else tuple(map(ord, file_name))
            masked_kspace, mask = apply_mask(kspace_target, self.mask_func,
                                             seed)

            c_img_input = k_slice_to_chw(ifft2(masked_kspace))
            c_scale = torch.std(c_img_input)
            c_img_input *= (torch.tensor(1) / c_scale)
            c_bias = torch.mean(c_img_input)
            c_img_input -= c_bias

            margin = c_img_input.size(-1) % self.divisor

            if margin > 0:  # Cut off instead of adding padding.
                left = margin // 2
                right = (margin + 1) // 2
                assert c_img_input.size() == c_img_target.size()
                c_img_input = c_img_input[..., left:-right]
                c_img_target = c_img_target[..., left:-right]

        assert c_img_input.size() == c_img_target.size()

        return c_img_input, c_img_target, (c_scale, c_bias)
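
The (c_scale, c_bias) extras returned above are exactly what is needed to undo the input standardization (x_std = x / c_scale - c_bias). Whether the training code uses them this way is not shown here; this sketch just makes the inverse relationship explicit:

import torch

def unstandardize(c_img: torch.Tensor, c_scale: torch.Tensor, c_bias: torch.Tensor) -> torch.Tensor:
    """Invert x_std = x / c_scale - c_bias, i.e. x = (x_std + c_bias) * c_scale."""
    return (c_img + c_bias) * c_scale

# Round-trip check on a made-up complex image tensor in CHW layout.
x = torch.randn(30, 640, 368) * 3 + 0.5
scale = torch.std(x)
bias = torch.mean(x / scale)
x_std = x / scale - bias
assert torch.allclose(unstandardize(x_std, scale, bias), x, atol=1e-4)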
Example #7
from data.data_transforms import kspace_to_nchw, nchw_to_kspace, to_tensor, k_slice_to_chw
import numpy as np
import torch
from time import time
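# Consistency check: kspace_to_nchw applied to a whole batch should match k_slice_to_chw
# applied slice by slice, and nchw_to_kspace should invert the conversion exactly.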

k1 = np.random.uniform(size=(32, 15, 640, 328))
k2 = np.random.uniform(size=(32, 15, 640, 328))

k = k1 + k2 * 1j

kt = to_tensor(k)

tic = time()
nchw = kspace_to_nchw(kt)

for idx, kts in enumerate(kt):
    temp = k_slice_to_chw(kts)
    print(idx, torch.eq(nchw[idx], temp).all())

chan = 17
ri = chan % 2
sli = chan // 2

print(
    torch.eq(torch.squeeze(nchw[3, chan, ...]),
             torch.squeeze(kt[3, sli, ..., ri])).all())
kspace = nchw_to_kspace(nchw)
toc = time() - tic

print(torch.eq(kt, kspace).all(), toc)
Example #8
import torch
import h5py

from data.data_transforms import ifft2, to_tensor, root_sum_of_squares, center_crop, complex_center_crop, complex_abs
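# Check that an RSS reconstruction computed from the raw k-space matches the stored
# reconstruction_rss target for a single slice of a multicoil validation file.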

file = '/media/veritas/D/FastMRI/multicoil_val/file1001798.h5'
sdx = 10
with h5py.File(file, mode='r') as hf:
    kspace = hf['kspace'][sdx]
    target = hf['reconstruction_rss'][sdx]

cmg_scale = 2E-5
recon = complex_center_crop(ifft2(to_tensor(kspace) / cmg_scale),
                            shape=(320, 320)) * cmg_scale
recon = root_sum_of_squares(complex_abs(recon))
target = to_tensor(target)

print(torch.allclose(recon, target))
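
For reference, the reconstruction_rss targets in the fastMRI files are root-sum-of-squares combinations of the per-coil magnitude images, center-cropped to 320x320. A minimal sketch of the RSS combination itself, independent of the repository's helpers (shapes are illustrative):

import torch

def rss_combine(coil_images: torch.Tensor, dim: int = 0) -> torch.Tensor:
    """Root-sum-of-squares combination of per-coil magnitude images along `dim`."""
    return torch.sqrt((coil_images ** 2).sum(dim=dim))

# Hypothetical input: 15 coil magnitude images of size 320x320.
coil_mags = torch.rand(15, 320, 320)
combined = rss_combine(coil_mags, dim=0)
assert combined.shape == (320, 320)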
Example #9
import torch
import h5py
import numpy as np

from data.data_transforms import root_sum_of_squares, center_crop, ifft2, to_tensor, complex_abs
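# Same consistency check as above, but computed for the whole volume at once, with the
# coil dimension passed as dim=1 to root_sum_of_squares.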

file = '/media/veritas/D/FastMRI/multicoil_val/file1000229.h5'
with h5py.File(file, 'r') as hf:
    kspace = hf['kspace'][()]
    rss = hf['reconstruction_rss'][()]

kspace = to_tensor(kspace)
image = center_crop(root_sum_of_squares(complex_abs(ifft2(kspace)), dim=1),
                    shape=(320, 320)).squeeze().numpy()
print(np.allclose(image, rss))