def preprocess(kspace, maps, args):
    # Batch dimensions of k-space and maps must match
    assert kspace.shape[0] == maps.shape[0]
    batch_size = kspace.shape[0]

    # Convert everything from numpy arrays to tensors
    kspace = cplx.to_tensor(kspace)
    maps = cplx.to_tensor(maps)

    # Initialize ESPIRiT model
    A = T.SenseModel(maps)

    # Compute normalization factor (95th percentile of signal magnitude in the view-shared data)
    averaged_kspace = T.time_average(kspace, dim=3)
    image = A(averaged_kspace, adjoint=True)
    magnitude_vals = cplx.abs(image).reshape(batch_size, -1)
    k = int(round(0.05 * magnitude_vals[0].numel()))
    scale = torch.min(torch.topk(magnitude_vals, k, dim=1).values,
                      dim=1).values

    # Normalize k-space data
    kspace /= scale[:, None, None, None, None, None]

    # Compute network initialization
    if args.slwin_init:
        init_image = A(T.sliding_window(kspace, dim=3, window_size=5),
                       adjoint=True)
    else:
        init_image = A(kspace, adjoint=True)

    return kspace.unsqueeze(1), maps.unsqueeze(1), init_image.unsqueeze(1)
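
# Illustrative sketch (not part of the example above): the scale computed in
# preprocess() is the 95th percentile of the magnitude image, taken as the
# smallest of the top-5% values; torch.kthvalue reproduces it exactly.
import torch

mag = torch.rand(4, 64 * 64)  # stand-in for the flattened magnitude image
k = int(round(0.05 * mag[0].numel()))
scale_topk = torch.min(torch.topk(mag, k, dim=1).values, dim=1).values
scale_kth = torch.kthvalue(mag, mag.shape[1] - k + 1, dim=1).values
assert torch.equal(scale_topk, scale_kth)  # k-th largest == (n-k+1)-th smallest
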
def process_slice(kspace, args):
    nkx, nky, nphases, ncoils = kspace.shape

    if 0 < args.crop_size < nkx:
        # crop along readout dimension
        images = fftc.ifftc(kspace, axis=0)
        images = center_crop(images, [args.crop_size, nky, nphases, ncoils])
        kspace = fftc.fftc(images, axis=0).astype(np.complex64)
        nkx = args.crop_size

    # simulate reduced FOV
    #kspace = reduce_fov(kspace, ...)

    # compute time-average for ESPIRiT calibration
    kspace_avg = time_average(kspace, axis=-2)

    # ESPIRiT - compute sensitivity maps
    cmd = f'ecalib -d 0 -S -m {args.nmaps} -c {args.crop_value} -r {args.calib_size}'
    maps = bart.bart(1, cmd, kspace_avg[:, :, None, :])
    maps = np.reshape(maps, (nkx, nky, 1, ncoils, args.nmaps))

    # Convert everything to tensors
    kspace_tensor = cplx.to_tensor(kspace).unsqueeze(0)
    maps_tensor = cplx.to_tensor(maps).unsqueeze(0)

    # Do coil combination using sensitivity maps (PyTorch)
    A = T.SenseModel(maps_tensor)
    im_tensor = A(kspace_tensor, adjoint=True)

    # Convert tensor back to numpy array
    target = cplx.to_numpy(im_tensor.squeeze(0))

    return kspace, maps, target
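
# Illustrative sketch (not from the original): the readout-cropping step above
# transforms to image space along kx only, center-crops, and transforms back.
# Plain numpy centered FFTs stand in for the fftc helpers here.
import numpy as np

def _ifftc(x, axis=0):
    return np.fft.fftshift(np.fft.ifft(np.fft.ifftshift(x, axes=axis),
                                       axis=axis, norm='ortho'), axes=axis)

def _fftc(x, axis=0):
    return np.fft.fftshift(np.fft.fft(np.fft.ifftshift(x, axes=axis),
                                      axis=axis, norm='ortho'), axes=axis)

ksp = (np.random.randn(640, 372) +
       1j * np.random.randn(640, 372)).astype(np.complex64)
im = _ifftc(ksp, axis=0)
lo = (im.shape[0] - 320) // 2                  # symmetric crop along readout
ksp_cropped = _fftc(im[lo:lo + 320], axis=0).astype(np.complex64)
print(ksp_cropped.shape)                       # (320, 372)
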
def visualize(args, epoch, model, data_loader, writer):
    def save_image(image, tag):
        image = cplx.abs(image).permute(0, 3, 1, 2)  # magnitude
        image -= image.min()
        image /= image.max()
        grid = torchvision.utils.make_grid(image, nrow=4, pad_value=1)
        writer.add_image(tag, grid, epoch)

    model.eval()
    with torch.no_grad():
        for data in data_loader:
            # Load all data arrays
            input, maps, target, mean, std, norm = data
            input = input.to(args.device)
            maps = maps.to(args.device)
            target = target.to(args.device)

            # Compute zero-filled recon
            A = T.SenseModel(maps)
            zf = A(input, adjoint=True)

            # Compute DL recon
            output = model(input, maps)

            save_image(zf, 'Input')
            save_image(target, 'Target')
            save_image(output, 'Reconstruction')
            save_image(target - output, 'Error')
            break
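
# Illustrative sketch (not from the original): make_grid can perform the
# min/max scaling that save_image() does by hand, and can do it per image.
import torch
import torchvision

image = torch.rand(8, 1, 64, 64)  # stand-in for a batch of magnitude images
grid = torchvision.utils.make_grid(image, nrow=4, pad_value=1,
                                   normalize=True, scale_each=True)
print(grid.shape)  # single-channel inputs are expanded to 3 channels
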
def process_slice(kspace, args, calib_method='jsense'):
    # get data dimensions
    nky, nkz, nechoes, ncoils = kspace.shape

    # ESPIRiT parameters
    nmaps = args.num_emaps
    calib_size = args.ncalib
    crop_value = args.crop_value

    if args.device == -1:
        device = sp.cpu_device
    else:
        device = sp.Device(args.device)

    # compute sensitivity maps (BART)
    #cmd = f'ecalib -d 0 -S -m {nmaps} -c {crop_value} -r {calib_size}'
    #maps = bart.bart(1, cmd, kspace[:,:,0,None,:])
    #maps = np.reshape(maps, (nky, nkz, 1, ncoils, nmaps))

    # compute sensitivity maps (SigPy)
    ksp = np.transpose(kspace[:, :, 0, :], [2, 1, 0])
    if calib_method == 'espirit':
        maps = app.EspiritCalib(ksp,
                                calib_width=calib_size,
                                crop=crop_value,
                                device=device,
                                show_pbar=False).run()
    elif calib_method == 'jsense':
        maps = app.JsenseRecon(ksp,
                               mps_ker_width=6,
                               ksp_calib_width=calib_size,
                               device=device,
                               show_pbar=False).run()
    else:
        raise ValueError(f'Calibration method not implemented: {calib_method}')
    maps = np.reshape(np.transpose(maps, [2, 1, 0]),
                      (nky, nkz, 1, ncoils, nmaps))

    # Convert everything to tensors
    kspace_tensor = cplx.to_tensor(kspace).unsqueeze(0)
    maps_tensor = cplx.to_tensor(maps).unsqueeze(0)

    # Do coil combination using sensitivity maps (PyTorch)
    A = T.SenseModel(maps_tensor)
    im_tensor = A(kspace_tensor, adjoint=True)

    # Convert tensor back to numpy array
    image = cplx.to_numpy(im_tensor.squeeze(0))

    return image, maps
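
# Illustrative sketch (not from the original): the SigPy calibration call
# above, run on synthetic data. SigPy expects coil-first k-space, which is
# why the example transposes to (coils, kz, ky) before calibrating.
import numpy as np
import sigpy as sp
from sigpy.mri import app

ksp = (np.random.randn(8, 32, 32) +
       1j * np.random.randn(8, 32, 32)).astype(np.complex64)
maps = app.EspiritCalib(ksp, calib_width=16, crop=0.95,
                        device=sp.cpu_device, show_pbar=False).run()
print(maps.shape)  # (8, 32, 32): one sensitivity map per coil
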
def visualize(args, epoch, model, data_loader, writer, is_training=True):
    def save_image(image, tag, shape=None):
        image = image.permute(0, 3, 1, 2)
        image -= image.min()
        image /= image.max()
        if shape is not None:
            image = torch.nn.functional.interpolate(image,
                                                    size=shape,
                                                    mode='bilinear',
                                                    align_corners=True)
        grid = torchvision.utils.make_grid(image, nrow=1, pad_value=1)
        writer.add_image(tag, grid, epoch)

    model.eval()
    with torch.no_grad():
        for data in data_loader:
            # Load all data arrays
            input, maps, target, mean, std, norm = data
            input = input.to(args.device)
            maps = maps.to(args.device)
            target = target.to(args.device)

            # Compute zero-filled recon
            A = T.SenseModel(maps)
            zf = A(input, adjoint=True)

            # Compute DL recon
            output = model(input, maps)

            # Slice images [b, y, z, e, 2]
            init = zf[:, :, :, 0, None]
            output = output[:, :, :, 0, None]
            target = target[:, :, :, 0, None]
            mask = cplx.get_mask(input[:, :, :, 0])  # [b, y, t, 2]

            # Save images to summary
            tag = 'Train' if is_training else 'Val'
            all_images = torch.cat((init, output, target), dim=2)
            save_image(cplx.abs(all_images),
                       '%s_Images' % tag,
                       shape=[320, 3 * 320])
            save_image(cplx.angle(all_images),
                       '%s_Phase' % tag,
                       shape=[320, 3 * 320])
            save_image(cplx.abs(output - target),
                       '%s_Error' % tag,
                       shape=[320, 320])
            save_image(mask.permute(0, 2, 1, 3), '%s_Mask' % tag)

            break
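
# Illustrative sketch (not from the original): the cplx helpers store complex
# data as real tensors with a trailing dimension of 2; torch's native complex
# views compute the same magnitude and phase used by save_image above.
import torch

x = torch.randn(4, 64, 64, 2)                  # (..., 2) real/imag layout
xc = torch.view_as_complex(x.contiguous())
mag, phase = xc.abs(), xc.angle()
print(mag.shape, phase.shape)                  # both torch.Size([4, 64, 64])
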
def visualize(args, epoch, model, data_loader, writer, is_training=True):
    def save_image(image, tag):
        image = image.permute(0, 3, 1, 2)
        image -= image.min()
        image /= image.max()
        grid = torchvision.utils.make_grid(image, nrow=1, pad_value=1)
        writer.add_image(tag, grid, epoch)

    model.eval()
    with torch.no_grad():
        for data in data_loader:
            # Load all data arrays
            input, maps, init, target, mean, std, norm = data
            input = input.to(args.device)
            maps = maps.to(args.device)
            init = init.to(args.device)
            target = target.to(args.device)

            # Data dimensions (for my own reference)
            #  image size:  [batch_size, nx,   ny, nt, nmaps, 2]
            #  kspace size: [batch_size, nkx, nky, nt, ncoils, 2]
            #  maps size:   [batch_size, nkx,  ny,  1, ncoils, nmaps, 2]

            # Initialize signal model
            A = T.SenseModel(maps)

            # Compute DL recon
            output = model(input, maps, init_image=init)

            # Slice images
            init = init[:, :, :, 10, 0, None]
            output = output[:, :, :, 10, 0, None]
            target = target[:, :, :, 10, 0, None]
            mask = cplx.get_mask(input[:, -1, :, :, 0, :])  # [b, y, t, 2]

            # Save images to summary
            tag = 'Train' if is_training else 'Val'
            all_images = torch.cat((init, output, target), dim=2)
            save_image(cplx.abs(all_images), '%s_Images' % tag)
            save_image(cplx.angle(all_images), '%s_Phase' % tag)
            save_image(cplx.abs(output - target), '%s_Error' % tag)
            save_image(mask.permute(0, 2, 1, 3), '%s_Mask' % tag)

            break
    def __call__(self, kspace, maps, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
                data or (rows, cols, 2) for single coil data.
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                image (torch.Tensor): Zero-filled input image.
                target (torch.Tensor): Target image converted to a torch Tensor.
                mean (float): Mean value used for normalization.
                std (float): Standard deviation value used for normalization.
                norm (float): L2 norm of the entire volume.
        """
        seed = None if not self.use_seed else tuple(map(ord, fname))

        # Convert everything from numpy arrays to tensors
        kspace = cplx.to_tensor(kspace).unsqueeze(0)
        maps = cplx.to_tensor(maps).unsqueeze(0)
        target = cplx.to_tensor(target).unsqueeze(0)
        norm = torch.sqrt(torch.mean(cplx.abs(target)**2))

        # Apply random data augmentation
        kspace, target = self.augment(kspace, target, seed)

        # Undersample k-space data
        masked_kspace, mask = ss.subsample(kspace, self.mask_func, seed)

        # Initialize ESPIRiT model
        A = T.SenseModel(maps)

        # Compute normalization factor (95th percentile of signal magnitude in the view-shared data)
        averaged_kspace = T.time_average(masked_kspace, dim=3)
        image = A(averaged_kspace, adjoint=True)
        magnitude_vals = cplx.abs(image).reshape(-1)
        k = int(round(0.05 * magnitude_vals.numel()))
        scale = torch.min(torch.topk(magnitude_vals, k).values)

        # Normalize k-space and target images
        masked_kspace /= scale
        target /= scale
        mean = torch.tensor([0.0], dtype=torch.float32)
        std = scale

        # Compute network initialization
        if self.slwin_init:
            init_image = A(T.sliding_window(masked_kspace,
                                            dim=3,
                                            window_size=5),
                           adjoint=True)
        else:
            init_image = A(masked_kspace, adjoint=True)

        # Get rid of batch dimension...
        masked_kspace = masked_kspace.squeeze(0)
        maps = maps.squeeze(0)
        init_image = init_image.squeeze(0)
        target = target.squeeze(0)

        return masked_kspace, maps, init_image, target, mean, std, norm
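
# Illustrative sketch (not from the original): one plausible reading of
# T.sliding_window(kspace, dim=3, window_size=5) is a box average over the
# time axis; the real helper may additionally weight by the sampling mask.
import torch
import torch.nn.functional as F

ksp = torch.randn(1, 64, 64, 20, 8, 2)         # (b, kx, ky, t, coils, 2)
x = ksp.permute(0, 1, 2, 4, 5, 3)              # move time to the last axis
shape = x.shape
x = F.avg_pool1d(x.reshape(-1, 1, shape[-1]), kernel_size=5, stride=1,
                 padding=2, count_include_pad=False)
x = x.reshape(shape).permute(0, 1, 2, 5, 3, 4) # restore original layout
print(x.shape)                                 # (1, 64, 64, 20, 8, 2)
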
def main():
    # ARGS
    input_data_path = '/mnt/dense/data_public/fastMRI/multicoil_val'
    output_data_path = '/mnt/raid3/sandino/fastMRI/validate_full'
    center_fraction = 0.04  # fraction of ky lines used for ESPIRiT calibration
    num_emaps = 1
    dbwrite = False

    input_files = glob.glob(os.path.join(input_data_path, '*.h5'))

    for file in input_files:
        # Load HDF5 file
        hf = h5py.File(file, 'r')
        # existing keys: ['ismrmrd_header', 'kspace', 'reconstruction_rss']

        # load k-space and image data from HDF5 file
        kspace_orig = hf['kspace'][()]
        im_rss = hf['reconstruction_rss'][()]  # (33, 320, 320)

        # get data dimensions
        num_slices, num_coils, num_kx, num_ky = kspace_orig.shape
        xres, yres = im_rss.shape[1:3]  # matrix size
        num_low_freqs = int(round(center_fraction * yres))

        # allocate memory for new arrays
        im_shape = (xres, yres)
        kspace = np.zeros((num_slices, xres, yres, num_coils),
                          dtype=np.complex64)
        maps = np.zeros((num_slices, xres, yres, num_coils, num_emaps),
                        dtype=np.complex64)
        im_truth = np.zeros((num_slices, xres, yres, num_emaps),
                            dtype=np.complex64)

        for sl in range(num_slices):
            kspace_slice = np.transpose(kspace_orig[sl], axes=[1, 2, 0])
            kspace_slice = kspace_slice[:, :, None, :]

            # Data dimensions for BART:
            #  kspace - (kx, ky, 1, coils)
            #  maps - (kx, ky, 1, coils, emaps)
            # Data dimensions for PyTorch:
            #  kspace - (1, kx, ky, coils, real/imag)
            #  maps   - (1, kx, ky, coils, emaps, real/imag)

            # Pre-process k-space data (PyTorch)
            kspace_tensor = cplx.to_tensor(
                np.transpose(kspace_slice,
                             axes=[2, 0, 1, 3]))  # (1, 640, 372, 15, 2)
            image_tensor = T.ifft2(kspace_tensor)
            image_tensor = cplx.center_crop(image_tensor, im_shape)
            kspace_tensor = T.fft2(image_tensor)
            kspace_slice = np.transpose(cplx.to_numpy(kspace_tensor),
                                        axes=[1, 2, 0, 3])

            # Compute sensitivity maps (BART)
            maps_slice = bart.bart(
                1, f'ecalib -d 0 -m {num_emaps} -c 0.1 -r {num_low_freqs}',
                kspace_slice)
            maps_slice = np.reshape(maps_slice,
                                    (xres, yres, 1, num_coils, num_emaps))
            maps_tensor = cplx.to_tensor(
                np.transpose(maps_slice, axes=[2, 0, 1, 3, 4]))

            # Do coil combination using sensitivity maps (PyTorch)
            A = T.SenseModel(maps_tensor)
            im_tensor = A(kspace_tensor, adjoint=True)

            # Convert image tensor to numpy array
            im_slice = cplx.to_numpy(im_tensor)

            # Re-shape and save everything
            kspace[sl] = np.reshape(kspace_slice, (xres, yres, num_coils))
            maps[sl] = np.reshape(maps_slice,
                                  (xres, yres, num_coils, num_emaps))
            im_truth[sl] = np.reshape(im_slice, (xres, yres, num_emaps))

        # write out new hdf5
        file_new = os.path.join(output_data_path, os.path.split(file)[-1])
        with h5py.File(file_new, 'w') as hf_new:
            # create datasets within HDF5
            hf_new.create_dataset('kspace', data=kspace)
            hf_new.create_dataset('maps', data=maps)
            hf_new.create_dataset('reconstruction_espirit', data=im_truth)
            hf_new.create_dataset('reconstruction_rss',
                                  data=im_rss)  # provided by fastMRI
            hf_new.create_dataset('ismrmrd_header', data=hf['ismrmrd_header'])

            # create attributes (metadata)
            for key in hf.attrs.keys():
                hf_new.attrs[key] = hf.attrs[key]

        if dbwrite:
            hf_new = h5py.File(file_new, 'r')
            print('Keys:', list(hf_new.keys()))
            print('Attrs:', dict(hf_new.attrs))
            cfl.writecfl('/home/sandino/maps', hf_new['maps'][()])
            cfl.writecfl('/home/sandino/kspace', hf_new['kspace'][()])
            cfl.writecfl('/home/sandino/im_truth',
                         hf_new['reconstruction_rss'][()])
            cfl.writecfl('/home/sandino/im_recon',
                         hf_new['reconstruction_espirit'][()])
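
# Illustrative sketch (not from the original): the 'reconstruction_rss'
# reference stored by fastMRI is a root-sum-of-squares combination of the
# per-coil images, reproducible from cropped k-space with a centered IFFT.
import numpy as np

ksp = (np.random.randn(15, 320, 320) +
       1j * np.random.randn(15, 320, 320)).astype(np.complex64)
coil_ims = np.fft.fftshift(
    np.fft.ifft2(np.fft.ifftshift(ksp, axes=(-2, -1)), norm='ortho'),
    axes=(-2, -1))
rss = np.sqrt(np.sum(np.abs(coil_ims)**2, axis=0))
print(rss.shape)  # (320, 320)
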
    def __call__(self, kspace, maps, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
                data or (rows, cols, 2) for single coil data.
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                image (torch.Tensor): Zero-filled input image.
                target (torch.Tensor): Target image converted to a torch Tensor.
                mean (float): Mean value used for normalization.
                std (float): Standard deviation value used for normalization.
                norm (float): L2 norm of the entire volume.
        """
        # Convert everything from numpy arrays to tensors
        kspace = cplx.to_tensor(kspace).unsqueeze(0)
        maps = cplx.to_tensor(maps).unsqueeze(0)
        target = cplx.to_tensor(target).unsqueeze(0)
        norm = torch.sqrt(torch.mean(cplx.abs(target)**2))

        # Apply mask in k-space
        seed = None if not self.use_seed else tuple(map(ord, fname))
        masked_kspace, mask = ss.subsample(kspace,
                                           self.mask_func,
                                           seed,
                                           mode='2D')

        # Normalize data (three strategies; zero-filled magnitude is the default)
        norm_method = 'zero-filled'
        if norm_method == 'mask-weighted':
            # ... by magnitude of mask-weighted zero-filled reconstruction
            # (the original left this branch unfinished; the top-5% scale
            # below is assumed to match the default branch)
            A = T.SenseModel(maps, weights=mask)
            image = A(masked_kspace, adjoint=True)
            magnitude_vals = cplx.abs(image).reshape(-1)
            k = int(round(0.05 * magnitude_vals.numel()))
            scale = torch.min(torch.topk(magnitude_vals, k).values)
        elif norm_method == 'zero-filled':
            # ... by magnitude of zero-filled reconstruction
            A = T.SenseModel(maps)
            image = A(masked_kspace, adjoint=True)
            magnitude_vals = cplx.abs(image).reshape(-1)
            k = int(round(0.05 * magnitude_vals.numel()))
            scale = torch.min(torch.topk(magnitude_vals, k).values)
        else:
            # ... by power within calibration region
            calib_size = 10
            calib_region = cplx.center_crop(masked_kspace,
                                            [calib_size, calib_size])
            scale = torch.mean(cplx.abs(calib_region)**2)
            scale = scale * (calib_size**2 / kspace.size(-3) / kspace.size(-2))

        masked_kspace /= scale
        target /= scale
        mean = torch.tensor([0.0], dtype=torch.float32)
        std = scale

        # Get rid of batch dimension...
        masked_kspace = masked_kspace.squeeze(0)
        maps = maps.squeeze(0)
        target = target.squeeze(0)

        return masked_kspace, maps, target, mean, std, norm
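
# Illustrative sketch (not from the original): the role ss.subsample plays
# above is retrospective undersampling, i.e. zeroing k-space lines outside a
# (here purely random) sampling mask.
import torch

torch.manual_seed(0)
ksp = torch.randn(1, 640, 372, 15, 2)          # (b, kx, ky, coils, 2)
keep = (torch.rand(372) < 0.25).float()        # ~4x acceleration, no calib region
mask = keep[None, None, :, None, None]
masked_ksp = ksp * mask
print(int(keep.sum().item()), 'of 372 ky lines kept')
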