Example #1
def __init__(
        self,
        image_size=IMAGE_SIZE,
        acq_type='radial',
        dcomp=True,
        scale_factor=1e6,
        traj=None,
        crop_image_data=False,
        **kwargs,
    ):
    self.image_size = image_size
    self.acq_type = acq_type
    self.traj = traj
    self._check_acq_type()
    self.dcomp = dcomp
    self.scale_factor = scale_factor
    self.crop_image_data = crop_image_data
    self.nufft_obj = KbNufftModule(
        im_size=self.image_size,
        grid_size=None,
        norm='ortho',
    )
    super(NonCartesianFastMRIDatasetBuilder, self).__init__(
        **kwargs,
    )
    if self.brain:
        raise ValueError(
            'Currently the non cartesian data works only with knee data.')
    self._check_mode()
    self._check_dcomp_multicoil()
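Example #1 only builds the KbNufftModule; the sketch below shows the operators derived from it actually being applied, as a minimal forward/adjoint round trip. The import paths are assumptions about how tfkbnufft is laid out, and the trajectory layout (batch, ndim, nsamples) in radians follows the conventions of the later examples.
import numpy as np
import tensorflow as tf
from tfkbnufft import kbnufft_forward, kbnufft_adjoint
from tfkbnufft.kbnufft import KbNufftModule

im_size = (640, 400)
nufft_ob = KbNufftModule(im_size=im_size, grid_size=None, norm='ortho')
interpob = nufft_ob._extract_nufft_interpob()
forward_op = kbnufft_forward(interpob)
adjoint_op = kbnufft_adjoint(interpob)

# image with explicit batch and coil dimensions, complex dtype
image = tf.cast(tf.random.uniform((1, 1, *im_size)), tf.complex64)
# k-space trajectory of shape (batch, ndim, nsamples), scaled to [-pi, pi]
ktraj = tf.random.uniform((1, 2, 1000), minval=-np.pi, maxval=np.pi)

kspace = forward_op(image, ktraj)       # non-cartesian k-space samples
image_back = adjoint_op(kspace, ktraj)  # adjoint reconstruction, (1, 1, 640, 400)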
Example #2
def test_adjoint_and_gradients(im_size, batch_size):
    tf.random.set_seed(0)
    grid_size = tuple(np.array(im_size) * 2)
    im_rank = len(im_size)
    M = im_size[0] * 2**im_rank
    nufft_ob = KbNufftModule(im_size=im_size,
                             grid_size=grid_size,
                             norm='ortho',
                             grad_traj=True)
    # Generate Trajectory
    ktraj_ori = tf.Variable(
        tf.random.uniform(
            (batch_size, im_rank, M), minval=-1 / 2, maxval=1 / 2) * 2 * np.pi)
    # Have a random signal
    signal = tf.Variable(
        tf.cast(tf.random.uniform((batch_size, 1, *im_size)), tf.complex64))
    kdata = tf.Variable(
        kbnufft_forward(nufft_ob._extract_nufft_interpob())(signal, ktraj_ori))
    Idata = tf.Variable(
        kbnufft_adjoint(nufft_ob._extract_nufft_interpob())(kdata, ktraj_ori))
    ktraj_noise = np.copy(ktraj_ori)
    ktraj_noise += 0.01 * tf.Variable(
        tf.random.uniform(
            (batch_size, im_rank, M), minval=-1 / 2, maxval=1 / 2) * 2 * np.pi)
    ktraj = tf.Variable(ktraj_noise)
    with tf.GradientTape(persistent=True) as g:
        I_nufft = kbnufft_adjoint(nufft_ob._extract_nufft_interpob())(kdata,
                                                                      ktraj)
        A = get_fourier_matrix(ktraj, im_size, im_rank, do_ifft=True)
        I_ndft = tf.reshape(tf.transpose(tf.matmul(kdata, A), [0, 1, 2]),
                            (batch_size, 1, *im_size))
        loss_nufft = tf.math.reduce_mean(tf.abs(Idata - I_nufft)**2)
        loss_ndft = tf.math.reduce_mean(tf.abs(Idata - I_ndft)**2)

    tf_test = tf.test.TestCase()
    # Test that the NUFFT and NDFT operations give the same result
    tf_test.assertAllClose(I_nufft, I_ndft, atol=2e-3)

    # Test gradients with respect to kdata
    gradient_ndft_kdata = g.gradient(I_ndft, kdata)[0]
    gradient_nufft_kdata = g.gradient(I_nufft, kdata)[0]
    tf_test.assertAllClose(gradient_ndft_kdata,
                           gradient_nufft_kdata,
                           atol=6e-3)

    # Test gradients with respect to trajectory location
    gradient_ndft_traj = g.gradient(I_ndft, ktraj)[0]
    gradient_nufft_traj = g.gradient(I_nufft, ktraj)[0]
    tf_test.assertAllClose(gradient_ndft_traj, gradient_nufft_traj, atol=6e-3)

    # Test gradients in chain rule with respect to ktraj
    gradient_ndft_loss = g.gradient(loss_ndft, ktraj)[0]
    gradient_nufft_loss = g.gradient(loss_nufft, ktraj)[0]
    tf_test.assertAllClose(gradient_ndft_loss, gradient_nufft_loss, atol=5e-4)
Example #3
def __init__(self, multicoil=False, im_size=(640, 472), density_compensation=False, **kwargs):
    super(NFFTBase, self).__init__(**kwargs)
    self.multicoil = multicoil
    self.im_size = im_size
    self.nufft_ob = KbNufftModule(
        im_size=im_size,
        grid_size=None,
        norm='ortho',
    )
    self.density_compensation = density_compensation
    self.forward_op = kbnufft_forward(self.nufft_ob._extract_nufft_interpob())
    self.backward_op = kbnufft_adjoint(self.nufft_ob._extract_nufft_interpob())
Example #4
def test_adjoint_gradient():
    traj = ktraj_function()
    kspace = tf.zeros([1, 1, kspace_shape], dtype=tf.complex64)
    nufft_ob = KbNufftModule(
        im_size=(640, 400),
        grid_size=None,
        norm='ortho',
    )
    backward_op = kbnufft_adjoint(nufft_ob._extract_nufft_interpob())
    with tf.GradientTape() as tape:
        tape.watch(kspace)
        res = backward_op(kspace, traj)
    grad = tape.gradient(res, kspace)
    tf_test = tf.test.TestCase()
    tf_test.assertEqual(grad.shape, kspace.shape)
Example #5
def test_forward_gradient():
    traj = ktraj_function()
    image = tf.zeros([1, 1, *image_shape], dtype=tf.complex64)
    nufft_ob = KbNufftModule(
        im_size=(640, 400),
        grid_size=None,
        norm='ortho',
    )
    forward_op = kbnufft_forward(nufft_ob._extract_nufft_interpob())
    with tf.GradientTape() as tape:
        tape.watch(image)
        res = forward_op(image, traj)
    grad = tape.gradient(res, image)
    tf_test = tf.test.TestCase()
    tf_test.assertEqual(grad.shape, image.shape)
Example #6
def setup():
    spokelength = 400
    grid_size = (spokelength, spokelength)
    nspokes = 10

    ga = np.deg2rad(180 / ((1 + np.sqrt(5)) / 2))
    kx = np.zeros(shape=(spokelength, nspokes))
    ky = np.zeros(shape=(spokelength, nspokes))
    ky[:, 0] = np.linspace(-np.pi, np.pi, spokelength)
    for i in range(1, nspokes):
        kx[:, i] = np.cos(ga) * kx[:, i - 1] - np.sin(ga) * ky[:, i - 1]
        ky[:, i] = np.sin(ga) * kx[:, i - 1] + np.cos(ga) * ky[:, i - 1]

    ky = np.transpose(ky)
    kx = np.transpose(kx)

    ktraj = np.stack((ky.flatten(), kx.flatten()), axis=0)
    im_size = (200, 200)
    nufft_ob = KbNufftModule(im_size=im_size,
                             grid_size=grid_size,
                             norm='ortho')
    torch_forward = KbNufft(im_size=im_size, grid_size=grid_size, norm='ortho')
    torch_backward = AdjKbNufft(im_size=im_size,
                                grid_size=grid_size,
                                norm='ortho')
    return ktraj, nufft_ob, torch_forward, torch_backward
Example #7
def profile_tfkbnufft(
    image,
    ktraj,
    im_size,
    device,
):
    if device == 'CPU':
        num_nuffts = 20
    else:
        num_nuffts = 50
    print(f'Using {device}')
    device_name = f'/{device}:0'
    with tf.device(device_name):
        image = tf.constant(image)
        if device == 'GPU':
            image = tf.cast(image, tf.complex64)
        ktraj = tf.constant(ktraj)
        nufft_ob = KbNufftModule(im_size=im_size, grid_size=None, norm='ortho')
        forward_op = kbnufft_forward(nufft_ob._extract_nufft_interpob())
        adjoint_op = kbnufft_adjoint(nufft_ob._extract_nufft_interpob())

        # warm-up computation
        for _ in range(2):
            y = forward_op(image, ktraj)

        start_time = time.perf_counter()
        for _ in range(num_nuffts):
            y = forward_op(image, ktraj)
        end_time = time.perf_counter()
        avg_time = (end_time - start_time) / num_nuffts
        print('forward average time: {}'.format(avg_time))

        # warm-up computation
        for _ in range(2):
            x = adjoint_op(y, ktraj)

        # run the adjoint speed tests
        start_time = time.perf_counter()
        for _ in range(num_nuffts):
            x = adjoint_op(y, ktraj)
        end_time = time.perf_counter()
        avg_time = (end_time - start_time) / num_nuffts
        print('backward average time: {}'.format(avg_time))
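A hypothetical driver for the profiling helper above; the image and trajectory shapes follow the (batch, coil, H, W) and (batch, ndim, nsamples) conventions used in the other examples, and the sizes are placeholders.
import numpy as np

im_size = (256, 256)
nsamples = 64 * 256
image = (np.random.randn(1, 1, *im_size)
         + 1j * np.random.randn(1, 1, *im_size)).astype(np.complex64)
ktraj = np.random.uniform(-np.pi, np.pi, size=(1, 2, nsamples)).astype(np.float32)

for device in ['CPU', 'GPU']:
    profile_tfkbnufft(image, ktraj, im_size, device)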
Example #8
def test_ncpdnet_init_and_call_3d(dcomp, volume_shape):
    model = NCPDNet(
        n_iter=1,
        n_primal=2,
        n_filters=2,
        multicoil=False,
        im_size=volume_shape,
        three_d=True,
        dcomp=dcomp,
        fastmri=False,
    )
    af = 16
    traj = get_stacks_of_radial_trajectory(volume_shape, af=af)
    spokelength = volume_shape[-2]
    nspokes = volume_shape[-1] // af
    nstacks = volume_shape[0]
    kspace_shape = nspokes * spokelength * nstacks
    extra_args = (tf.constant([volume_shape]), )
    if dcomp:
        nufft_ob = KbNufftModule(
            im_size=volume_shape,
            grid_size=None,
            norm='ortho',
        )
        interpob = nufft_ob._extract_nufft_interpob()
        nufftob_forw = kbnufft_forward(interpob)
        nufftob_back = kbnufft_adjoint(interpob)
        dcomp = calculate_radial_dcomp_tf(
            interpob,
            nufftob_forw,
            nufftob_back,
            traj[0],
            stacks=True,
        )
        dcomp = tf.ones([1, tf.shape(dcomp)[0]],
                        dtype=dcomp.dtype) * dcomp[None, :]
        extra_args += (dcomp, )
    res = model([
        tf.zeros([1, 1, kspace_shape, 1], dtype=tf.complex64),
        traj,
        extra_args,
    ])
    assert res.shape[1:4] == volume_shape
Example #9
def generate_oasis_tf_records(
        acq_type='radial_stacks',
        af=4,
        mode='train',
        shard=0,
        shard_size=3300,
        slice_size=176,
    ):
    tf.config.experimental_run_functions_eagerly(
        True,
    )
    path = Path(OASIS_DATA_DIR) / mode
    filenames = sorted(list(path.glob('*.nii.gz')))
    filenames = filenames[shard*shard_size:(shard+1)*shard_size]
    scale_factor = 1e-2
    volume_size = (slice_size, 256, 256)
    extension = get_extension_for_acq(
        volume_size,
        acq_type=acq_type,
        compute_dcomp=True,
        scale_factor=scale_factor,
        af=af,
    )
    extension = extension + '.tfrecords'
    nufft_ob = KbNufftModule(
        im_size=volume_size,
        grid_size=None,
        norm='ortho',
    )
    volume_transform = non_cartesian_from_volume_to_nc_kspace_and_traj(
        nufft_ob,
        volume_size,
        acq_type=acq_type,
        scale_factor=scale_factor,
        compute_dcomp=True,
        af=af,
    )
    for filename in tqdm(filenames):
        directory = filename.parent
        filename_tfrecord = directory / (filename.stem + extension)
        if filename_tfrecord.exists():
            continue
        volume = from_file_to_volume(filename)
        if volume.shape[0] % 2 != 0:
            continue
        if volume.shape[0] == 36 or volume.shape[0] == 44:
            continue
        with tf.device('/gpu:0'):
            volume = tf.constant(volume, dtype=tf.complex64)
            model_inputs, model_outputs = volume_transform(volume)
        with tf.io.TFRecordWriter(str(filename_tfrecord)) as writer:
            example = encode_example(model_inputs, model_outputs, compute_dcomp=True)
            writer.write(example)
Example #10
def test_gridded_preprocessing():
    image_size = (640, 400)
    nfft_ob = KbNufftModule(im_size=image_size)
    preproc_fun = non_cartesian_from_kspace_to_nc_kspace_and_traj(
        nfft_ob,
        image_size,
        gridding=True,
        af=20,
    )
    image = tf.random.normal([1, 320, 320])
    kspace = tf.cast(tf.random.normal([1, 640, 320]), tf.complex64)
    (kspace_masked, mask), image_out = preproc_fun(image, kspace)
    assert tf.squeeze(kspace_masked).shape == image_size
Example #11
def train_nc_kspace_dataset_from_indexable(
    path,
    volume_size=(256, 256, 256),
    scale_factor=1,
    n_samples=None,
    acq_type='radial_stacks',
    compute_dcomp=False,
    shuffle=True,
    **acq_kwargs,
):
    files_ds = tf.data.Dataset.list_files(f'{path}*.nii.gz', shuffle=False)
    # this makes sure the file selection happens only once when using fewer
    # than all the samples
    if shuffle:
        files_ds = files_ds.shuffle(
            buffer_size=1000,
            seed=0,
            reshuffle_each_iteration=False,
        )
    volume_ds = files_ds.map(
        _tf_filename_to_volume,
        num_parallel_calls=3,
    )
    # filter out flat volumes and volumes with an odd number of slices
    volume_ds = volume_ds.filter(lambda x: tf.shape(x)[0] > 1)
    volume_ds = volume_ds.filter(lambda x: tf.math.mod(tf.shape(x)[0], 2) == 0)
    if n_samples is not None:
        volume_ds = volume_ds.take(n_samples)
    nufft_ob = KbNufftModule(
        im_size=volume_size,
        grid_size=None,
        norm='ortho',
    )
    volume_ds = volume_ds.map(
        non_cartesian_from_volume_to_nc_kspace_and_traj(
            nufft_ob,
            volume_size,
            acq_type=acq_type,
            scale_factor=scale_factor,
            compute_dcomp=compute_dcomp,
            **acq_kwargs,
        ),
        num_parallel_calls=3,
    ).repeat().prefetch(buffer_size=3)

    return volume_ds
Example #12
def train_nc_kspace_dataset_from_indexable(
    path,
    image_size,
    inner_slices=None,
    rand=False,
    scale_factor=1,
    contrast=None,
    n_samples=None,
    acq_type='radial',
    compute_dcomp=True,  # for backwards compatibility
    **acq_kwargs,
):
    if not compute_dcomp:
        raise NotImplementedError(
            'Non-cartesian multicoil is not implemented without density compensation.'
        )
    selection = [{'inner_slices': inner_slices, 'rand': rand}]

    def _tf_filename_to_image_and_kspace_and_contrast(filename):
        def _from_train_file_to_image_and_kspace_and_contrast_tensor_to_tensor(
                filename):
            filename_str = filename.numpy()
            image, kspace, contrast = from_multicoil_train_file_to_image_and_kspace_and_contrast(
                filename_str,
                selection=selection,
            )
            return tf.convert_to_tensor(image), tf.convert_to_tensor(
                kspace), tf.convert_to_tensor(contrast)

        [image, kspace, contrast] = tf.py_function(
            _from_train_file_to_image_and_kspace_and_contrast_tensor_to_tensor,
            [filename],
            [tf.float32, tf.complex64, tf.string],
        )
        if rand:
            n_slices = (1, )
        else:
            n_slices = (inner_slices, )
        kspace_size = n_slices + (None, 640, None)
        image_size = n_slices + (320, 320)
        image.set_shape(image_size)
        kspace.set_shape(kspace_size)
        return image, kspace, contrast

    files_ds = tf.data.Dataset.list_files(f'{path}*.h5', shuffle=False)
    # this makes sure the file selection happens only once when using fewer
    # than all the samples
    files_ds = files_ds.shuffle(
        buffer_size=1000,
        seed=0,
        reshuffle_each_iteration=False,
    )
    image_and_kspace_and_contrast_ds = files_ds.map(
        _tf_filename_to_image_and_kspace_and_contrast,
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    # contrast filtering
    if contrast:
        image_and_kspace_and_contrast_ds = image_and_kspace_and_contrast_ds.filter(
            lambda image, kspace, tf_contrast: tf_contrast == contrast)
    image_and_kspace_ds = image_and_kspace_and_contrast_ds.map(
        lambda image, kspace, tf_contrast: (image, kspace),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    if n_samples is not None:
        image_and_kspace_ds = image_and_kspace_ds.take(n_samples)
    nufft_ob = KbNufftModule(
        im_size=image_size,
        grid_size=None,
        norm='ortho',
    )
    masked_kspace_ds = image_and_kspace_ds.map(
        non_cartesian_from_kspace_to_nc_kspace_and_traj(
            nufft_ob,
            image_size,
            acq_type=acq_type,
            scale_factor=scale_factor,
            **acq_kwargs,
        ),
        num_parallel_calls=tf.data.experimental.AUTOTUNE
        if rand or inner_slices is not None else None,
    ).repeat().prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

    return masked_kspace_ds
Example #13
class NonCartesianFastMRIDatasetBuilder(FastMRIDatasetBuilder):
    def __init__(
            self,
            image_size=IMAGE_SIZE,
            acq_type='radial',
            dcomp=True,
            scale_factor=1e6,
            traj=None,
            crop_image_data=False,
            **kwargs,
        ):
        self.image_size = image_size
        self.acq_type = acq_type
        self.traj = traj
        self._check_acq_type()
        self.dcomp = dcomp
        self.scale_factor = scale_factor
        self.crop_image_data = crop_image_data
        self.nufft_obj = KbNufftModule(
            im_size=self.image_size,
            grid_size=None,
            norm='ortho',
        )
        super(NonCartesianFastMRIDatasetBuilder, self).__init__(
            **kwargs,
        )
        if self.brain:
            raise ValueError(
                'Currently the non cartesian data works only with knee data.')
        self._check_mode()
        self._check_dcomp_multicoil()

    def _check_acq_type(self,):
        if self.acq_type not in ['spiral', 'radial', 'cartesian_debug', 'other']:
            raise ValueError(
                f'acq_type must be spiral, radial, cartesian_debug or other but is {self.acq_type}'
            )
        if self.acq_type == 'other' and self.traj is None:
            raise ValueError(
                f'Please provide a trajectory as input in case `acq_type` is `other`'
            )

    def _check_mode(self,):
        if self.mode == 'test':
            raise ValueError('NonCartesian dataset cannot be used in test mode')

    def _check_dcomp_multicoil(self,):
        if self.multicoil and not self.dcomp:
            raise ValueError('You must use density compensation when in multicoil')

    def generate_trajectory(self,):
        if self.acq_type == 'radial':
            traj = get_radial_trajectory(self.image_size, af=self.af)
        elif self.acq_type == 'cartesian_debug':
            traj = get_debugging_cartesian_trajectory()
        elif self.acq_type == 'spiral':
            traj = get_spiral_trajectory(self.image_size, af=self.af)
        elif self.acq_type == 'other':
            traj = self.traj
        return traj

    def preprocessing(self, image, kspace):
        traj = self.generate_trajectory()
        interpob = self.nufft_obj._extract_nufft_interpob()
        nufftob_forw = kbnufft_forward(interpob, multiprocessing=True)
        nufftob_back = kbnufft_adjoint(interpob, multiprocessing=True)
        if self.dcomp:
            dcomp = calculate_density_compensator(
                interpob,
                nufftob_forw,
                nufftob_back,
                traj[0],
            )
        traj = tf.repeat(traj, tf.shape(image)[0], axis=0)
        orig_image_channels = ortho_ifft2d(kspace)
        if self.crop_image_data:
            image = adjust_image_size(image, self.image_size)
        nc_kspace = nufft(self.nufft_obj, orig_image_channels, traj, self.image_size, multicoil=self.multicoil)
        nc_kspace, image = scale_tensors(nc_kspace, image, scale_factor=self.scale_factor)
        image = image[..., None]
        nc_kspaces_channeled = nc_kspace[..., None]
        orig_shape = tf.ones([tf.shape(kspace)[0]], dtype=tf.int32) * self.image_size[-1]
        if not self.crop_image_data:
            output_shape = tf.shape(image)[1:][None, :]
            output_shape = tf.tile(output_shape, [tf.shape(image)[0], 1])
        extra_args = (orig_shape,)
        if self.dcomp:
            dcomp = tf.ones(
                [tf.shape(kspace)[0], tf.shape(dcomp)[0]],
                dtype=dcomp.dtype,
            ) * dcomp[None, :]
            extra_args += (dcomp,)
        model_inputs = (nc_kspaces_channeled, traj)
        if self.multicoil:
            smaps = non_cartesian_extract_smaps(nc_kspace, traj, dcomp, nufftob_back, self.image_size)
            model_inputs += (smaps,)
        if not self.crop_image_data:
            model_inputs += (output_shape,)
        model_inputs += (extra_args,)
        return model_inputs, image
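A hypothetical instantiation of the builder above. The extra keyword arguments (af, multicoil, mode) are assumptions: they are forwarded to FastMRIDatasetBuilder and only inferred from the attributes (self.af, self.multicoil, self.mode, self.brain) referenced in this class.
builder = NonCartesianFastMRIDatasetBuilder(
    image_size=(640, 400),
    acq_type='radial',
    dcomp=True,
    scale_factor=1e6,
    af=4,
    multicoil=False,
    mode='train',
)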
Example #14
class NFFTBase(Layer):
    def __init__(self,
                 multicoil=False,
                 im_size=(640, 472),
                 density_compensation=False,
                 **kwargs):
        super(NFFTBase, self).__init__(**kwargs)
        self.multicoil = multicoil
        self.im_size = im_size
        self.nufft_ob = KbNufftModule(
            im_size=im_size,
            grid_size=None,
            norm='ortho',
        )
        self.density_compensation = density_compensation
        self.forward_op = kbnufft_forward(
            self.nufft_ob._extract_nufft_interpob())
        self.backward_op = kbnufft_adjoint(
            self.nufft_ob._extract_nufft_interpob())

    def pad_for_nufft(self, image):
        return _pad_for_nufft(image, self.im_size)

    def crop_for_pad(self, image, shape):
        return _crop_for_pad(image, shape, self.im_size)

    def crop_for_nufft(self, image):
        return _crop_for_nufft(image, self.im_size)

    def op(self, inputs):
        if self.multicoil:
            image, ktraj, smaps = inputs
        else:
            image, ktraj = inputs
        # for tfkbnufft we need a coil dimension even if there is none
        image = image[:, None, ..., 0]
        if self.multicoil:
            image = image * smaps

        kspace = nufft(self.nufft_ob, image, ktraj, image_size=self.im_size)
        # TODO: get rid of shape return as not needed in the end.
        # shape is computed once in the preprocessing and passed on as is.
        shape = tf.ones([tf.shape(image)[0]],
                        dtype=tf.int32) * tf.shape(image)[-1]
        return kspace[..., None], [shape]

    def adj_op(self, inputs):
        if self.multicoil:
            if self.density_compensation:
                kspace, ktraj, smaps, shape, dcomp, = inputs
            else:
                kspace, ktraj, smaps, shape = inputs
        else:
            if self.density_compensation:
                kspace, ktraj, shape, dcomp = inputs
            else:
                kspace, ktraj, shape = inputs
        if self.density_compensation:
            kspace = tf.cast(dcomp, kspace.dtype) * kspace[..., 0]
        else:
            kspace = kspace[..., 0]
        image = self.backward_op(kspace, ktraj)
        ## image resizing
        if len(self.im_size) < 3:
            # NOTE: for now very ugly way to deal with this condition
            shape = tf.reshape(shape[0], [])
            reshaping_condition = tf.math.less(shape, self.im_size[-1])
        else:
            shape = shape[0]
            reshaping_condition = tf.reduce_any(
                tf.math.less(shape, self.im_size))
        image_reshaped = tf.cond(
            pred=reshaping_condition,
            true_fn=lambda: self.crop_for_pad(image, shape),
            false_fn=lambda: image,
        )
        if self.multicoil:
            image = tf.reduce_sum(image_reshaped * tf.math.conj(smaps), axis=1)
        else:
            image = image_reshaped[:, 0]
        image = image[..., None]
        return image
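A minimal single-coil round trip through the layer above, assuming the module-level helpers it relies on (nufft, _crop_for_pad, ...) are importable from the surrounding repository. The shapes follow the slicing performed in op and adj_op: the image is (batch, H, W, 1) and the trajectory is (batch, ndim, nsamples) in radians.
import numpy as np
import tensorflow as tf

layer = NFFTBase(multicoil=False, im_size=(640, 400))
image = tf.cast(tf.random.uniform([1, 640, 400, 1]), tf.complex64)
ktraj = tf.random.uniform([1, 2, 1000], minval=-np.pi, maxval=np.pi)

# op returns the non-uniform k-space with a channel axis and the original width
kspace, shapes = layer.op([image, ktraj])
# adj_op maps it back to an image of shape (batch, H, W, 1)
image_back = layer.adj_op([kspace, ktraj, shapes[0]])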
Example #15
def generate_multicoil_nc_tf_records(
    acq_type='radial',
    af=4,
    mode='train',
    brain=False,
):
    if brain:
        path = Path(FASTMRI_DATA_DIR) / f'brain_multicoil_{mode}'
    else:
        path = Path(FASTMRI_DATA_DIR) / f'multicoil_{mode}'
    filenames = sorted(list(path.glob('*.h5')))
    scale_factor = 1e6
    image_size = (640, 400)
    nufft_ob = KbNufftModule(
        im_size=image_size,
        grid_size=None,
        norm='ortho',
    )

    class PreProcModel(tf.keras.models.Model):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            interpob = nufft_ob._extract_nufft_interpob()
            self.nufftob_back = kbnufft_adjoint(interpob,
                                                multiprocessing=False)
            self.nufftob_forw = kbnufft_forward(interpob,
                                                multiprocessing=False)
            if acq_type == 'radial':
                self.traj = get_radial_trajectory(image_size, af=af)
            elif acq_type == 'cartesian':
                self.traj = get_debugging_cartesian_trajectory()
            elif acq_type == 'spiral':
                self.traj = get_spiral_trajectory(image_size, af=af)
            else:
                raise NotImplementedError(
                    f'{acq_type} dataset not implemented yet.')
            self.dcomp = calculate_density_compensator(
                interpob,
                self.nufftob_forw,
                self.nufftob_back,
                self.traj[0],
            )
            if brain:
                self.fft = FFTBase(False, multicoil=True, use_smaps=False)

        def call(self, inputs):
            images = inputs['image']
            kspaces = inputs['kspace']
            if brain:
                complex_images = self.fft.adj_op([kspaces[..., None],
                                                  None])[..., 0]
                complex_images_padded = adjust_image_size(
                    complex_images,
                    image_size,
                    multicoil=True,
                )
                kspaces = self.fft.op([complex_images_padded[..., None],
                                       None])[..., 0]
            traj = tf.repeat(self.traj, tf.shape(images)[0], axis=0)
            orig_image_channels = tf_ortho_ifft2d(kspaces)
            nc_kspace = nufft(nufft_ob,
                              orig_image_channels,
                              traj,
                              image_size,
                              multiprocessing=False)
            nc_kspace_scaled = nc_kspace * scale_factor
            images_scaled = images * scale_factor
            images_channeled = images_scaled[..., None]
            nc_kspaces_channeled = nc_kspace_scaled[..., None]
            orig_shape = tf.ones([tf.shape(kspaces)[0]],
                                 dtype=tf.int32) * tf.shape(kspaces)[-1]
            dcomp = tf.ones([tf.shape(kspaces)[0],
                             tf.shape(self.dcomp)[0]],
                            dtype=self.dcomp.dtype) * self.dcomp[None, :]
            extra_args = (orig_shape, dcomp)
            smaps = non_cartesian_extract_smaps(nc_kspace, traj, dcomp,
                                                self.nufftob_back, orig_shape)
            model_inputs = (nc_kspaces_channeled, traj, smaps, extra_args)
            if brain:
                output_shape = tf.shape(images)[1:][None, :]
                output_shape = tf.tile(output_shape, [tf.shape(images)[0], 1])
                model_inputs += (output_shape, )
            return model_inputs, images_channeled

    extension = f'_nc_{acq_type}'
    if af != 4:
        extension += f'_af{af}'
    extension += '.tfrecords'
    selection = [
        {
            'inner_slices': None,
            'rand': False
        },  # slice selection
        {
            'rand': False,
            'keep_dim': False
        },  # coil selection
    ]
    mirrored_strategy = tf.distribute.MirroredStrategy()
    with mirrored_strategy.scope():
        preproc_model = PreProcModel()
    for filename in tqdm(filenames):
        directory = filename.parent
        filename_tfrecord = directory / (filename.stem + extension)
        if filename_tfrecord.exists():
            continue
        image, kspace, _ = from_multicoil_train_file_to_image_and_kspace_and_contrast(
            filename,
            selection=selection,
        )
        data = tf.data.Dataset.zip({
            'image':
            tf.data.Dataset.from_tensor_slices(image),
            'kspace':
            tf.data.Dataset.from_tensor_slices(kspace),
        })
        data = data.batch(len(image))
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        data = data.with_options(options)
        model_inputs, model_outputs = preproc_model.predict(data)
        with tf.io.TFRecordWriter(str(filename_tfrecord)) as writer:
            example = encode_ncmc_example(model_inputs, [model_outputs])
            writer.write(example)
Example #16
def train_nc_kspace_dataset_from_indexable(
    path,
    image_size,
    inner_slices=None,
    rand=False,
    scale_factor=1,
    contrast=None,
    n_samples=None,
    acq_type='radial',
    compute_dcomp=False,
    **acq_kwargs,
):
    r"""Non-cartesian dataset for the training/validation set of single coil fastMRI.

    The undersampling is performed retrospectively on the fully-sampled kspace,
    using spiral or radial trajectories. A uniform cartesian trajectory is also
    available for debugging.
    The non-uniform Fourier transform is implemented by tfkbnufft.
    The output of the dataset is of the form:
    ```
    (retrospectively_undersampled_nc_kspace, nc_trajectory, extra_args), ground_truth_reconstruction
    ```
    where `extra_args` contains the original shape of the image, and potentially
    the density compensation factors.

    Prefetching is performed, as well as parallel calls for preprocessing when
    rand or inner_slices are active.
    The ground truth reconstruction is read directly from the h5 file and not
    obtained through the Fourier inversion of the kspace.

    Arguments:
        path (str): the path to the fastMRI files. Should end with a `/`.
        image_size (tuple of int): the fixed image size to consider for the
            non-uniform Fourier transform. It needs to be fixed to only use
            a single plan. An image size of (640, 400) will lead to 99.5
            percent of the dataset being only padded and not cropped.
        inner_slices (int or None): the slices to consider in the volumes. The
            slicing will be performed as `inner_slices//2:-inner_slices//2`.
            If None, all slices are considered. Defaults to None.
        rand (bool): whether or not to sample one slice randomly from the
            considered slices of the volumes. If False, all the considered
            slices are taken. Defaults to False.
        scale_factor (int or float): the multiplicative scale factor for both the
            kspace and the target reconstruction. Typically, 1e6 is a good value
            for fastMRI, because it sets the values in a range acceptable for
            neural network training. See [R2020] (3.4 Training) for more details
            on this value. Defaults to 1.
        contrast (str or None): the contrast to select for this dataset. If None,
            all contrasts are considered. Available contrasts for fastMRI single
            coil are typically `CORPD_FBK` (Proton density) and `CORPDFS_FBK`
            (Proton density with fat suppression). Defaults to None.
        n_samples (int or None): the number of samples to consider from this set.
            If None, all samples are used. Defaults to None.
        acq_type (str): the type of non-cartesian trajectory to use. Choices are
            `radial`, `spiral` or `cartesian` (for debugging purposes). Defaults
            to radial.
        compute_dcomp (bool): whether to compute and return the density compensation
            factors. See [P1999] for more on density compensation. Defaults to
            False.
        **acq_kwargs: keyword arguments for the non-cartesian trajectory. See
            fastmri_recon/data/utils/non_cartesian.py for more info.

    Returns:
        tf.data.Dataset: the training/validation non-cartesian dataset.
    """
    selection = [{'inner_slices': inner_slices, 'rand': rand}]

    def _tf_filename_to_image_and_kspace_and_contrast(filename):
        def _from_train_file_to_image_and_kspace_and_contrast_tensor_to_tensor(
                filename):
            filename_str = filename.numpy()
            image, kspace, contrast = from_train_file_to_image_and_kspace_and_contrast(
                filename_str,
                selection=selection,
            )
            return tf.convert_to_tensor(image), tf.convert_to_tensor(
                kspace), tf.convert_to_tensor(contrast)

        [image, kspace, contrast] = tf.py_function(
            _from_train_file_to_image_and_kspace_and_contrast_tensor_to_tensor,
            [filename],
            [tf.float32, tf.complex64, tf.string],
        )
        if rand:
            n_slices = (1, )
        else:
            n_slices = (inner_slices, )
        kspace_size = n_slices + (640, None)
        image_size = n_slices + (320, 320)
        image.set_shape(image_size)
        kspace.set_shape(kspace_size)
        return image, kspace, contrast

    files_ds = tf.data.Dataset.list_files(f'{path}*.h5', shuffle=False)
    # this makes sure the file selection happens only once when using fewer
    # than all the samples
    files_ds = files_ds.shuffle(
        buffer_size=1000,
        seed=0,
        reshuffle_each_iteration=False,
    )
    image_and_kspace_and_contrast_ds = files_ds.map(
        _tf_filename_to_image_and_kspace_and_contrast,
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    # contrast filtering
    if contrast:
        image_and_kspace_and_contrast_ds = image_and_kspace_and_contrast_ds.filter(
            lambda image, kspace, tf_contrast: tf_contrast == contrast)
    image_and_kspace_ds = image_and_kspace_and_contrast_ds.map(
        lambda image, kspace, tf_contrast: (image, kspace),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    if n_samples is not None:
        image_and_kspace_ds = image_and_kspace_ds.take(n_samples)
    nufft_ob = KbNufftModule(
        im_size=image_size,
        grid_size=None,
        norm='ortho',
    )
    masked_kspace_ds = image_and_kspace_ds.map(
        non_cartesian_from_kspace_to_nc_kspace_and_traj(
            nufft_ob,
            image_size,
            acq_type=acq_type,
            scale_factor=scale_factor,
            compute_dcomp=compute_dcomp,
            **acq_kwargs,
        ),
        num_parallel_calls=tf.data.experimental.AUTOTUNE
        if rand or inner_slices is not None else None,
    ).repeat()
    if rand or inner_slices is not None:
        masked_kspace_ds = masked_kspace_ds.prefetch(
            buffer_size=tf.data.experimental.AUTOTUNE)

    return masked_kspace_ds
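A hypothetical call to the dataset builder documented above; the path is a placeholder and `af` (acceleration factor) is assumed to be one of the accepted **acq_kwargs, in line with the trajectory helpers used elsewhere in these examples.
train_set = train_nc_kspace_dataset_from_indexable(
    '/path/to/fastMRI/singlecoil_train/',
    image_size=(640, 400),
    inner_slices=8,
    rand=True,
    scale_factor=1e6,
    acq_type='radial',
    compute_dcomp=True,
    af=4,
)
model_inputs, target_image = next(iter(train_set))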
Example #17
def generate_multicoil_nc_tf_records(
    acq_type='radial',
    af=4,
    mode='train',
):
    path = Path(FASTMRI_DATA_DIR) / f'multicoil_{mode}'
    filenames = sorted(list(path.glob('*.h5')))
    scale_factor = 1e6
    image_size = (640, 400)
    nufft_ob = KbNufftModule(
        im_size=image_size,
        grid_size=None,
        norm='ortho',
    )

    class PreProcModel(tf.keras.models.Model):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            interpob = nufft_ob._extract_nufft_interpob()
            self.nufftob_back = kbnufft_adjoint(interpob,
                                                multiprocessing=False)
            self.nufftob_forw = kbnufft_forward(interpob,
                                                multiprocessing=False)
            if acq_type == 'radial':
                self.traj = get_radial_trajectory(image_size, af=af)
            elif acq_type == 'cartesian':
                self.traj = get_debugging_cartesian_trajectory()
            elif acq_type == 'spiral':
                self.traj = get_spiral_trajectory(image_size, af=af)
            else:
                raise NotImplementedError(
                    f'{acq_type} dataset not implemented yet.')
            self.dcomp = calculate_density_compensator(
                interpob,
                self.nufftob_forw,
                self.nufftob_back,
                self.traj[0],
            )

        def call(self, inputs):
            images, kspaces = inputs
            traj = tf.repeat(self.traj, tf.shape(images)[0], axis=0)
            orig_image_channels = tf_ortho_ifft2d(kspaces)
            nc_kspace = nufft(nufft_ob,
                              orig_image_channels,
                              traj,
                              image_size,
                              multiprocessing=False)
            nc_kspace_scaled = nc_kspace * scale_factor
            images_scaled = images * scale_factor
            images_channeled = images_scaled[..., None]
            nc_kspaces_channeled = nc_kspace_scaled[..., None]
            orig_shape = tf.ones([tf.shape(kspaces)[0]],
                                 dtype=tf.int32) * tf.shape(kspaces)[-1]
            dcomp = tf.ones([tf.shape(kspaces)[0],
                             tf.shape(self.dcomp)[0]],
                            dtype=self.dcomp.dtype) * self.dcomp[None, :]
            extra_args = (orig_shape, dcomp)
            smaps = non_cartesian_extract_smaps(nc_kspace, traj, dcomp,
                                                self.nufftob_back, orig_shape)
            return (nc_kspaces_channeled, traj, smaps,
                    extra_args), images_channeled

    extension = f'_nc_{acq_type}.tfrecords'
    selection = [
        {
            'inner_slices': None,
            'rand': False
        },  # slice selection
        {
            'rand': False,
            'keep_dim': False
        },  # coil selection
    ]
    mirrored_strategy = tf.distribute.MirroredStrategy()
    with mirrored_strategy.scope():
        preproc_model = PreProcModel()
    for filename in tqdm(filenames):
        directory = filename.parent
        filename_tfrecord = directory / (filename.stem + extension)
        if filename_tfrecord.exists():
            continue
        image, kspace, _ = from_multicoil_train_file_to_image_and_kspace_and_contrast(
            filename,
            selection=selection,
        )
        model_inputs, model_outputs = preproc_model.predict([image, kspace])
        with tf.io.TFRecordWriter(str(filename_tfrecord)) as writer:
            example = encode_ncmc_example(model_inputs, [model_outputs])
            writer.write(example)
Example #18
class NFFTBase(Layer):
    def __init__(self, multicoil=False, im_size=(640, 472), density_compensation=False, **kwargs):
        super(NFFTBase, self).__init__(**kwargs)
        self.multicoil = multicoil
        self.im_size = im_size
        self.nufft_ob = KbNufftModule(
            im_size=im_size,
            grid_size=None,
            norm='ortho',
        )
        self.density_compensation = density_compensation
        self.forward_op = kbnufft_forward(self.nufft_ob._extract_nufft_interpob())
        self.backward_op = kbnufft_adjoint(self.nufft_ob._extract_nufft_interpob())

    def pad_for_nufft(self, image):
        return _pad_for_nufft(image, self.im_size)

    def crop_for_pad(self, image, shape):
        return _crop_for_pad(image, shape, self.im_size)

    def crop_for_nufft(self, image):
        return _crop_for_nufft(image, self.im_size)

    def op(self, inputs):
        if self.multicoil:
            image, ktraj, smaps = inputs
        else:
            image, ktraj = inputs
        # for tfkbnufft we need a coil dimension even if there is none
        image = image[:, None, ..., 0]
        if self.multicoil:
            image = image * smaps

        kspace = nufft(self.nufft_ob, image, ktraj, image_size=self.im_size)
        shape = tf.ones([tf.shape(image)[0]], dtype=tf.int32) * tf.shape(image)[-1]
        return kspace[..., None], [shape]

    def adj_op(self, inputs):
        if self.multicoil:
            if self.density_compensation:
                kspace, ktraj, smaps, shape, dcomp, = inputs
            else:
                kspace, ktraj, smaps, shape = inputs
        else:
            if self.density_compensation:
                kspace, ktraj, shape, dcomp = inputs
            else:
                kspace, ktraj, shape = inputs
        shape = tf.reshape(shape[0], [])
        if self.density_compensation:
            kspace = tf.cast(dcomp, kspace.dtype) * kspace[..., 0]
        else:
            kspace = kspace[..., 0]
        image = self.backward_op(kspace, ktraj)
        image_reshaped = tf.cond(
            tf.math.greater_equal(shape, self.im_size[-1]),
            lambda: image,
            lambda: self.crop_for_pad(image, shape),
        )
        if self.multicoil:
            image = tf.reduce_sum(image_reshaped * tf.math.conj(smaps), axis=1)
        else:
            image = image_reshaped[:, 0]
        image = image[..., None]
        return image