Exemplo n.º 1
0
def test_resampling_fill_value():
    """ Test resampling with a non-zero fill value
    """
    prng = np.random.RandomState(10)

    # One 3d and one 4d volume, drawn in this order to keep the RNG stream.
    volumes = [prng.rand(1, 4, 4), prng.rand(1, 4, 4, 3)]

    rot = rotation(0, np.pi / 4)

    # Try a few different fill values; 0 exercises the default fill path.
    for vol in volumes:
        for fill in (-3.75, 0):
            kwargs = dict(target_affine=rot,
                          interpolation='nearest',
                          clip=False)
            if fill:
                kwargs['fill_value'] = fill
            resampled = resample_img(Nifti1Image(vol, np.eye(4)), **kwargs)
            # A corner voxel falls outside the rotated image, so it must
            # carry the fill value.
            assert_equal(resampled.get_data().flatten()[0], fill)

            resampled2 = resample_to_img(Nifti1Image(vol, np.eye(4)),
                                         resampled,
                                         interpolation='nearest',
                                         fill_value=fill)
            assert_equal(resampled2.get_data().flatten()[0], fill)
Exemplo n.º 2
0
def test_resampling_fill_value():
    """ Test resampling with a non-zero fill value
    """
    prng = np.random.RandomState(10)

    datasets = (prng.rand(1, 4, 4), prng.rand(1, 4, 4, 3))
    rot = rotation(0, np.pi / 4)

    # Try a few different fill values (0 checks the default behaviour).
    for arr in datasets:
        for fill in (-3.75, 0):
            src = Nifti1Image(arr, np.eye(4))
            if fill:
                resampled = resample_img(src,
                                         target_affine=rot,
                                         interpolation='nearest',
                                         fill_value=fill,
                                         clip=False)
            else:
                resampled = resample_img(src,
                                         target_affine=rot,
                                         interpolation='nearest',
                                         clip=False)
            # The first voxel lies outside the rotated field of view.
            assert_equal(resampled.get_data().flatten()[0], fill)

            refit = resample_to_img(Nifti1Image(arr, np.eye(4)),
                                    resampled,
                                    interpolation='nearest',
                                    fill_value=fill)
            assert_equal(refit.get_data().flatten()[0], fill)
def linear_downsampling_generator(generator,
                                  max_downsampling_factor=2,
                                  isotropic=False):
    '''
    Downsamples each sample (linearly) by a random factor and upsamples to original resolution again (nearest neighbor).

    Info:
    * Uses nilearn resample_img for resampling.
    * If isotropic=True:  Resamples all dimensions (channels, x, y, z) with same downsampling factor
    * If isotropic=False: Randomly choose new downsampling factor for each dimension
    * Does not resample "seg".

    Parameters
    ----------
    generator : iterable of dict
        Must yield dicts containing at least a 'data' key whose value has
        shape (batch_size, nr_of_channels, x, y, [z]).
    max_downsampling_factor : float, optional (default=2)
        Upper bound for the uniformly drawn downsampling factor(s).
    isotropic : bool, optional (default=False)
        If True, use one factor per sample for all spatial axes; otherwise
        draw a separate factor per axis.

    Yields
    ------
    dict
        The input dictionary with its 'data' array degraded in place.
    '''
    import nibabel as nib
    from nilearn.image.resampling import resample_img, resample_to_img

    for data_dict in generator:
        assert "data" in data_dict, \
            "your data generator needs to return a python dictionary with at least a 'data' key value pair"

        # shape of data must be: (batch_size, nr_of_channels, x, y, [z])
        data = data_dict['data']
        dim = len(data.shape[2:])  # number of spatial dimensions (2 or 3)
        for sample_idx in range(data.shape[0]):

            # One factor per sample; only used when isotropic=True.
            fact = random.uniform(1, max_downsampling_factor)

            for channel_idx in range(data.shape[1]):

                affine = np.identity(4)
                if dim == 3:
                    img_data = data[sample_idx, channel_idx]
                elif dim == 2:
                    tmp = data[sample_idx, channel_idx]
                    # Add a third spatial dimension to make resample_img work.
                    img_data = np.reshape(
                        tmp, (1, tmp.shape[0], tmp.shape[1]))
                else:
                    raise ValueError("Invalid dimension size")

                image = nib.Nifti1Image(img_data, affine)
                # BUG FIX: copy instead of aliasing -- scaling `affine` in
                # place could also change the affine `image` was built with
                # (nibabel-version dependent), turning the resample below
                # into a no-op.
                affine2 = affine.copy()
                if isotropic:
                    affine2[0, 0] = fact
                    affine2[1, 1] = fact
                    affine2[2, 2] = fact
                else:
                    affine2[0, 0] = random.uniform(1, max_downsampling_factor)
                    affine2[1, 1] = random.uniform(1, max_downsampling_factor)
                    affine2[2, 2] = random.uniform(1, max_downsampling_factor)
                affine2[3, 3] = 1
                # Downsample linearly, then snap back onto the original grid
                # with nearest-neighbour interpolation.
                image2 = resample_img(image,
                                      target_affine=affine2,
                                      interpolation='continuous')
                image3 = resample_to_img(image2, image, 'nearest')
                data[sample_idx, channel_idx] = np.squeeze(image3.get_data())

        data_dict["data"] = data
        yield data_dict
Exemplo n.º 4
0
def test_resample_identify_affine_int_translation():
    # Testing resample to img function
    rand_gen = np.random.RandomState(0)

    source_shape = (6, 4, 6)
    source_affine = np.eye(4)
    # BUG FIX: draw from the seeded generator (was np.random.randint, which
    # uses the global state and made the test non-reproducible).
    # The last column of the affine is a random integer translation.
    source_affine[:, 3] = np.append(rand_gen.randint(0, 4, 3), 1)
    source_data = rand_gen.random_sample(source_shape)
    source_img = Nifti1Image(source_data, source_affine)

    target_shape = (11, 10, 9)
    target_data = np.zeros(target_shape)
    # BUG FIX: copy so the shift below cannot also mutate source_affine
    # (they used to alias the same array).
    target_affine = source_affine.copy()
    target_affine[:3, 3] -= 3  # shift the origin by -3 in x, y, z
    target_data[3:9, 3:7,
                3:9] = source_data  # put the data at the offset location
    target_img = Nifti1Image(target_data, target_affine)

    # Resampling source onto the target grid must land the data exactly on
    # the offset location (pure integer translation).
    result_img = resample_to_img(source_img,
                                 target_img,
                                 interpolation='nearest')
    np.testing.assert_almost_equal(target_img.get_data(),
                                   result_img.get_data())

    # And the round trip back must recover the source exactly.
    result_img_2 = resample_to_img(result_img,
                                   source_img,
                                   interpolation='nearest')
    np.testing.assert_almost_equal(source_img.get_data(),
                                   result_img_2.get_data())

    # force_resample=True must give the same result as the fast path.
    result_img_3 = resample_to_img(result_img,
                                   source_img,
                                   interpolation='nearest',
                                   force_resample=True)
    np.testing.assert_almost_equal(result_img_2.get_data(),
                                   result_img_3.get_data())

    result_img_4 = resample_to_img(source_img,
                                   target_img,
                                   interpolation='nearest',
                                   force_resample=True)
    np.testing.assert_almost_equal(target_img.get_data(),
                                   result_img_4.get_data())
def linear_downsampling_generator(generator, max_downsampling_factor=2, isotropic=False):
    '''
    Downsamples each sample (linearly) by a random factor and upsamples to original resolution again (nearest neighbor).

    Info:
    * Uses nilearn resample_img for resampling.
    * If isotropic=True:  Resamples all dimensions (channels, x, y, z) with same downsampling factor
    * If isotropic=False: Randomly choose new downsampling factor for each dimension
    * Does not resample "seg".

    Parameters
    ----------
    generator : iterable of dict
        Must yield dicts with at least a 'data' key of shape
        (batch_size, nr_of_channels, x, y, [z]).
    max_downsampling_factor : float, optional (default=2)
        Upper bound for the uniformly drawn downsampling factor(s).
    isotropic : bool, optional (default=False)
        If True, one factor per sample is shared by all spatial axes.

    Yields
    ------
    dict
        The input dictionary with its 'data' array degraded in place.
    '''
    import nibabel as nib
    from nilearn.image.resampling import resample_img, resample_to_img

    for data_dict in generator:
        assert "data" in data_dict, \
            "your data generator needs to return a python dictionary with at least a 'data' key value pair"

        # shape of data must be: (batch_size, nr_of_channels, x, y, [z])
        data = data_dict['data']
        dim = len(data.shape[2:])  # remove batch_size and nr_of_channels dimension
        for sample_idx in range(data.shape[0]):

            # One factor per sample; only used when isotropic=True.
            fact = random.uniform(1, max_downsampling_factor)

            for channel_idx in range(data.shape[1]):

                affine = np.identity(4)
                if dim == 3:
                    img_data = data[sample_idx, channel_idx]
                elif dim == 2:
                    tmp = data[sample_idx, channel_idx]
                    # add third spatial dimension to make resample_img work
                    img_data = np.reshape(tmp, (1, tmp.shape[0], tmp.shape[1]))
                else:
                    raise ValueError("Invalid dimension size")

                image = nib.Nifti1Image(img_data, affine)
                # BUG FIX: copy instead of aliasing -- scaling `affine` in
                # place could also change the affine `image` was built with
                # (nibabel-version dependent), making the resample a no-op.
                affine2 = affine.copy()
                if isotropic:
                    affine2[0, 0] = fact
                    affine2[1, 1] = fact
                    affine2[2, 2] = fact
                else:
                    affine2[0, 0] = random.uniform(1, max_downsampling_factor)
                    affine2[1, 1] = random.uniform(1, max_downsampling_factor)
                    affine2[2, 2] = random.uniform(1, max_downsampling_factor)
                affine2[3, 3] = 1
                # Downsample linearly, then snap back to the original grid.
                image2 = resample_img(image, target_affine=affine2, interpolation='continuous')
                image3 = resample_to_img(image2, image, 'nearest')
                data[sample_idx, channel_idx] = np.squeeze(image3.get_data())

        data_dict["data"] = data
        yield data_dict
Exemplo n.º 6
0
def augment_linear_downsampling_nilearn(data,
                                        max_downsampling_factor=2,
                                        isotropic=False):
    '''
    Downsamples each sample (linearly) by a random factor and upsamples to original resolution again (nearest neighbor).

    Info:
    * Uses nilearn resample_img for resampling.
    * If isotropic=True:  Resamples all channels (channels, x, y, z) with same downsampling factor
    * If isotropic=False: Randomly choose new downsampling factor for each dimension

    Parameters
    ----------
    data : ndarray of shape (batch_size, nr_of_channels, x, y, [z])
        Modified in place and also returned.
    max_downsampling_factor : float, optional (default=2)
        Upper bound for the uniformly drawn downsampling factor(s).
    isotropic : bool, optional (default=False)
        If True, one factor per sample is shared by all spatial axes.

    Returns
    -------
    ndarray
        The degraded `data` array.
    '''
    import nibabel as nib
    from nilearn.image.resampling import resample_img, resample_to_img

    dim = len(data.shape[2:])  # remove batch_size and nr_of_channels dimension
    for sample_idx in range(data.shape[0]):

        # One factor per sample; only used when isotropic=True.
        fact = random.uniform(1, max_downsampling_factor)

        for channel_idx in range(data.shape[1]):

            affine = np.identity(4)
            if dim == 3:
                img_data = data[sample_idx, channel_idx]
            elif dim == 2:
                tmp = data[sample_idx, channel_idx]
                # add third spatial dimension to make resample_img work
                img_data = np.reshape(
                    tmp, (1, tmp.shape[0], tmp.shape[1]))
            else:
                raise ValueError("Invalid dimension size")

            image = nib.Nifti1Image(img_data, affine)
            # BUG FIX: copy instead of aliasing -- scaling `affine` in place
            # could also change the affine `image` was built with
            # (nibabel-version dependent), making the resample a no-op.
            affine2 = affine.copy()
            if isotropic:
                affine2[0, 0] = fact
                affine2[1, 1] = fact
                affine2[2, 2] = fact
            else:
                affine2[0, 0] = random.uniform(1, max_downsampling_factor)
                affine2[1, 1] = random.uniform(1, max_downsampling_factor)
                affine2[2, 2] = random.uniform(1, max_downsampling_factor)
            affine2[3, 3] = 1
            image2 = resample_img(image,
                                  target_affine=affine2,
                                  interpolation='continuous')
            image3 = resample_to_img(image2, image, 'nearest')
            # BUG FIX: the write-back was outside the channel loop, so only
            # the LAST channel's degraded result was ever stored (and a
            # zero-channel input raised NameError on image3).
            data[sample_idx, channel_idx] = np.squeeze(image3.get_data())
    return data
def augment_linear_downsampling_nilearn(data, max_downsampling_factor=2, isotropic=False):
    '''
    Downsamples each sample (linearly) by a random factor and upsamples to original resolution again (nearest neighbor).

    Info:
    * Uses nilearn resample_img for resampling.
    * If isotropic=True:  Resamples all channels (channels, x, y, z) with same downsampling factor
    * If isotropic=False: Randomly choose new downsampling factor for each dimension

    Parameters
    ----------
    data : ndarray of shape (batch_size, nr_of_channels, x, y, [z])
        Modified in place and also returned.
    max_downsampling_factor : float, optional (default=2)
        Upper bound for the uniformly drawn downsampling factor(s).
    isotropic : bool, optional (default=False)
        If True, one factor per sample is shared by all spatial axes.

    Returns
    -------
    ndarray
        The degraded `data` array.
    '''
    import nibabel as nib
    from nilearn.image.resampling import resample_img, resample_to_img

    dim = len(data.shape[2:])  # remove batch_size and nr_of_channels dimension
    for sample_idx in range(data.shape[0]):

        # One factor per sample; only used when isotropic=True.
        fact = random.uniform(1, max_downsampling_factor)

        for channel_idx in range(data.shape[1]):

            affine = np.identity(4)
            if dim == 3:
                img_data = data[sample_idx, channel_idx]
            elif dim == 2:
                tmp = data[sample_idx, channel_idx]
                # add third spatial dimension to make resample_img work
                img_data = np.reshape(tmp, (1, tmp.shape[0], tmp.shape[1]))
            else:
                raise ValueError("Invalid dimension size")

            image = nib.Nifti1Image(img_data, affine)
            # BUG FIX: copy instead of aliasing -- scaling `affine` in place
            # could also change the affine `image` was built with
            # (nibabel-version dependent), making the resample a no-op.
            affine2 = affine.copy()
            if isotropic:
                affine2[0, 0] = fact
                affine2[1, 1] = fact
                affine2[2, 2] = fact
            else:
                affine2[0, 0] = random.uniform(1, max_downsampling_factor)
                affine2[1, 1] = random.uniform(1, max_downsampling_factor)
                affine2[2, 2] = random.uniform(1, max_downsampling_factor)
            affine2[3, 3] = 1
            image2 = resample_img(image, target_affine=affine2, interpolation='continuous')
            image3 = resample_to_img(image2, image, 'nearest')
            # BUG FIX: the write-back was outside the channel loop, so only
            # the LAST channel's degraded result was ever stored (and a
            # zero-channel input raised NameError on image3).
            data[sample_idx, channel_idx] = np.squeeze(image3.get_data())
    return data
Exemplo n.º 8
0
def test_resample_to_img():
    # Testing resample to img function
    rand_gen = np.random.RandomState(0)
    shape = (6, 3, 6, 3)
    data = rand_gen.random_sample(shape)

    source_img = Nifti1Image(data, np.eye(4))
    # Target grid has voxels twice as large as the source grid.
    target_img = Nifti1Image(data, 2 * np.eye(4))

    result_img = resample_to_img(source_img, target_img,
                                 interpolation='nearest')

    # Nearest-neighbour resampling onto a 2x coarser grid keeps every
    # other source voxel.
    expected = data[::2, ::2, ::2, ...]
    x, y, z = expected.shape[:3]
    np.testing.assert_almost_equal(expected,
                                   result_img.get_data()[:x, :y, :z, ...])
Exemplo n.º 9
0
def test_resample_to_img():
    # Testing resample to img function
    rand_gen = np.random.RandomState(0)
    shape = (6, 3, 6, 3)
    data = rand_gen.random_sample(shape)

    source_affine = np.eye(4)
    target_affine = source_affine * 2  # voxels twice as large

    result_img = resample_to_img(Nifti1Image(data, source_affine),
                                 Nifti1Image(data, target_affine),
                                 interpolation='nearest')

    # On a 2x coarser grid, nearest interpolation picks every other voxel.
    sub = data[::2, ::2, ::2, ...]
    nx, ny, nz = sub.shape[:3]
    np.testing.assert_almost_equal(
        sub, result_img.get_data()[:nx, :ny, :nz, ...])
Exemplo n.º 10
0
def vol_to_surf(img,
                surf_mesh,
                radius=3.,
                interpolation='linear',
                kind='line',
                n_samples=None,
                mask_img=None):
    """Extract surface data from a Nifti image.

    .. versionadded:: 0.4.0

    Parameters
    ----------

    img : Niimg-like object, 3d or 4d.
        See http://nilearn.github.io/manipulating_images/input_output.html

    surf_mesh : str or numpy.ndarray
        Either a file containing surface mesh geometry (valid formats
        are .gii or Freesurfer specific files such as .orig, .pial,
        .sphere, .white, .inflated) or a list of two Numpy arrays,
        the first containing the x-y-z coordinates of the mesh
        vertices, the second containing the indices (into coords)
        of the mesh faces.

    radius : float, optional (default=3.).
        The size (in mm) of the neighbourhood from which samples are drawn
        around each node.

    interpolation : {'linear', 'nearest'}
        How the image intensity is measured at a sample point.

        - 'linear' (the default):
            Use a trilinear interpolation of neighboring voxels.
        - 'nearest':
            Use the intensity of the nearest voxel.

        For one image, the speed difference is small, 'linear' takes about x1.5
        more time. For many images, 'nearest' scales much better, up to x20
        faster.

    kind : {'line', 'ball'}
        The strategy used to sample image intensities around each vertex.

        - 'line' (the default):
            samples are regularly spaced along the normal to the mesh, over the
            interval [- `radius`, + `radius`].
            (sometimes called thickness sampling)
        - 'ball':
            samples are regularly spaced inside a ball centered at the mesh
            vertex.

    n_samples : int or None, optional (default=None)
        How many samples are drawn around each vertex and averaged. If
        ``None``, use a reasonable default for the chosen sampling strategy
        (20 for 'ball' or 10 for 'line').
        For performance reasons, if using `kind` ="ball", choose `n_samples` in
        [10, 20, 40, 80, 160] (default is 20), because cached positions are
        available.

    mask_img : Niimg-like object or None, optional (default=None)
        Samples falling out of this mask or out of the image are ignored.
        If ``None``, don't apply any mask.

    Returns
    -------
    texture : numpy.ndarray, 1d or 2d.
        If 3D image is provided, a 1d vector is returned, containing one value
        for each mesh node.
        If 4D image is provided, a 2d array is returned, where each row
        corresponds to a mesh node.

    Notes
    -----
    This function computes a value for each vertex of the mesh. In order to do
    so, it selects a few points in the volume surrounding that vertex,
    interpolates the image intensities at these sampling positions, and
    averages the results.

    Two strategies are available to select these positions.
        - 'ball' uses points regularly spaced in a ball centered at the mesh
            vertex. The radius of the ball is controlled by the parameter
            `radius`.
        - 'line' starts by drawing the normal to the mesh passing through this
            vertex. It then selects a segment of this normal, centered at the
            vertex, of length 2 * `radius`. Image intensities are measured at
            points regularly spaced on this normal segment.

    You can control how many samples are drawn by setting `n_samples`.

    Once the sampling positions are chosen, those that fall outside of the 3d
    image (or outside of the mask if you provided one) are discarded. If all
    sample positions are discarded (which can happen, for example, if the
    vertex itself is outside of the support of the image), the projection at
    this vertex will be ``numpy.nan``.

    The 3d image then needs to be interpolated at each of the remaining points.
    Two options are available: 'nearest' selects the value of the nearest
    voxel, and 'linear' performs trilinear interpolation of neighbouring
    voxels. 'linear' may give better results - for example, the projected
    values are more stable when resampling the 3d image or applying affine
    transformations to it. For one image, the speed difference is small,
    'linear' takes about x1.5 more time. For many images, 'nearest' scales much
    better, up to x20 faster.

    Once the 3d image has been interpolated at each sample point, the
    interpolated values are averaged to produce the value associated to this
    particular mesh vertex.

    WARNING: This function is experimental and details such as the
    interpolation method are subject to change.

    """
    # Dispatch table: interpolation name -> sampling implementation.
    sampling_schemes = {
        'linear': _interpolation_sampling,
        'nearest': _nearest_voxel_sampling
    }
    if interpolation not in sampling_schemes:
        raise ValueError('"interpolation" should be one of {}'.format(
            tuple(sampling_schemes.keys())))
    img = load_img(img)
    if mask_img is not None:
        mask_img = _utils.check_niimg(mask_img)
        # Resample the mask onto the image grid so voxel indices line up;
        # nearest-neighbour avoids interpolating mask values.
        mask = get_data(
            resampling.resample_to_img(mask_img,
                                       img,
                                       interpolation='nearest',
                                       copy=False))
    else:
        mask = None
    # Remember the input dimensionality so a 3d image can return a 1d vector.
    original_dimension = len(img.shape)
    img = _utils.check_niimg(img, atleast_4d=True)
    # Move the time axis first: frames iterates over 3d volumes.
    frames = np.rollaxis(get_data(img), -1)
    mesh = load_surf_mesh(surf_mesh)
    sampling = sampling_schemes[interpolation]
    texture = sampling(frames,
                       mesh,
                       img.affine,
                       radius=radius,
                       kind=kind,
                       n_points=n_samples,
                       mask=mask)
    if original_dimension == 3:
        # 3d input: drop the singleton frame axis before returning.
        texture = texture[0]
    return texture.T
Exemplo n.º 11
0
def vol_to_surf(img, surf_mesh,
                radius=3., interpolation='linear', kind='auto',
                n_samples=None, mask_img=None, inner_mesh=None, depth=None):
    """Extract surface data from a Nifti image.

    .. versionadded:: 0.4.0

    Parameters
    ----------
    img : Niimg-like object, 3d or 4d.
        See http://nilearn.github.io/manipulating_images/input_output.html

    surf_mesh : str or numpy.ndarray or Mesh
        Either a file containing surface mesh geometry (valid formats
        are .gii or Freesurfer specific files such as .orig, .pial,
        .sphere, .white, .inflated) or two Numpy arrays organized in a list,
        tuple or a namedtuple with the fields "coordinates" and "faces", or
        a Mesh object with "coordinates" and "faces" attributes.

    radius : float, optional
        The size (in mm) of the neighbourhood from which samples are drawn
        around each node. Ignored if `inner_mesh` is provided.
        Default=3.0.

    interpolation : {'linear', 'nearest'}, optional
        How the image intensity is measured at a sample point.
        Default='linear'.

        - 'linear':
            Use a trilinear interpolation of neighboring voxels.
        - 'nearest':
            Use the intensity of the nearest voxel.

        For one image, the speed difference is small, 'linear' takes about x1.5
        more time. For many images, 'nearest' scales much better, up to x20
        faster.

    kind : {'auto', 'depth', 'line', 'ball'}, optional
        The strategy used to sample image intensities around each vertex.
        Default='auto'.

        - 'auto':
            Chooses 'depth' if `inner_mesh` is provided and 'line' otherwise.
        - 'depth':
            `inner_mesh` must be a mesh whose nodes correspond to those in
            `surf_mesh`. For example, `inner_mesh` could be a white matter
            surface mesh and `surf_mesh` a pial surface mesh. Samples are
            placed between each pair of corresponding nodes at the specified
            cortical depths (regularly spaced by default, see `depth`
            parameter).
        - 'line':
            Samples are placed along the normal to the mesh, at the positions
            specified by `depth`, or by default regularly spaced over the
            interval [- `radius`, + `radius`].
        - 'ball':
            Samples are regularly spaced inside a ball centered at the mesh
            vertex.

    n_samples : int or None, optional
        How many samples are drawn around each vertex and averaged. If
        ``None``, use a reasonable default for the chosen sampling strategy
        (20 for 'ball' or 10 for 'line').
        For performance reasons, if using `kind` ="ball", choose `n_samples` in
        [10, 20, 40, 80, 160] (default is 20), because cached positions are
        available.

    mask_img : Niimg-like object or None, optional
        Samples falling out of this mask or out of the image are ignored.
        If ``None``, don't apply any mask.

    inner_mesh : str or numpy.ndarray, optional
        Either a file containing a surface mesh or a pair of ndarrays
        (coordinates, triangles). If provided this is an inner surface that is
        nested inside the one represented by `surf_mesh` -- e.g. `surf_mesh` is
        a pial surface and `inner_mesh` a white matter surface. In this case
        nodes in both meshes must correspond: node i in `surf_mesh` is just
        across the gray matter thickness from node i in `inner_mesh`. Image
        values for index i are then sampled along the line joining these two
        points (if `kind` is 'auto' or 'depth').

    depth : sequence of floats or None, optional
        The cortical depth of samples. If provided, n_samples is ignored.
        When `inner_mesh` is provided, each element of `depth` is a fraction of
        the distance from `mesh` to `inner_mesh`: 0 is exactly on the outer
        surface, .5 is halfway, 1. is exactly on the inner surface. `depth`
        entries can be negative or greater than 1.
        When `inner_mesh` is not provided and `kind` is "line", each element of
        `depth` is a fraction of `radius` along the inwards normal at each mesh
        node. For example if `radius==1` and `depth==[-.5, 0.]`, for each node
        values will be sampled .5 mm outside of the surface and exactly at the
        node position.
        This parameter is not supported for the "ball" strategy so passing
        `depth` when `kind=="ball"` results in a `ValueError`.

    Returns
    -------
    texture : numpy.ndarray, 1d or 2d.
        If 3D image is provided, a 1d vector is returned, containing one value
        for each mesh node.
        If 4D image is provided, a 2d array is returned, where each row
        corresponds to a mesh node.

    Notes
    -----
    This function computes a value for each vertex of the mesh. In order to do
    so, it selects a few points in the volume surrounding that vertex,
    interpolates the image intensities at these sampling positions, and
    averages the results.

    Three strategies are available to select these positions.

        - with 'depth', data is sampled at various cortical depths between
          corresponding nodes of `surface_mesh` and `inner_mesh` (which can be,
          for example, a pial surface and a white matter surface).
        - 'ball' uses points regularly spaced in a ball centered at the mesh
          vertex. The radius of the ball is controlled by the parameter
          `radius`.
        - 'line' starts by drawing the normal to the mesh passing through this
          vertex. It then selects a segment of this normal, centered at the
          vertex, of length 2 * `radius`. Image intensities are measured at
          points regularly spaced on this normal segment, or at positions
          determined by `depth`.
        - ('auto' chooses 'depth' if `inner_mesh` is provided and 'line'
          otherwise)

    You can control how many samples are drawn by setting `n_samples`, or their
    position by setting `depth`.

    Once the sampling positions are chosen, those that fall outside of the 3d
    image (or outside of the mask if you provided one) are discarded. If all
    sample positions are discarded (which can happen, for example, if the
    vertex itself is outside of the support of the image), the projection at
    this vertex will be ``numpy.nan``.

    The 3d image then needs to be interpolated at each of the remaining points.
    Two options are available: 'nearest' selects the value of the nearest
    voxel, and 'linear' performs trilinear interpolation of neighbouring
    voxels. 'linear' may give better results - for example, the projected
    values are more stable when resampling the 3d image or applying affine
    transformations to it. For one image, the speed difference is small,
    'linear' takes about x1.5 more time. For many images, 'nearest' scales much
    better, up to x20 faster.

    Once the 3d image has been interpolated at each sample point, the
    interpolated values are averaged to produce the value associated to this
    particular mesh vertex.

    Warnings
    --------
    This function is experimental and details such as the interpolation method
    are subject to change.

    """
    # Dispatch table: interpolation name -> sampling implementation.
    sampling_schemes = {'linear': _interpolation_sampling,
                        'nearest': _nearest_voxel_sampling}
    if interpolation not in sampling_schemes:
        raise ValueError('"interpolation" should be one of {}'.format(
            tuple(sampling_schemes.keys())))
    img = load_img(img)
    if mask_img is not None:
        mask_img = _utils.check_niimg(mask_img)
        # Resample the mask onto the image grid so voxel indices line up;
        # nearest-neighbour avoids interpolating mask values.
        mask = get_data(resampling.resample_to_img(
            mask_img, img, interpolation='nearest', copy=False))
    else:
        mask = None
    # Remember the input dimensionality so a 3d image can return a 1d vector.
    original_dimension = len(img.shape)
    img = _utils.check_niimg(img, atleast_4d=True)
    # Move the time axis first: frames iterates over 3d volumes.
    frames = np.rollaxis(get_data(img), -1)
    mesh = load_surf_mesh(surf_mesh)
    if inner_mesh is not None:
        inner_mesh = load_surf_mesh(inner_mesh)
    sampling = sampling_schemes[interpolation]
    texture = sampling(
        frames, mesh, img.affine, radius=radius, kind=kind,
        n_points=n_samples, mask=mask, inner_mesh=inner_mesh, depth=depth)
    if original_dimension == 3:
        # 3d input: drop the singleton frame axis before returning.
        texture = texture[0]
    return texture.T
Exemplo n.º 12
0
# Prepare data: download (or load cached) ADHD resting-state functional scans.
adhd_dataset = datasets.fetch_adhd(n_subjects=40, data_dir=DATADIR)
func_filenames = adhd_dataset.func
print("Functional nifti image: {0}...{1} ({2})".format(func_filenames[0],
                                                       func_filenames[1],
                                                       len(func_filenames)))

# Build an EPI-based mask because we have no anatomical data
if not os.path.isfile(MASKFILE):
    target_img = nibabel.load(func_filenames[0])
    # Voxels that are non-zero in the first EPI volume (scanner support).
    target_mask = (target_img.get_data()[..., 0] != 0).astype(int)
    template = nibabel.load(SEGFILE)
    struct = nibabel.load(STRUCTFILE)
    # Bring the segmentation template and structural image onto the EPI grid.
    resampled_template = resample_to_img(template,
                                         target_img,
                                         interpolation="nearest")
    resampled_struct = resample_to_img(struct,
                                       target_img,
                                       interpolation="nearest")
    # Keep label 2 of the segmentation template -- presumably a tissue class
    # of interest; TODO confirm against the template's label definitions.
    mask = (resampled_template.get_data() == 2).astype(float)
    # mask = ndimage.gaussian_filter(mask, sigma=1.25)
    # NOTE(review): with the smoothing above commented out, `mask` is already
    # binary, so this 0.3 threshold is currently a no-op.
    mask = (mask >= 0.3).astype(int)
    # Intersect with the structural mask and the EPI support.
    mask = mask & resampled_struct.get_data() & target_mask
    mask_img = nibabel.Nifti1Image(mask, target_img.affine)
    nibabel.save(mask_img, MASKFILE)
else:
    # Reuse the previously computed mask.
    mask_img = nibabel.load(MASKFILE)

# Mask and preproc EPI data
# Build an EPI-based mask because we have no anatomical data