Example #1
def load_data(data, n_echos=None):
    """Coerce input `data` files to required 3D array output.

    Parameters
    ----------
    data : :obj:`list` of img_like, :obj:`list` of :obj:`str`, :obj:`str`, or img_like
        A list of echo-wise img objects or paths to files.
        Single img objects or filenames are allowed as well, to support z-concatenated data.
    n_echos : :obj:`int`, optional
        Number of echos in provided data array. Only necessary if `data` is a single,
        z-concatenated file. Default: None

    Returns
    -------
    fdata : (S x E x T) :obj:`numpy.ndarray`
        Output data where `S` is samples, `E` is echos, and `T` is time.
    ref_img : img_like
        Reference image object for saving output files.
    """
    if n_echos is None and (isinstance(data, (str, nib.spatialimages.SpatialImage)) or len(data) == 1):
        raise ValueError(
            "Number of echos must be specified when a single z-concatenated file is supplied."
        )

    if not isinstance(data, (list, str, nib.spatialimages.SpatialImage)):
        raise TypeError(f"Unsupported type: {type(data)}")
    elif isinstance(data, list):
        for item in data:
            if not isinstance(item, (str, nib.spatialimages.SpatialImage)):
                raise TypeError(f"Unsupported type: {type(item)}")

        if len(data) == 1:  # a z-concatenated file was provided
            data = data[0]
        elif len(data) == 2:  # inviable -- need more than 2 echos
            raise ValueError(
                f"Cannot run `tedana` with only two echos: {data}")
        else:  # individual echo files were provided (surface or volumetric)
            fdata = np.stack([utils.reshape_niimg(f) for f in data], axis=1)
            ref_img = check_niimg(data[0])
            ref_img.header.extensions = []
            return np.atleast_3d(fdata), ref_img

    # Z-concatenated file/img
    img = check_niimg(data)
    (nx, ny), nz = img.shape[:2], img.shape[2] // n_echos
    fdata = utils.reshape_niimg(img.get_fdata().reshape(nx,
                                                        ny,
                                                        nz,
                                                        n_echos,
                                                        -1,
                                                        order="F"))
    # create reference image
    ref_img = img.__class__(np.zeros((nx, ny, nz, 1)),
                            affine=img.affine,
                            header=img.header,
                            extra=img.extra)
    ref_img.header.extensions = []
    ref_img.header.set_sform(ref_img.header.get_sform(), code=1)

    return fdata, ref_img
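
A minimal usage sketch for the load_data snippet above; the echo file paths are hypothetical and the import path is an assumption about tedana's layout:

from tedana.io import load_data  # assumed module path

echo_files = ["sub-01_echo-1.nii.gz",  # hypothetical echo-wise files
              "sub-01_echo-2.nii.gz",
              "sub-01_echo-3.nii.gz"]
fdata, ref_img = load_data(echo_files)
print(fdata.shape)  # (S, E, T): samples x echos x time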
Example #2
def test_check_niimg_3d():
    # check error for non-forced but necessary resampling
    with pytest.raises(TypeError, match='nibabel format'):
        _utils.check_niimg(0)

    # check error for non-forced but necessary resampling
    with pytest.raises(TypeError, match='empty object'):
        _utils.check_niimg([])

    # Test dimensionality error
    img = Nifti1Image(np.zeros((10, 10, 10)), np.eye(4))
    with pytest.raises(TypeError,
                       match="Input data has incompatible dimensionality: "
                       "Expected dimension is 3D and you provided a list "
                       "of 3D images \\(4D\\)."):
        _utils.check_niimg_3d([img, img])

    # Check that a filename does not raise an error
    data = np.zeros((40, 40, 40, 1))
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    with testing.write_tmp_imgs(data_img, create_files=True) as filename:
        _utils.check_niimg_3d(filename)

    # check data dtype equal with dtype='auto'
    img_check = _utils.check_niimg_3d(img, dtype='auto')
    assert get_data(img).dtype.kind == get_data(img_check).dtype.kind
Example #3
def test_iter_check_niimgs():
    no_file_matching = "No files matching path: %s"
    affine = np.eye(4)
    img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine)
    img_2_4d = [[img_4d, img_4d]]

    for empty in ((), [], (i for i in ()), [i for i in ()]):
        assert_raises_regex(ValueError,
                            "Input niimgs list is empty.",
                            list, _iter_check_niimg(empty))

    nofile_path = "/tmp/nofile"
    assert_raises_regex(ValueError,
                        no_file_matching % nofile_path,
                        list, _iter_check_niimg(nofile_path))

    # Create a test file
    filename = tempfile.mktemp(prefix="nilearn_test",
                               suffix=".nii",
                               dir=None)
    img_4d.to_filename(filename)
    niimgs = list(_iter_check_niimg([filename]))
    assert_array_equal(niimgs[0].get_data(),
                       _utils.check_niimg(img_4d).get_data())
    del img_4d
    del niimgs
    os.remove(filename)

    # Regular case
    niimgs = list(_iter_check_niimg(img_2_4d))
    assert_array_equal(niimgs[0].get_data(),
                       _utils.check_niimg(img_2_4d).get_data())
Example #4
def test_check_niimg():
    # check error for non-forced but necessary resampling
    assert_raises_regexp(TypeError, 'image',
                         _utils.check_niimg, 0)

    # check error for non-forced but necessary resampling
    assert_raises_regexp(TypeError, 'image',
                         _utils.check_niimg, [])

    # Test ensure_3d
    # check error for non-forced but necessary resampling
    assert_raises_regexp(TypeError, '3D',
                         _utils.check_niimg, ['test.nii', ], ensure_3d=True)

    # Check that a filename does not raise an error
    data = np.zeros((40, 40, 40, 2))
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    with testing.write_tmp_imgs(data_img, create_files=True) as filename:
        _utils.check_niimg(filename)

    # Test ensure_3d with a in-memory object
    assert_raises_regexp(TypeError, '3D',
                         _utils.check_niimg, data, ensure_3d=True)

    # Test ensure_3d with a non 3D image
    assert_raises_regexp(TypeError, '3D',
                         _utils.check_niimg, data_img, ensure_3d=True)

    # Test ensure_3d with a 4D image with a length 1 4th dim
    data = np.zeros((40, 40, 40, 1))
    data_img = Nifti1Image(data, np.eye(4))
    _utils.check_niimg(data_img, ensure_3d=True)
Example #5
def test_check_niimg():
    affine = np.eye(4)
    img_3d = Nifti1Image(np.ones((10, 10, 10)), affine)
    img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine)
    img_3_3d = [[[img_3d, img_3d]]]
    img_2_4d = [[img_4d, img_4d]]

    assert_raises_regex(DimensionError,
                        "Input data has incompatible dimensionality: "
                        "Expected dimension is 2D and you provided "
                        "a list of list of list of 3D images \(6D\)",
                        _utils.check_niimg,
                        img_3_3d,
                        ensure_ndim=2)

    assert_raises_regex(DimensionError,
                        "Input data has incompatible dimensionality: "
                        "Expected dimension is 4D and you provided "
                        "a list of list of 4D images \(6D\)",
                        _utils.check_niimg,
                        img_2_4d,
                        ensure_ndim=4)

    # check data dtype equal with dtype='auto'
    img_3d_check = _utils.check_niimg(img_3d, dtype='auto')
    assert_equal(img_3d.get_data().dtype.kind,
                 img_3d_check.get_data().dtype.kind)

    img_4d_check = _utils.check_niimg(img_4d, dtype='auto')
    assert_equal(img_4d.get_data().dtype.kind,
                 img_4d_check.get_data().dtype.kind)
Example #6
def test_iter_check_niimgs(tmp_path):
    no_file_matching = "No files matching path: %s"
    affine = np.eye(4)
    img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine)
    img_2_4d = [[img_4d, img_4d]]

    for empty in ((), [], (i for i in ()), [i for i in ()]):
        with pytest.raises(ValueError, match="Input niimgs list is empty."):
            list(_iter_check_niimg(empty))

    nofile_path = "/tmp/nofile"
    with pytest.raises(ValueError, match=no_file_matching % nofile_path):
        list(_iter_check_niimg(nofile_path))

    # Create a test file
    fd, filename = tempfile.mkstemp(prefix="nilearn_test",
                                    suffix=".nii",
                                    dir=str(tmp_path))
    os.close(fd)
    img_4d.to_filename(filename)
    niimgs = list(_iter_check_niimg([filename]))
    assert_array_equal(get_data(niimgs[0]),
                       get_data(_utils.check_niimg(img_4d)))
    del img_4d
    del niimgs
    os.remove(filename)

    # Regular case
    niimgs = list(_iter_check_niimg(img_2_4d))
    assert_array_equal(get_data(niimgs[0]),
                       get_data(_utils.check_niimg(img_2_4d)))
Example #7
def test_iter_check_niimgs():
    no_file_matching = "No files matching path: %s"
    affine = np.eye(4)
    img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine)
    img_2_4d = [[img_4d, img_4d]]

    for empty in ((), [], (i for i in ()), [i for i in ()]):
        assert_raises_regex(ValueError, "Input niimgs list is empty.", list,
                            _iter_check_niimg(empty))

    nofile_path = "/tmp/nofile"
    assert_raises_regex(ValueError, no_file_matching % nofile_path, list,
                        _iter_check_niimg(nofile_path))

    # Create a test file
    filename = tempfile.mktemp(prefix="nilearn_test", suffix=".nii", dir=None)
    img_4d.to_filename(filename)
    niimgs = list(_iter_check_niimg([filename]))
    assert_array_equal(niimgs[0].get_data(),
                       _utils.check_niimg(img_4d).get_data())
    del img_4d
    del niimgs
    os.remove(filename)

    # Regular case
    niimgs = list(_iter_check_niimg(img_2_4d))
    assert_array_equal(niimgs[0].get_data(),
                       _utils.check_niimg(img_2_4d).get_data())
Example #8
def load_image(data):
    """
    Takes input `data` and returns a sample x time array

    Parameters
    ----------
    data : (X x Y x Z [x T]) array_like or img_like object
        Data array or data file to be loaded and reshaped

    Returns
    -------
    fdata : (S [x T]) :obj:`numpy.ndarray`
        Reshaped `data`, where `S` is samples and `T` is time
    """

    if isinstance(data, str):
        if get_dtype(data) == 'GIFTI':
            fdata = np.column_stack([f.data for f in nib.load(data).darrays])
            return fdata
        elif get_dtype(data) == 'NIFTI':
            data = check_niimg(data).get_data()
    elif isinstance(data, nib.spatialimages.SpatialImage):
        data = check_niimg(data).get_data()

    fdata = data.reshape((-1,) + data.shape[3:]).squeeze()

    return fdata
Example #10
def get_labels(infile, label_img):
    """
    Averages data in `infile` for each region in `label_img`

    Parameters
    ----------
    infile : str
        File with data to extract
    label_img : str
        File with regions to define extraction

    Returns
    -------
    signal : numpy.ndarray
        Averaged data in `infile` defined by regions in `label_img`
    """

    infile = check_niimg(infile, atleast_4d=True)
    label_img = check_niimg(label_img, ensure_ndim=3)
    data = infile.get_data()

    if infile.shape[:3] != label_img.shape:
        raise ValueError('Provided file {} does not match label image {}.'
                         .format(infile.file_map['image'].filename,
                                 label_img.file_map['image'].filename))

    labels_data = label_img.get_data()
    labels = np.trim_zeros(np.unique(labels_data))

    # average over data within each parcel
    signal = np.asarray(ndimage.measurements.mean(data.squeeze(),
                                                  labels=labels_data,
                                                  index=labels))
    return signal
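
A hedged sketch exercising get_labels with synthetic in-memory images; check_niimg accepts image objects as well as the file paths named in the docstring:

import numpy as np
import nibabel as nib

affine = np.eye(4)
labels = np.zeros((6, 6, 6), dtype=np.int16)
labels[:3], labels[3:] = 1, 2          # two toy parcels
data_img = nib.Nifti1Image(np.random.rand(6, 6, 6, 1), affine)
label_img = nib.Nifti1Image(labels, affine)
signal = get_labels(data_img, label_img)  # one mean per nonzero label
print(signal.shape)  # (2,)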
Example #11
def test_check_niimg():
    affine = np.eye(4)
    img_3d = Nifti1Image(np.ones((10, 10, 10)), affine)
    img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine)
    img_3_3d = [[[img_3d, img_3d]]]
    img_2_4d = [[img_4d, img_4d]]

    assert_raises_regex(
        DimensionError,
        "Input data has incompatible dimensionality: "
        "Expected dimension is 2D and you provided "
        "a list of list of list of 3D images \(6D\)",
        _utils.check_niimg, img_3_3d, ensure_ndim=2)

    assert_raises_regex(
        DimensionError,
        "Input data has incompatible dimensionality: "
        "Expected dimension is 4D and you provided "
        "a list of list of 4D images \(6D\)",
        _utils.check_niimg, img_2_4d, ensure_ndim=4)

    # check data dtype equal with dtype='auto'
    img_3d_check = _utils.check_niimg(img_3d, dtype='auto')
    assert_equal(img_3d.get_data().dtype.kind, img_3d_check.get_data().dtype.kind)

    img_4d_check = _utils.check_niimg(img_4d, dtype='auto')
    assert_equal(img_4d.get_data().dtype.kind, img_4d_check.get_data().dtype.kind)
Example #12
def load_data(data, n_echos=None):
    """
    Coerces input `data` files to required 3D array output

    Parameters
    ----------
    data : (X x Y x M x T) array_like or :obj:`list` of img_like
        Input multi-echo data array, where `X` and `Y` are spatial dimensions,
        `M` is the Z-spatial dimensions with all the input echos concatenated,
        and `T` is time. A list of image-like objects (e.g., .nii) are
        accepted, as well
    n_echos : :obj:`int`, optional
        Number of echos in provided data array. Only necessary if `data` is
        array_like. Default: None

    Returns
    -------
    fdata : (S x E x T) :obj:`numpy.ndarray`
        Output data where `S` is samples, `E` is echos, and `T` is time
    ref_img : :obj:`str` or :obj:`numpy.ndarray`
        Filepath to reference image for saving output files or NIFTI-like array
    """
    if n_echos is None:
        raise ValueError(
            'Number of echos must be specified. '
            'Confirm that TE times are provided with the `-e` argument.')

    if isinstance(data, list):
        if len(data) == 1:  # a z-concatenated file was provided
            data = data[0]
        elif len(data) == 2:  # inviable -- need more than 2 echos
            raise ValueError('Cannot run `tedana` with only two echos: '
                             '{}'.format(data))
        else:  # individual echo files were provided (surface or volumetric)
            fdata = np.stack([load_image(f) for f in data], axis=1)
            ref_img = check_niimg(data[0])
            ref_img.header.extensions = []
            return np.atleast_3d(fdata), ref_img

    img = check_niimg(data)
    (nx, ny), nz = img.shape[:2], img.shape[2] // n_echos
    fdata = load_image(img.get_data().reshape(nx,
                                              ny,
                                              nz,
                                              n_echos,
                                              -1,
                                              order='F'))

    # create reference image
    ref_img = img.__class__(np.zeros((nx, ny, nz)),
                            affine=img.affine,
                            header=img.header,
                            extra=img.extra)
    ref_img.header.extensions = []
    ref_img.header.set_sform(ref_img.header.get_sform(), code=1)

    return fdata, ref_img
Example #13
def test_check_niimg():
    assert_raises(TypeError, _utils.check_niimg, 0)
    assert_raises(TypeError, _utils.check_niimg, [])
    # Check that a filename does not raise an error
    data = np.zeros((40, 40, 40, 2))
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    with testing.write_tmp_imgs(data_img, create_files=True) as filename:
        _utils.check_niimg(filename)
Example #14
def imgs_to_complex(mag, phase):
    """
    Combine magnitude and phase images into a complex-valued nifti image.
    """
    mag = check_niimg(mag)
    phase = check_niimg(phase)
    # Convert to radians to be extra safe
    phase = to_radians(phase)
    mag_data = mag.get_fdata()
    phase_data = phase.get_fdata()
    comp_data = to_complex(mag_data, phase_data)
    comp_img = nib.Nifti1Image(comp_data, mag.affine)
    return comp_img
Example #15
def process_img(stat_img, cluster_extent, voxel_thresh=1.96):
    """
    Parameters
    ----------
    stat_img : Niimg_like object
        Thresholded statistical map image
    cluster_extent : int
        Minimum number of voxels required to consider a cluster
    voxel_thresh : int, optional
        Threshold to apply to `stat_img`. If a negative number is provided a
        percentile threshold is used instead, where the percentile is
        determined by the equation `100 - voxel_thresh`. Default: 1.96

    Returns
    -------
    cluster_img : Nifti1Image
        4D image of brain regions, where each volume is a distinct cluster
    """
    # get input data image
    stat_img = image.index_img(check_niimg(stat_img, atleast_4d=True), 0)

    # threshold image
    if voxel_thresh < 0:
        voxel_thresh = '{}%'.format(100 + voxel_thresh)
    else:
        # ensure that threshold is not greater than most extreme value in image
        if voxel_thresh > np.abs(stat_img.get_data()).max():
            empty = np.zeros(stat_img.shape + (1,))
            return image.new_img_like(stat_img, empty)
    thresh_img = image.threshold_img(stat_img, threshold=voxel_thresh)

    # extract clusters
    min_region_size = cluster_extent * np.prod(thresh_img.header.get_zooms())
    clusters = []
    for sign in ['pos', 'neg']:
        # keep only data of given sign
        data = thresh_img.get_data().copy()
        data[(data < 0) if sign == 'pos' else (data > 0)] = 0

        # Do nothing if data array contains only zeros
        if np.any(data):
            try:
                clusters += [connected_regions(
                    image.new_img_like(thresh_img, data),
                    min_region_size=min_region_size,
                    extract_type='connected_components')[0]]
            except TypeError:  # for no clusters
                pass

    # Return empty image if no clusters were found
    if len(clusters) == 0:
        return image.new_img_like(thresh_img, np.zeros(data.shape + (1,)))

    # Reorder clusters by their size
    clust_img = image.concat_imgs(clusters)
    cluster_size = (clust_img.get_data() != 0).sum(axis=(0, 1, 2))
    new_order = np.argsort(cluster_size)[::-1]
    clust_img_ordered = image.index_img(clust_img, new_order)

    return clust_img_ordered
Example #16
def split_prob_along_hemisphere(atlas, axis=0):
    """
    Splits probabilistic ``atlas`` along provided ``axis``

    Parameters
    ----------
    atlas : str or img_like
        Filepath to atlas image or loaded atlas image
    axis : int, optional
        Axis along which to split `atlas`. Default: 0

    Return
    ------
    split : img_like
        Provided ``atlas`` split along ``axis``
    """

    atlas = check_niimg(atlas, atleast_4d=True)
    data = atlas.get_data()
    mask = np.zeros_like(data[:, :, :, [0]])

    rh, lh = mask.copy(), mask.copy()
    rh[:data.shape[axis] // 2] = 1
    lh[data.shape[axis] // 2:] = 1

    split = np.concatenate([rh * data, lh * data], axis=-1)

    return new_img_like(atlas, split)
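
A quick check of split_prob_along_hemisphere on a toy probabilistic atlas (assuming the snippet's check_niimg/new_img_like imports are in scope); each map is split into two hemisphere maps, doubling the fourth dimension:

import numpy as np
import nibabel as nib

atlas = nib.Nifti1Image(np.random.rand(10, 10, 10, 2), np.eye(4))
split = split_prob_along_hemisphere(atlas)
print(split.shape)  # (10, 10, 10, 4)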
Example #17
def process_img(stat_img, voxel_thresh=1.96, cluster_extent=20):
    """
    Parameters
    ----------
    stat_img : Niimg_like object
        Thresholded statistical map image
    voxel_thresh : int, optional
        Threshold to apply to `stat_img`. If a negative number is provided a
        percentile threshold is used instead, where the percentile is
        determined by the equation `100 - voxel_thresh`. Default: 1.96
    cluster_extent : int, optional
        Minimum number of voxels required to consider a cluster. Default: 20

    Returns
    -------
    cluster_img : Nifti1Image
        4D image of brain regions, where each volume is a distinct cluster
    """
    # get input data image
    stat_img = image.index_img(check_niimg(stat_img, atleast_4d=True), 0)

    # threshold image
    if voxel_thresh < 0:
        voxel_thresh = '{}%'.format(100 + voxel_thresh)
    thresh_img = image.threshold_img(stat_img, threshold=voxel_thresh)

    # extract clusters
    cluster_img = clusterize_img(thresh_img, cluster_extent=cluster_extent)

    return cluster_img
Example #18
def split_det_along_hemisphere(atlas, axis=0):
    """
    Splits deterministic ``atlas`` along provided ``axis``

    Parameters
    ----------
    atlas : str or img_like
        Filepath to atlas image or loaded atlas image
    axis : int, optional
        Axis along which to split `atlas`. Default: 0

    Return
    ------
    split : img_like
        Provided ``atlas`` split along ``axis``
    """

    atlas = check_niimg(atlas, ensure_ndim=3)
    data = atlas.get_data()
    mask = np.zeros_like(data)

    rh, lh = mask.copy(), mask.copy()
    rh[:data.shape[axis] // 2] = 1
    lh[data.shape[axis] // 2:] = 1

    rh, lh = rh * data, lh * data
    lh[lh.nonzero()] += np.unique(rh).max()

    split = rh + lh

    return new_img_like(atlas, split)
Example #19
def _unmask_single_img(masker,
                       imgs,
                       confounds,
                       root,
                       raw_dir,
                       mock=False,
                       overwrite=False):
    imgs = check_niimg(imgs)
    if imgs.get_filename() is None:
        raise ValueError('Provided Nifti1Image should be linked to a file.')
    filename = imgs.get_filename()
    raw_filename = filename.replace('.nii.gz', '.npy')
    raw_filename = raw_filename.replace(root, raw_dir)
    dirname = os.path.dirname(raw_filename)
    print('Saving %s to %s' % (filename, raw_filename))
    if not mock:
        if overwrite or not os.path.exists(raw_filename):
            try:
                data = masker.transform(imgs, confounds=confounds)
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                np.save(raw_filename, data)
            except EOFError:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                msg = '\n'.join(
                    traceback.format_exception(exc_type, exc_value,
                                               exc_traceback))
                raw_filename += '-error'
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                with open(raw_filename, 'w+') as f:
                    f.write(msg)
        else:
            print('File already exists: skipping.')
    return raw_filename
Example #20
def get_peak_coords(clust_img):
    """
    Gets MNI coordinates of peak voxels within each cluster of `clust_img`

    Parameters
    ----------
    clust_img : 4D-niimg_like
        4D image of brain regions, where each volume is a separated cluster

    Returns
    -------
    coords : (N, 3) numpy.ndarray
        Coordinates of peak voxels in `clust_img`
    """

    # check cluster image and make it 4D, if not already
    clust_img = check_niimg(clust_img, atleast_4d=True)

    # create empty arrays to hold cluster size + peak coordinates
    clust_size = np.zeros(clust_img.shape[-1])
    maxcoords = np.zeros((clust_img.shape[-1], 3))

    # iterate through clusters and get info
    for n, cluster in enumerate(image.iter_img(clust_img)):
        cluster = np.abs(cluster.get_data())
        clust_size[n] = np.sum(cluster != 0)
        maxcoords[n] = center_of_mass(cluster == cluster.max())

    # sort peak coordinates by cluster size
    maxcoords = np.floor(maxcoords)[np.argsort(clust_size)[::-1]]

    # convert coordinates to MNI space
    coords = coord_ijk_to_xyz(clust_img.affine, maxcoords)

    return coords
Example #21
def to_radians(phase):
    """
    Adapted from
    https://github.com/poldracklab/sdcflows/blob/
    659c2508ecef810c3acadbe808560b44d22801f9/sdcflows/interfaces/fmap.py#L94

    Ensure that phase images are in a usable range for unwrapping [0, 2pi).

    From the FUGUE User guide::

        If you have seperate phase volumes that are in integer format then do:

        fslmaths orig_phase0 -mul 3.14159 -div 2048 phase0_rad -odt float
        fslmaths orig_phase1 -mul 3.14159 -div 2048 phase1_rad -odt float

        Note that the value of 2048 needs to be adjusted for each different
        site/scanner/sequence in order to be correct. The final range of the
        phase0_rad image should be approximately 0 to 6.28. If this is not the
        case then this scaling is wrong. If you have separate phase volumes are
        not in integer format, you must still check that the units are in
        radians, and if not scale them appropriately using fslmaths.
    """
    phase_img = check_niimg(phase)
    phase_data = phase_img.get_fdata()
    imax = phase_data.max()
    imin = phase_data.min()
    scaled = (phase_data - imin) / (imax - imin)
    rad_data = 2 * np.pi * scaled
    out_img = nib.Nifti1Image(rad_data, phase_img.affine, phase_img.header)
    return out_img
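
A small self-check for to_radians (assuming the snippet's numpy/nibabel/check_niimg imports are in scope); a synthetic integer-valued phase image is rescaled into radians:

import numpy as np
import nibabel as nib

raw = np.random.randint(-2048, 2048, size=(8, 8, 8)).astype(np.float32)
rad_img = to_radians(nib.Nifti1Image(raw, np.eye(4)))
vals = rad_img.get_fdata()
assert vals.min() >= 0 and vals.max() <= 2 * np.pi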
Example #22
def new_nii_like(ref_img, data, affine=None, copy_header=True):
    """
    Coerces `data` into NiftiImage format like `ref_img`

    Parameters
    ----------
    ref_img : str or img_like
        Reference image
    data : (S [x T]) array_like
        Data to be saved
    affine : (4 x 4) array_like, optional
        Transformation matrix to be used. Default: `ref_img.affine`
    copy_header : bool, optional
        Whether to copy header from `ref_img` to new image. Default: True

    Returns
    -------
    nii : :obj:`nibabel.nifti1.Nifti1Image`
        NiftiImage
    """

    ref_img = check_niimg(ref_img)
    nii = new_img_like(ref_img,
                       data.reshape(ref_img.shape[:3] + data.shape[1:]),
                       affine=affine,
                       copy_header=copy_header)
    nii.set_data_dtype(data.dtype)

    return nii
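
A usage sketch for new_nii_like, folding flat sample-by-time data back into a reference geometry (check_niimg and new_img_like imports assumed, as in the snippet):

import numpy as np
import nibabel as nib

ref = nib.Nifti1Image(np.zeros((4, 4, 4), dtype=np.float32), np.eye(4))
flat = np.random.rand(64, 10).astype(np.float32)  # S x T, with S = 4 * 4 * 4
img = new_nii_like(ref, flat)
print(img.shape)  # (4, 4, 4, 10)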
Example #23
def new_nii_like(ref_img, data, affine=None, copy_header=True):
    """
    Coerces `data` into NiftiImage format like `ref_img`

    Parameters
    ----------
    ref_img : :obj:`str` or img_like
        Reference image
    data : (S [x T]) array_like
        Data to be saved
    affine : (4 x 4) array_like, optional
        Transformation matrix to be used. Default: `ref_img.affine`
    copy_header : :obj:`bool`, optional
        Whether to copy header from `ref_img` to new image. Default: True

    Returns
    -------
    nii : :obj:`nibabel.nifti1.Nifti1Image`
        NiftiImage
    """

    ref_img = check_niimg(ref_img)
    nii = new_img_like(ref_img,
                       data.reshape(ref_img.shape[:3] + data.shape[1:]),
                       affine=affine,
                       copy_header=copy_header)
    nii.set_data_dtype(data.dtype)

    return nii
Example #24
def new_nii_like(ref_img, data, affine=None, copy_header=True):
    """
    Coerces `data` into NiftiImage format like `ref_img`

    Parameters
    ----------
    ref_img : :obj:`str` or img_like
        Reference image
    data : (S [x T]) array_like
        Data to be saved
    affine : (4 x 4) array_like, optional
        Transformation matrix to be used. Default: `ref_img.affine`
    copy_header : :obj:`bool`, optional
        Whether to copy header from `ref_img` to new image. Default: True

    Returns
    -------
    nii : :obj:`nibabel.nifti1.Nifti1Image`
        NiftiImage
    """

    ref_img = check_niimg(ref_img)
    newdata = data.reshape(ref_img.shape[:3] + data.shape[1:])
    if '.nii' not in ref_img.valid_exts:
        # this is rather ugly and may lose some information...
        nii = nib.Nifti1Image(newdata, affine=ref_img.affine,
                              header=ref_img.header)
    else:
        # nilearn's `new_img_like` is a very nice function
        nii = new_img_like(ref_img, newdata, affine=affine,
                           copy_header=copy_header)
    nii.set_data_dtype(data.dtype)

    return nii
Example #25
def clean_img(img):
    """ Remove nan/inf entries."""
    img = check_niimg(img)
    img_data = img.get_data()
    img_data[np.isnan(img_data)] = 0
    img_data[np.isinf(img_data)] = 0
    return new_img_like(img, img_data, copy_header=True)
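
A quick demonstration of clean_img on an array containing non-finite entries (the snippet's check_niimg/new_img_like imports assumed):

import numpy as np
import nibabel as nib

arr = np.array([[[1.0, np.nan], [np.inf, -np.inf]]])
cleaned = clean_img(nib.Nifti1Image(arr, np.eye(4)))
assert np.isfinite(cleaned.get_fdata()).all()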
Example #26
def get_peak_data(clust_img,
                  atlas='default',
                  prob_thresh=5,
                  min_distance=None):
    """
    Parameters
    ----------
    clust_img : Niimg_like
        3D image of a single, valued cluster
    atlas : str or list, optional
        Name of atlas(es) to consider for cluster analysis. Default: 'default'
    prob_thresh : [0, 100] int, optional
        Probability (percentage) threshold to apply to `atlas`, if it is
        probabilistic. Default: 5
    min_distance : float, optional
        Specifies the minimum distance required between sub-peaks in a cluster.
        If None, sub-peaks will not be examined and only the primary cluster
        peak will be reported. Default: None

    Returns
    -------
    peak_summary : numpy.ndarray
        Info on peaks found in `clust_img`, including peak coordinates, peak
        values, volume of cluster that peaks belong to, and neuroanatomical
        locations defined in `atlas`
    """
    clust_img = check_niimg(clust_img)
    data = clust_img.get_data()

    # get voxel volume information
    voxel_volume = np.prod(clust_img.header.get_zooms())

    # find peaks -- or subpeaks, if `min_distance` is set
    if min_distance is None:
        peaks = get_peak_coords(clust_img)
    else:
        peak_img = image.math_img('np.abs(img)', img=clust_img)
        peaks = get_subpeak_coords(peak_img, min_distance=min_distance)
    peaks = coord_xyz_to_ijk(clust_img.affine, peaks)

    # get info on peak in cluster (all peaks belong to same cluster ID!)
    cluster_volume = np.repeat(np.sum(data != 0) * voxel_volume, len(peaks))
    peak_values = data[tuple(map(tuple, peaks.T))]
    coords = coord_ijk_to_xyz(clust_img.affine, peaks)
    peak_info = []
    for coord in coords:
        coord_info = []
        for atype in check_atlases(atlas):
            segment = read_atlas_peak(atype, coord, prob_thresh)
            coord_info.append([atype.atlas, segment])

        peak_info += [[
            peak if type(peak) != list else '; '.join(
                ['{}% {}'.format(*e) for e in peak])
            for (_, peak) in coord_info
        ]]

    return np.column_stack([coords, peak_values, cluster_volume, peak_info])
Example #27
def display_maps(fig, components, index=0):
    components = check_niimg(components)
    ax = fig.add_subplot(2, 1, 1)
    plot_prob_atlas(components, view_type="filled_contours", axes=ax)
    ax = fig.add_subplot(2, 1, 2)
    plot_stat_map(index_img(components, index),
                  axes=ax,
                  colorbar=False,
                  threshold=0)
    return fig
Example #28
def transform_single_imgs(self, imgs, confounds=None, copy=True):
    imgs = check_niimg(imgs)
    data = imgs.get_data()
    data[np.logical_not(np.isfinite(data))] = 0
    imgs = Nifti1Image(data, imgs.affine)
    masked_imgs = super().transform_single_imgs(imgs, confounds, copy)
    if self.reduction is None:
        return masked_imgs
    else:
        return masked_imgs.dot(self._masked_maps_inv_)
Example #29
def label_to_maps(label_img):
    label_img = check_niimg(label_img, ensure_ndim=3)
    shape = label_img.shape
    data = label_img.get_data()
    affine = label_img.get_affine()
    n_components = int(data.max())
    img_data = np.zeros((*shape, n_components))
    for i in range(1, n_components + 1):
        img_data[:, :, :, i - 1] = data == i
    return Nifti1Image(img_data, affine)
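
A hedged usage sketch for label_to_maps, turning a two-label deterministic image into a stack of binary maps (Nifti1Image and check_niimg imports assumed):

import numpy as np
import nibabel as nib

label_data = np.zeros((5, 5, 5))
label_data[:2], label_data[2:] = 1, 2
maps = label_to_maps(nib.Nifti1Image(label_data, np.eye(4)))
print(maps.shape)  # (5, 5, 5, 2): one binary map per label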
Example #30
def test_check_niimg():
    # check error for non-forced but necessary resampling
    assert_raises_regexp(TypeError, 'image', _utils.check_niimg, 0)

    # check error for non-forced but necessary resampling
    assert_raises_regexp(TypeError, 'image', _utils.check_niimg, [])

    # Test ensure_3d
    # check error for non-forced but necessary resampling
    assert_raises_regexp(TypeError,
                         '3D',
                         _utils.check_niimg, [
                             'test.nii',
                         ],
                         ensure_3d=True)

    # Check that a filename does not raise an error
    data = np.zeros((40, 40, 40, 2))
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    with testing.write_tmp_imgs(data_img, create_files=True) as filename:
        _utils.check_niimg(filename)

    # Test ensure_3d with a in-memory object
    assert_raises_regexp(TypeError,
                         '3D',
                         _utils.check_niimg,
                         data,
                         ensure_3d=True)

    # Test ensure_3d with a non 3D image
    assert_raises_regexp(TypeError,
                         '3D',
                         _utils.check_niimg,
                         data_img,
                         ensure_3d=True)

    # Test ensure_3d with a 4D image with a length 1 4th dim
    data = np.zeros((40, 40, 40, 1))
    data_img = Nifti1Image(data, np.eye(4))
    _utils.check_niimg(data_img, ensure_3d=True)
Example #31
def test_check_niimg():
    affine = np.eye(4)
    img_3d = Nifti1Image(np.ones((10, 10, 10)), affine)
    img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine)
    img_3_3d = [[[img_3d, img_3d]]]
    img_2_4d = [[img_4d, img_4d]]

    with pytest.raises(DimensionError,
                       match="Input data has incompatible dimensionality: "
                       "Expected dimension is 2D and you provided "
                       "a list of list of list of 3D images \\(6D\\)"):
        _utils.check_niimg(img_3_3d, ensure_ndim=2)

    with pytest.raises(DimensionError,
                       match="Input data has incompatible dimensionality: "
                       "Expected dimension is 4D and you provided "
                       "a list of list of 4D images \\(6D\\)"):
        _utils.check_niimg(img_2_4d, ensure_ndim=4)

    # check data dtype equal with dtype='auto'
    img_3d_check = _utils.check_niimg(img_3d, dtype='auto')
    assert (get_data(img_3d).dtype.kind == get_data(img_3d_check).dtype.kind)

    img_4d_check = _utils.check_niimg(img_4d, dtype='auto')
    assert (get_data(img_4d).dtype.kind == get_data(img_4d_check).dtype.kind)
Example #32
def split_complex(comp_img):
    """
    Split a complex-valued nifti image into magnitude and phase images.
    """
    comp_img = check_niimg(comp_img)
    comp_data = comp_img.get_fdata(dtype=comp_img.get_data_dtype())
    real = comp_data.real
    imag = comp_data.imag
    mag = abs(comp_data)
    phase = to_phase(real, imag)
    mag = nib.Nifti1Image(mag, comp_img.affine)
    phase = nib.Nifti1Image(phase, comp_img.affine)
    return mag, phase
Example #33
def _get_64(niimg):
    f = check_niimg(niimg)
    data = f.get_data().astype(float)
    data = data - data.min()  # shift so the minimum maps to zero
    data = data / data.max() * 254.0
    data = data.astype(np.uint8)
    f = nibabel.Nifti1Image(data, f.get_affine())
    _, filename = tempfile.mkstemp(suffix=".nii.gz")
    f.to_filename(filename)
    with open(filename, "rb") as f:
        b64 = base64.b64encode(f.read())
    os.unlink(filename)
    return b64.decode("utf-8")
Example #34
def recover_kspace(magnitude, phase, out_real_file=None, out_imag_file=None):
    """
    Convert raw magnitude and phase data into effective k-space data, split
    into real and imaginary components.
    """
    import numpy as np
    import nibabel as nib
    import os.path as op
    from nipype.utils.filemanip import split_filename
    from nilearn._utils import check_niimg
    from complex_utils import to_complex

    magnitude_img = check_niimg(magnitude)
    phase_img = check_niimg(phase)
    phase_data = phase_img.get_fdata()
    magnitude_data = magnitude_img.get_fdata()
    kspace_data = np.zeros(phase_data.shape, dtype=complex)
    cmplx_data = to_complex(magnitude_data, phase_data)

    for i_vol in range(cmplx_data.shape[3]):
        for j_slice in range(cmplx_data.shape[2]):
            slice_data = cmplx_data[:, :, j_slice, i_vol]
            slice_kspace = np.fft.ifft(slice_data)
            kspace_data[:, :, j_slice, i_vol] = slice_kspace

    kspace_real_data, kspace_imag_data = kspace_data.real, kspace_data.imag
    kspace_real_img = nib.Nifti1Image(kspace_real_data, magnitude_img.affine,
                                      magnitude_img.header)
    kspace_imag_img = nib.Nifti1Image(kspace_imag_data, magnitude_img.affine,
                                      magnitude_img.header)
    if out_real_file is None:
        _, base, _ = split_filename(magnitude)
        out_real_file = op.abspath(base + '_real.nii.gz')
    if out_imag_file is None:
        _, base, _ = split_filename(magnitude)
        out_imag_file = op.abspath(base + '_imag.nii.gz')
    kspace_real_img.to_filename(out_real_file)
    kspace_imag_img.to_filename(out_imag_file)
    return out_real_file, out_imag_file
Example #35
def _get_64(niimg):
    f = check_niimg(niimg)
    data = f.get_data().astype(float)
    data = data - data.min()  # shift so the minimum maps to zero
    data = data / data.max() * 254.
    data = data.astype(np.uint8)
    f = nibabel.Nifti1Image(data, f.get_affine())
    _, filename = tempfile.mkstemp(suffix='.nii.gz')
    f.to_filename(filename)
    with open(filename, 'rb') as f:
        b64 = base64.b64encode(f.read())
    os.unlink(filename)
    return b64.decode('utf-8')
Example #36
def smooth_img(imgs, fwhm, **kwargs):
    """Smooth images by applying a Gaussian filter.
    Apply a Gaussian filter along the three first dimensions of arr.
    In all cases, non-finite values in input image are replaced by zeros.

    This is copied and slightly modified from nilearn:
    https://github.com/nilearn/nilearn/blob/master/nilearn/image/image.py
    Added the **kwargs argument.

    Parameters
    ==========
    imgs: Niimg-like object or iterable of Niimg-like objects
        See http://nilearn.github.io/manipulating_images/manipulating_images.html#niimg.
        Image(s) to smooth.
    fwhm: scalar, numpy.ndarray, 'fast' or None
        Smoothing strength, as a Full-Width at Half Maximum, in millimeters.
        If a scalar is given, width is identical on all three directions.
        A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
        If fwhm == 'fast', a fast smoothing will be performed with
        a filter [0.2, 1, 0.2] in each direction and a normalisation
        to preserve the scale.
        If fwhm is None, no filtering is performed (useful when just removal
        of non-finite values is needed)
    Returns
    =======
    filtered_img: nibabel.Nifti1Image or list of.
        Input image, filtered. If imgs is an iterable, then filtered_img is a
        list.
    """

    # Use hasattr() instead of isinstance to workaround a Python 2.6/2.7 bug
    # See http://bugs.python.org/issue7624
    if hasattr(imgs, "__iter__") \
       and not isinstance(imgs, string_types):
        single_img = False
    else:
        single_img = True
        imgs = [imgs]

    ret = []
    for img in imgs:
        img = check_niimg(img)
        affine = img.get_affine()
        filtered = _smooth_array(img.get_data(), affine, fwhm=fwhm,
                                 ensure_finite=True, copy=True, **kwargs)
        ret.append(new_img_like(img, filtered, affine, copy_header=True))

    if single_img:
        return ret[0]
    else:
        return ret
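
A usage sketch for smooth_img; it assumes nilearn's private helpers (_smooth_array, check_niimg, new_img_like) and six's string_types are importable, as the snippet itself does:

import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.random.rand(8, 8, 8), np.eye(4))
smoothed = smooth_img(img, fwhm=6.0)      # 6 mm FWHM in all directions
both = smooth_img([img, img], fwhm=6.0)   # an iterable in gives a list back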
Example #37
def load_data(data, n_echos=None):
    """
    Coerces input `data` files to required 3D array output

    Parameters
    ----------
    data : (X x Y x M x T) array_like or list-of-img_like
        Input multi-echo data array, where `X` and `Y` are spatial dimensions,
        `M` is the Z-spatial dimensions with all the input echos concatenated,
        and `T` is time. A list of image-like objects (e.g., .nii or .gii) are
        accepted, as well
    n_echos : int, optional
        Number of echos in provided data array. Only necessary if `data` is
        array_like. Default: None

    Returns
    -------
    fdata : (S x E x T) :obj:`numpy.ndarray`
        Output data where `S` is samples, `E` is echos, and `T` is time
    ref_img : str
        Filepath to reference image for saving output files
    """

    if isinstance(data, list):
        if get_dtype(data) == 'GIFTI':  # TODO: deal with L/R split GIFTI files
            pass
        if len(data) == 1:  # a z-concatenated file was provided
            data = data[0]
        elif len(data) == 2:  # inviable -- need more than 2 echos
            raise ValueError('Cannot run `tedana` with only two echos: '
                             '{}'.format(data))
        else:  # individual echo files were provided (surface or volumetric)
            fdata = np.stack([load_image(f) for f in data], axis=1)
            return np.atleast_3d(fdata), data[0]

    # we have a z-cat file -- we need to know how many echos are in it!
    if n_echos is None:
        raise ValueError('Number of echos `n_echos` must be specified if only '
                         'one data file is provided.')
    img = check_niimg(data)
    (nx, ny), nz = img.shape[:2], img.shape[2] // n_echos
    fdata = load_image(img.get_data().reshape(nx, ny, nz, n_echos, -1, order='F'))

    # create reference image
    ref_img = img.__class__(np.zeros((nx, ny, nz)), affine=img.affine,
                            header=img.header, extra=img.extra)
    ref_img.header.extensions = []
    ref_img.header.set_sform(ref_img.header.get_sform(), code=1)

    return fdata, ref_img
Example #38
def split_multiecho_volumewise(echo_imgs):
    """
    Take 4D echo-specific images, split them by volume, and concatenate each
    volume across echoes, as input for ROMEO.
    """
    from nilearn import image
    echo_imgs = [check_niimg(ei) for ei in echo_imgs]
    out_imgs = []
    for i in range(echo_imgs[0].shape[3]):
        vol_imgs = []
        for j_echo, echo_img in enumerate(echo_imgs):
            vol_imgs.append(image.index_img(echo_img, i))
        out_imgs.append(image.concat_imgs(vol_imgs))
    return out_imgs
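
A sketch of split_multiecho_volumewise with two synthetic three-volume echoes (check_niimg import assumed); the result is one image per volume, concatenated across echoes:

import numpy as np
import nibabel as nib

e1 = nib.Nifti1Image(np.random.rand(4, 4, 4, 3), np.eye(4))
e2 = nib.Nifti1Image(np.random.rand(4, 4, 4, 3), np.eye(4))
vols = split_multiecho_volumewise([e1, e2])
print(len(vols), vols[0].shape)  # 3 (4, 4, 4, 2)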
Example #39
File: fmri.py  Project: BigRLab/modl
def _lazy_scan(imgs):
    """Extracts number of samples and dtype
    from a 4D list of Niilike-image, without loading data"""
    n_samples_list = []
    for img in imgs:
        try:
            img = check_niimg(img)
            this_n_samples = img.shape[3]
            dtype = img.get_data_dtype()
        except ImageFileError:
            img = np.load(img, mmap_mode='r')
            this_n_samples = img.shape[0]
            dtype = img.dtype
        n_samples_list.append(this_n_samples)
    return n_samples_list, dtype
Example #40
def out_brain_confounds(epi_img, mask_img):
    """ Return the 5 principal components of the signal outside the
        brain.
    """
    mask_img = check_niimg(mask_img)
    mask_img = nibabel.Nifti1Image(
        np.logical_not(mask_img.get_data()).astype(int),
        mask_img.get_affine())
    sigs = masking.apply_mask(epi_img, mask_img)
    # Remove the constant signals
    non_constant = np.any(np.diff(sigs, axis=0) != 0, axis=0)
    sigs = sigs[:, non_constant]
    sigs = signal.clean(sigs, detrend=True)
    U, s, V = randomized_svd(sigs, 5, random_state=0)
    return U
Example #41
def _region_extractor_labels_image(atlas, extract_type='connected_components',
                                   min_region_size=0):
    """ Function takes atlas image denoted as labels for each region
        and then imposes region extraction algorithm on the image to
        split them into regions apart.

    Parameters
    ----------
    atlas : 3D Nifti-like image
        An image contains labelled regions.

    extract_type : 'connected_components', 'local_regions'
        See nilearn.regions.connected_regions for full documentation

    min_region_size : in mm^3
        Minimum size of voxels in a region to be kept.

    """
    atlas_img = check_niimg(atlas)
    atlas_data = _safe_get_data(atlas_img)
    affine = atlas_img.get_affine()

    n_labels = np.unique(np.asarray(atlas_data))

    reg_imgs = []
    for label_id in n_labels:
        if label_id == 0:
            continue
        print("[Region Extraction] Processing with label {0}".format(label_id))
        region = (atlas_data == label_id) * atlas_data
        reg_img = new_img_like(atlas_img, region)
        regions, _ = connected_regions(reg_img, extract_type=extract_type,
                                       min_region_size=min_region_size)
        reg_imgs.append(regions)
    regions_extracted = concat_niimgs(reg_imgs)

    return regions_extracted, n_labels
Example #42
def test_check_niimg():
    with assert_raises(TypeError) as cm:
        _utils.check_niimg(0)
    assert_true('image' in str(cm.exception)
                or 'affine' in str(cm.exception))

    with assert_raises(TypeError) as cm:
        _utils.check_niimg([])
    assert_true('image' in str(cm.exception)
                or 'affine' in str(cm.exception))
    # Check that a filename does not raise an error
    data = np.zeros((40, 40, 40, 2))
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    with testing.write_tmp_imgs(data_img, create_files=True) as filename:
        _utils.check_niimg(filename)
Example #43
def test_check_niimg_wildcards():
    tmp_dir = tempfile.tempdir + os.sep
    nofile_path = "/tmp/nofile"
    nofile_path_wildcards = "/tmp/no*file"
    wildcards_msg = ("No files matching the entered niimg expression: "
                     "'%s'.\n You may have left wildcards usage "
                     "activated: please set the global constant "
                     "'nilearn.EXPAND_PATH_WILDCARDS' to False to "
                     "deactivate this behavior.")

    file_not_found_msg = "File not found: '%s'"

    assert_equal(ni.EXPAND_PATH_WILDCARDS, True)
    # Check bad filename
    # Non existing file (with no magic) raise a ValueError exception
    assert_raises_regex(ValueError, file_not_found_msg % nofile_path,
                        _utils.check_niimg, nofile_path)
    # Non matching wildcard raises a ValueError exception
    assert_raises_regex(ValueError,
                        wildcards_msg % re.escape(nofile_path_wildcards),
                        _utils.check_niimg, nofile_path_wildcards)

    # First create some testing data
    data_3d = np.zeros((40, 40, 40))
    data_3d[20, 20, 20] = 1
    img_3d = Nifti1Image(data_3d, np.eye(4))

    data_4d = np.zeros((40, 40, 40, 3))
    data_4d[20, 20, 20] = 1
    img_4d = Nifti1Image(data_4d, np.eye(4))

    #######
    # Testing with an existing filename
    with testing.write_tmp_imgs(img_3d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename).get_data(),
                           img_3d.get_data())
    # No globbing behavior
    with testing.write_tmp_imgs(img_3d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename,
                                              wildcards=False).get_data(),
                           img_3d.get_data())

    #######
    # Testing with an existing filename
    with testing.write_tmp_imgs(img_4d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename).get_data(),
                           img_4d.get_data())
    # No globbing behavior
    with testing.write_tmp_imgs(img_4d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename,
                                              wildcards=False).get_data(),
                           img_4d.get_data())

    #######
    # Testing with a glob matching exactly one filename
    # Using a glob matching one file containing a 3d image returns a 4d image
    # with 1 as last dimension.
    with testing.write_tmp_imgs(img_3d,
                                create_files=True,
                                use_wildcards=True) as globs:
        glob_input = tmp_dir + globs
        assert_array_equal(_utils.check_niimg(glob_input).get_data()[..., 0],
                           img_3d.get_data())
    # Disabled globbing behavior should raise an ValueError exception
    with testing.write_tmp_imgs(img_3d,
                                create_files=True,
                                use_wildcards=True) as globs:
        glob_input = tmp_dir + globs
        assert_raises_regex(ValueError,
                            file_not_found_msg % re.escape(glob_input),
                            _utils.check_niimg,
                            glob_input,
                            wildcards=False)

    #######
    # Testing with a glob matching multiple filenames
    img_4d = _utils.check_niimg_4d((img_3d, img_3d))
    with testing.write_tmp_imgs(img_3d, img_3d,
                                create_files=True,
                                use_wildcards=True) as globs:
        glob_input = tmp_dir + globs
        assert_array_equal(_utils.check_niimg(glob_input).get_data(),
                           img_4d.get_data())

    #######
    # Test when global variable is set to False => no globbing allowed
    ni.EXPAND_PATH_WILDCARDS = False

    # Non existing filename (/tmp/nofile) could match an existing one through
    # globbing but global wildcards variable overrides this feature => raises
    # a ValueError
    assert_raises_regex(ValueError,
                        file_not_found_msg % nofile_path,
                        _utils.check_niimg, nofile_path)

    # Verify wildcards function parameter has no effect
    assert_raises_regex(ValueError,
                        file_not_found_msg % nofile_path,
                        _utils.check_niimg, nofile_path, wildcards=False)

    # Testing with an exact filename matching (3d case)
    with testing.write_tmp_imgs(img_3d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename).get_data(),
                           img_3d.get_data())

    # Testing with an exact filename matching (4d case)
    with testing.write_tmp_imgs(img_4d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename).get_data(),
                           img_4d.get_data())

    # Reverting to default behavior
    ni.EXPAND_PATH_WILDCARDS = True
Example #44
def check_embedded_atlas_masker(estimator, atlas_type=None, img=None,
                                multi_subject=True, seeds=None, radius=None,
                                t_r=None, low_pass=None, high_pass=None):
    """Base function to return masker type and its parameters

    Accepts all Nilearn masker types but returns only Maps or Labels masker.
    The idea being that this function returns an object with essential
    parameters embedded in it such as repetition time, low pass, high pass,
    standardize, detrend for resting state fMRI data analysis.

    Mostly useful for pipelined analysis.

    Parameters
    ----------
    estimator : object, instance of all masker types
        Accepts any instance masker type from nilearn.input_data

    img : maps_img or labels_img
        Used in initialization of instance masker object depending upon
        the length/type of the image. If maps_img related then used in
        NiftiMapsMasker instance or labels_img then NiftiLabelsMasker.

    seeds : List of triplet of coordinates in native space
        Used in NiftiSpheresMasker initialization.

    atlas_type : str {'maps', 'labels'}
        'maps' implies NiftiMapsMasker
        'labels' implies NiftiLabelsMasker

    multi_subject : bool, optional
        Indicates whether to return masker of multi subject type.
        List of subjects.

    Returns
    -------
    masker : NiftiMapsMasker, NiftiLabelsMasker
        Depending upon atlas type.

    params : dict
        Masker parameters
    """
    if atlas_type is not None:
        if atlas_type not in ['maps', 'labels', 'spheres', 'auto']:
            raise ValueError(
                "You provided unsupported masker type for atlas_type={0} "
                "selection. Choose one among them ['maps', 'labels', 'spheres']"
                "for atlas type masker. Otherwise atlas_type=None for general "
                "Nifti or MultiNifti Maskers.".format(atlas_type))

    if not isinstance(estimator, (NiftiMasker, MultiNiftiMasker, NiftiMapsMasker,
                                  NiftiLabelsMasker, NiftiSpheresMasker)):
        raise ValueError("Unsupported 'estimator' instance of masker is "
                         "provided".format(estimator))

    if atlas_type == 'spheres' and seeds is None:
        raise ValueError("'seeds' must be specified for atlas_type='spheres'."
                         "See documentation nilearn.input_data.NiftiSpheresMasker.")

    if (atlas_type == 'maps' or atlas_type == 'labels') and img is None:
        raise ValueError("'img' should not be None for atlas_type={0} related "
                         "instance of masker. Atlas related maskers is created "
                         "by provided a valid atlas image. See documenation in "
                         "nilearn.input_data for specific "
                         "masker related either maps or labels".format(atlas_type))

    if atlas_type == 'auto' and img is not None:
        img = check_niimg(img)
        if len(img.shape) > 3:
            atlas_type = 'maps'
        else:
            atlas_type = 'labels'

    if atlas_type == 'maps' and img is not None:
        img = check_niimg_4d(img)

    if atlas_type == 'labels' and img is not None:
        img = check_niimg_3d(img)

    new_masker = check_embedded_nifti_masker(estimator,
                                             multi_subject=multi_subject)
    mask = getattr(new_masker, 'mask_img', None)
    estimator_mask = getattr(estimator, 'mask_img', None)
    if mask is None and estimator_mask is not None:
        new_masker.mask_img = estimator.mask_img

    if atlas_type is None:
        return new_masker
    else:
        masker_params = new_masker.get_params()
        new_masker_params = dict()
        _ignore = set(('mask_strategy', 'mask_args', 'n_jobs', 'target_affine',
                       'target_shape'))
        for param in masker_params:
            if param in _ignore:
                continue
            if hasattr(new_masker, param):
                new_masker_params[param] = getattr(new_masker, param)
        # Append atlas extraction related parameters
        if t_r is not None:
            new_masker_params['t_r'] = t_r

        if low_pass is not None:
            new_masker_params['low_pass'] = low_pass

        if high_pass is not None:
            new_masker_params['high_pass'] = high_pass

        if atlas_type is not None:
            if len(img.shape) > 3 and atlas_type == 'maps':
                new_masker_params['maps_img'] = img
                new_masker = NiftiMapsMasker(**new_masker_params)
            elif len(img.shape) == 3 and atlas_type == 'labels':
                new_masker_params['labels_img'] = img
                new_masker = NiftiLabelsMasker(**new_masker_params)

            return new_masker

        if seeds is not None and atlas_type == 'spheres':
            if radius is not None:
                new_masker_params['radius'] = radius
            new_masker_params['seeds'] = seeds
            new_masker = NiftiSpheresMasker(**new_masker_params)

        return new_masker
Example #45
def cast_img(img, dtype=np.float32):
    """ Cast image to the specified dtype"""
    img = check_niimg(img)
    img_data = img.get_data().astype(dtype)
    return new_img_like(img, img_data, copy_header=True)
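
A one-line check for cast_img (check_niimg/new_img_like imports assumed):

import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.arange(8, dtype=np.float64).reshape(2, 2, 2), np.eye(4))
img32 = cast_img(img)
print(np.asanyarray(img32.dataobj).dtype)  # float32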
Example #46
def spatclust(img, min_cluster_size, threshold=None, index=None, mask=None):
    """
    Spatially clusters `img`

    Parameters
    ----------
    img : str or img_like
        Image file or object to be clustered
    min_cluster_size : int
        Minimum cluster size (in voxels)
    threshold : float, optional
        Value at which to threshold `img` before clustering
    index : array_like, optional
        Indices of volumes to extract from `img` before clustering
    mask : (S,) array_like, optional
        Boolean array for masking the resultant data array

    Returns
    -------
    clustered : :obj:`numpy.ndarray`
        Boolean array of clustered (and thresholded) `img` data
    """

    # we need a 4D image for `niimg.iter_img`, below
    img = niimg.copy_img(check_niimg(img, atleast_4d=True))

    # temporarily set voxel sizes to 1mm isotropic so that `min_cluster_size`
    # represents the minimum number of voxels we want to be in a cluster,
    # rather than the minimum size of the desired clusters in mm^3
    if not np.all(np.abs(np.diag(img.affine)) == 1):
        img.set_sform(np.sign(img.affine))

    # grab desired volumes from provided image
    if index is not None:
        if not isinstance(index, list):
            index = [index]
        img = niimg.index_img(img, index)

    # threshold image
    if threshold is not None:
        img = niimg.threshold_img(img, float(threshold))

    clout = []
    for subbrick in niimg.iter_img(img):
        # `min_region_size` is not inclusive (as in AFNI's `3dmerge`)
        # subtract one voxel to ensure we aren't hitting this thresholding issue
        try:
            clsts = connected_regions(subbrick,
                                      min_region_size=int(min_cluster_size) - 1,
                                      smoothing_fwhm=None,
                                      extract_type='connected_components')[0]
        # if no clusters are detected we get a TypeError; create a blank 4D
        # image object as a placeholder instead
        except TypeError:
            clsts = niimg.new_img_like(subbrick,
                                       np.zeros(subbrick.shape + (1,)))
        # if multiple clusters detected, collapse into one volume
        clout += [niimg.math_img('np.sum(a, axis=-1)', a=clsts)]

    # convert back to data array and make boolean
    clustered = utils.load_image(niimg.concat_imgs(clout).get_data()) != 0

    # if mask provided, mask output
    if mask is not None:
        clustered = clustered[mask]

    return clustered
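A hedged usage sketch for spatclust; the file name, threshold, and cluster size below are illustrative.

# Hypothetical usage sketch: keep clusters of at least 20 voxels after
# thresholding the map at 2.3 (file name and values are illustrative).
clustered = spatclust('zstat1.nii.gz', min_cluster_size=20, threshold=2.3)
print(clustered.shape, clustered.dtype)  # boolean cluster mask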
Example No. 47
def find_region_names_using_cut_coords(coords, atlas_img, labels=None):
    """Given list of MNI space coordinates, get names of the brain regions.

    Each MNI coordinate in `coords` is mapped to its nearest voxel in the
    given `atlas_img` space. The label number (int) at that voxel, and the
    name assigned to it, are then looked up and returned.

    Parameters
    ----------
    coords : list of tuples
        A list of (x, y, z) MNI coordinates.

    atlas_img : Nifti-like image
        Path to, or object of, a Nifti-like atlas image. The labels
        (integers) in this image should be sequential, e.g. [0, 1, 2, 3, 4]
        rather than [0, 5, 6, 7]; this helps return the correct names
        without errors.

    labels : list of str
        Names of the brain regions assigned to each label in atlas_img.
        NOTE: the label with index 0 is assumed to be the background (as in
        the Harvard-Oxford atlas), so the background entry should be kept in
        the list to preserve the index correspondence.

    Returns
    -------
    new_labels : list of int
        Integer labels looked up in the given atlas image at the provided
        coordinates.

    names : list of str
        Names of the brain regions corresponding to `new_labels`.
    """
    if not isinstance(coords, collections.Iterable):
        raise ValueError("coords given must be a list of triplets of "
                         "coordinates in native space [(1, 2, 3)]. "
                         "You provided {0}".format(type(coords)))

    if isinstance(atlas_img, _basestring):
        atlas_img = check_niimg(atlas_img)

    affine = get_affine(atlas_img)
    atlas_data = _safe_get_data(atlas_img, ensure_finite=True)
    check_labels_from_atlas = np.unique(atlas_data)

    if labels is not None:
        names = []
        # coerce to an indexable array so the integer lookups below work
        labels = np.asarray(labels)

    if isinstance(labels, collections.Iterable) and \
            isinstance(check_labels_from_atlas, collections.Iterable):
        if len(check_labels_from_atlas) != len(labels):
            warnings.warn("The number of labels provided does not match "
                          "with number of unique labels with atlas image.",
                          stacklevel=2)

    coords = list(coords)
    nearest_coordinates = []

    for sx, sy, sz in coords:
        nearest = np.round(coord_transform(sx, sy, sz, np.linalg.inv(affine)))
        nearest = nearest.astype(int)
        nearest = (nearest[0], nearest[1], nearest[2])
        nearest_coordinates.append(nearest)

    assert len(nearest_coordinates) == len(coords)

    new_labels = []
    for coord_ in nearest_coordinates:
        # Grab the label value at the current voxel coordinate
        index = atlas_data[coord_]
        new_labels.append(index)
        if labels is not None:
            names.append(labels[index])

    if labels is not None:
        return new_labels, names
    else:
        return new_labels
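A hedged usage sketch, assuming the Harvard-Oxford atlas fetched through nilearn.datasets; the coordinates are illustrative.

# Hedged usage sketch with the Harvard-Oxford atlas (coordinates illustrative).
from nilearn import datasets

atlas = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
coords = [(54, -42, 22), (-38, 26, -10)]
label_ids, names = find_region_names_using_cut_coords(coords, atlas.maps,
                                                      labels=atlas.labels)
print(list(zip(label_ids, names)))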
Example No. 48
from nilearn import datasets, _utils
from pynax.core import Mark
from pynax.view import ImshowView
import pylab as pl
import numpy as np

nyu = datasets.fetch_nyu_rest(n_subjects=1)
func = nyu.func[0]
niimg = _utils.check_niimg(func)
fig = pl.figure(figsize=(17, 5))
data = niimg.get_data()[..., 0]

# Awesome example activation map: threshold
data_act = np.ma.MaskedArray(data * 0.8, mask=(data < .6 * np.max(data)))

# Display options
display_options = {}
display_options['interpolation'] = 'nearest'
display_options['cmap'] = pl.cm.gray

ac_display_options = {}
ac_display_options['interpolation'] = 'nearest'
ac_display_options['cmap'] = pl.cm.autumn
ac_display_options['vmin'] = data_act.min()
ac_display_options['vmax'] = data_act.max()

# Marks
mx = Mark(20, {'color': 'r'})
marks = []
views = []
for i in range(10):
    pass  # the loop body building marks and views is not shown in this snippet
Example No. 49
def test_check_niimg():
    with assert_raises(TypeError) as cm:
        _utils.check_niimg(0)
    assert_true('image' in cm.exception.message
                or 'affine' in cm.exception.message)

    with assert_raises(TypeError) as cm:
        _utils.check_niimg([])
    assert_true('image' in cm.exception.message
                or 'affine' in cm.exception.message)

    # Test ensure_3d
    with assert_raises(TypeError) as cm:
        _utils.check_niimg(['test.nii', ], ensure_3d=True)
    assert_true('3D' in cm.exception.message)

    # Check that a filename does not raise an error
    data = np.zeros((40, 40, 40, 2))
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    with testing.write_tmp_imgs(data_img, create_files=True) as filename:
        _utils.check_niimg(filename)

    # Test ensure_3d with a in-memory object
    with assert_raises(TypeError) as cm:
        _utils.check_niimg(data, ensure_3d=True)
    assert_true('3D' in cm.exception.message)

    # Test ensure_3d with a non 3D image
    with assert_raises(TypeError) as cm:
        _utils.check_niimg(data_img, ensure_3d=True)
    assert_true('3D' in cm.exception.message)

    # Test ensure_3d with a 4D image with a length 1 4th dim
    data = np.zeros((40, 40, 40, 1))
    data_img = Nifti1Image(data, np.eye(4))
    _utils.check_niimg(data_img, ensure_3d=True)
Example No. 50
    def fit(self, imgs, y=None, confounds=None, raw=False):
        """Compute the mask and the ICA maps across subjects

        Parameters
        ----------
        imgs: list of Niimg-like objects
            See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
            Data on which PCA must be calculated. If this is a list,
            the affine is considered the same for all.

        y: None
            Ignored; present for scikit-learn API compatibility.

        confounds: CSV file path or 2D matrix
            This parameter is passed to nilearn.signal.clean. Please see the
            related documentation for details.

        raw: bool, optional
            If True, `imgs` are treated as paths to pre-masked .npy arrays
            rather than Niimg-like objects. Default: False.
        """
        # Base logic for decomposition estimators
        BaseDecomposition.fit(self, imgs)

        # Cast to MultiNiftiMasker with shelving
        masker_params = self.masker_.get_params()
        mask_img = self.masker_.mask_img_
        masker_params['mask_img'] = mask_img
        self.masker_ = MultiNiftiMasker(**masker_params).fit()

        random_state = check_random_state(self.random_state)

        n_epochs = int(self.n_epochs)
        if n_epochs < 1:
            raise ValueError('n_epochs should be at least one, '
                             'got {0}.'.format(self.n_epochs))

        if confounds is None:
            confounds = itertools.repeat(None)

        if raw:
            data_list = imgs
        else:
            if self.shelve:
                data_list = self.masker_.transform(imgs, confounds,
                                                   shelve=True)
            else:
                data_list = list(zip(imgs, confounds))

        if raw:
            record_samples = [np.load(img, mmap_mode='r').shape[0] for img in
                              imgs]
        else:
            record_samples = [check_niimg(img).shape[3] for img in imgs]
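        # Cumulative per-record sample counts: offset_list[i] is the global
        # index of the first sample of record i (used by the var_red branch).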
        offset_list = np.zeros(len(imgs) + 1, dtype='int')
        offset_list[1:] = np.cumsum(record_samples)

        if self.dict_init is not None:
            dict_init = self.masker_.transform(self.dict_init)
        else:
            dict_init = None

        dict_mf = DictMF(n_components=self.n_components,
                         alpha=self.alpha,
                         reduction=self.reduction,
                         projection=self.projection,
                         learning_rate=self.learning_rate,
                         offset=self.offset,
                         var_red=self.var_red,
                         replacement=self.replacement,
                         n_samples=offset_list[-1] + 1,
                         batch_size=self.batch_size,
                         random_state=random_state,
                         dict_init=dict_init,
                         l1_ratio=self.l1_ratio,
                         backend=self.backend,
                         verbose=max(0, self.verbose - 1))

        # Preinit
        max_sample_size = max(record_samples)
        sample_subset_range = np.arange(max_sample_size)
        n_voxels = np.sum(check_niimg(self.masker_.mask_img_).get_data() != 0)

        data_array = np.empty((max_sample_size, n_voxels),
                              dtype='float', order='C')

        self.components_ = np.empty((self.n_components, n_voxels), order='F')

        # Epoch logic
        data_idx = itertools.chain(*[random_state.permutation(
            len(imgs)) for _ in range(n_epochs)])

        if self.trace_folder is not None:
            results = {'reduction': self.reduction, 'alpha': self.alpha,
                       'timings': [0.]}
            with open(join(self.trace_folder, 'results.json'), 'w') as f:
                json.dump(results, f)

        for record, this_data_idx in enumerate(data_idx):
            this_data = data_list[this_data_idx]
            this_n_samples = record_samples[this_data_idx]
            if self.var_red:
                offset = offset_list[this_data_idx]
            if self.verbose:
                print('Streaming record %s' % record)
            if raw:
                data_array[:this_n_samples] = np.load(this_data,
                                                      mmap_mode='r')
            else:
                if self.shelve:
                    data_array[:this_n_samples] = this_data.get()
                else:
                    data_array[:this_n_samples] = self.masker_.transform(
                        this_data[0],
                        confounds=this_data[1])
            if self.trace_folder is not None:
                t0 = time.time()
            if self.var_red:
                dict_mf.partial_fit(data_array[:this_n_samples],
                                    sample_subset=offset + sample_subset_range[
                                                           :this_n_samples],
                                    check_input=False)
            else:
                dict_mf.partial_fit(data_array[:this_n_samples])
            if self.trace_folder is not None:
                t1 = (time.time() - t0) + results['timings'][-1]
                results['timings'].append(t1)
                with open(join(self.trace_folder, 'results.json'), 'w') as f:
                    json.dump(results, f)
                if record % 4 == 0:
                    self.components_[:] = dict_mf.components_
                    _normalize_and_flip(self.components_)

                    self.masker_.inverse_transform(
                        self.components_).to_filename(join(self.trace_folder,
                                                           "record_"
                                                           "%s.nii.gz"
                                                           % record))

        self.components_[:] = dict_mf.components_
        _normalize_and_flip(self.components_)
        return self
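A hedged usage sketch of the fit method above. The owning class's name is not visible in this snippet, so `DictLearningEstimator` is a hypothetical stand-in, and the image paths are illustrative.

# Hypothetical usage sketch; 'DictLearningEstimator' stands in for the class
# that owns fit() above, whose real name lies outside this snippet.
estimator = DictLearningEstimator(n_components=20, n_epochs=1, alpha=1.0)
estimator.fit(['sub-01_bold.nii.gz', 'sub-02_bold.nii.gz'])
components = estimator.components_  # (n_components, n_voxels) array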