Example #1
def test_new_img_like_side_effect():
    img1 = Nifti1Image(np.ones((2, 2, 2, 2)), affine=np.eye(4))
    hash1 = joblib.hash(img1)
    new_img_like(img1, np.ones((2, 2, 2, 2)), img1.affine.copy(),
                 copy_header=True)
    hash2 = joblib.hash(img1)
    assert_equal(hash1, hash2)
Example #2
def test_new_img_like_mgz():
    """Check that new images can be generated with bool MGZ type
    This is usually when computing masks using MGZ inputs, e.g.
    when using plot_stap_map
    """

    ref_img = nibabel.load(os.path.join(datadir, 'test.mgz'))
    data = np.ones(ref_img.get_data().shape, dtype=bool)
    affine = ref_img.affine
    new_img_like(ref_img, data, affine, copy_header=False)
Example #3
def test_new_img_like():
    # Give a list to new_img_like
    data = np.zeros((5, 6, 7))
    data[2:4, 1:5, 3:6] = 1
    affine = np.diag((4, 3, 2, 1))
    img = nibabel.Nifti1Image(data, affine=affine)
    img2 = new_img_like([img, ], data)
    np.testing.assert_array_equal(img.get_data(), img2.get_data())

    # test_new_img_like_with_nifti2image_copy_header
    img_nifti2 = nibabel.Nifti2Image(data, affine=affine)
    img2_nifti2 = new_img_like([img_nifti2, ], data, copy_header=True)
    np.testing.assert_array_equal(img_nifti2.get_data(), img2_nifti2.get_data())
Example #4
def test_new_img_like_mgz():
    """Check that new images can be generated with bool MGZ type
    This is usually when computing masks using MGZ inputs, e.g.
    when using plot_stap_map
    """

    if not LooseVersion(nibabel.__version__) >= LooseVersion('1.2.0'):
        # Old nibabel do not support MGZ files
        raise SkipTest

    ref_img = nibabel.load(os.path.join(datadir, 'test.mgz'))
    data = np.ones(ref_img.get_data().shape, dtype=bool)
    affine = ref_img.affine
    new_img_like(ref_img, data, affine, copy_header=False)
Example #5
def test_encode_nii():
    mni = datasets.load_mni152_template()
    encoded = html_stat_map._encode_nii(mni)
    decoded = html_stat_map._decode_nii(encoded)
    assert np.allclose(mni.get_data(), decoded.get_data())

    mni = image.new_img_like(mni, np.asarray(mni.get_data(), dtype='>f8'))
    encoded = html_stat_map._encode_nii(mni)
    decoded = html_stat_map._decode_nii(encoded)
    assert np.allclose(mni.get_data(), decoded.get_data())

    mni = image.new_img_like(mni, np.asarray(mni.get_data(), dtype='<i4'))
    encoded = html_stat_map._encode_nii(mni)
    decoded = html_stat_map._decode_nii(encoded)
    assert np.allclose(mni.get_data(), decoded.get_data())
Example #6
def clean_img(img):
    """ Remove nan/inf entries."""
    img = check_niimg(img)
    img_data = img.get_data()
    img_data[np.isnan(img_data)] = 0
    img_data[np.isinf(img_data)] = 0
    return new_img_like(img, img_data, copy_header=True)
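
A minimal usage sketch for the helper above (assuming check_niimg and new_img_like are in scope as in the snippet, and a nibabel version where get_data() is still available):

import numpy as np
import nibabel as nib

data = np.ones((4, 4, 4))
data[0, 0, 0] = np.nan
data[1, 1, 1] = np.inf
noisy = nib.Nifti1Image(data, affine=np.eye(4))
cleaned = clean_img(noisy)
assert np.isfinite(cleaned.get_fdata()).all()  # all nan/inf entries set to 0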
Example #7
def flip_img_lr(img):
    """ Convenience function to flip image on X axis"""
    # This won't work for all image formats! But
    # does work for those that we're working with...
    assert isinstance(img, nib.nifti1.Nifti1Image)
    img = new_img_like(img, data=img.get_data()[::-1], copy_header=True)
    return img
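
Note that only the data array is reversed along x; the affine is left untouched, which is why the helper is only safe for the image layouts the authors were using. A hedged sketch of the expected behaviour (again assuming a nibabel version where get_data() is available):

import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.arange(8.).reshape(2, 2, 2), affine=np.eye(4))
flipped = flip_img_lr(img)
assert np.array_equal(flipped.get_fdata(), img.get_fdata()[::-1])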
Example #8
File: utils.py  Project: TomMaullin/tedana
def new_nii_like(ref_img, data, affine=None, copy_header=True):
    """
    Coerces `data` into NiftiImage format like `ref_img`

    Parameters
    ----------
    ref_img : str or img_like
        Reference image
    data : (S [x T]) array_like
        Data to be saved
    affine : (4 x 4) array_like, optional
        Transformation matrix to be used. Default: `ref_img.affine`
    copy_header : bool, optional
        Whether to copy header from `ref_img` to new image. Default: True

    Returns
    -------
    nii : :obj:`nibabel.nifti1.Nifti1Image`
        NiftiImage
    """

    ref_img = check_niimg(ref_img)
    nii = new_img_like(ref_img,
                       data.reshape(ref_img.shape[:3] + data.shape[1:]),
                       affine=affine,
                       copy_header=copy_header)
    nii.set_data_dtype(data.dtype)

    return nii
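
A hedged usage sketch: the helper reshapes a samples-by-time array back onto the reference image's spatial grid (names below are illustrative; check_niimg and new_img_like come from nilearn as in the snippet):

import numpy as np
import nibabel as nib

ref = nib.Nifti1Image(np.zeros((4, 4, 4)), affine=np.eye(4))
flat = np.random.rand(4 * 4 * 4, 10).astype(np.float32)  # (S x T) array
nii = new_nii_like(ref, flat)
assert nii.shape == (4, 4, 4, 10)
assert nii.get_data_dtype() == np.float32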
Example #9
def test_view_img():
    mni = datasets.load_mni152_template()
    with warnings.catch_warnings(record=True) as w:
        # Create a fake functional image by resampling the template
        img = image.resample_img(mni, target_affine=3 * np.eye(3))
        html_view = html_stat_map.view_img(img)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, threshold='95%')
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, bg_img=mni)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, bg_img=None)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, threshold=2., vmax=4.)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, symmetric_cmap=False)
        img_4d = image.new_img_like(img, img.get_data()[:, :, :, np.newaxis])
        assert len(img_4d.shape) == 4
        html_view = html_stat_map.view_img(img_4d, threshold=2., vmax=4.)
        _check_html(html_view)

    # Check that all warnings were expected
    warnings_set = set(warning_.category for warning_ in w)
    expected_set = set([FutureWarning, UserWarning,
                       DeprecationWarning])
    assert warnings_set.issubset(expected_set), (
        "the following warnings were not expected: {}").format(
        warnings_set.difference(expected_set))
Example #10
File: nilearn-helper.py  Project: xgrg/alfa
def plot_tbss(img, mean_FA_skeleton, start, end, row_l=6, step=1, title='',
    axis='z', pngfile=None):
    ''' Inspired by plot_two_maps. Plots a TBSS contrast map over the
    skeleton of a mean FA map'''

    # Dilate tbss map
    import numpy as np
    from skimage.morphology import cube, dilation
    from nilearn import image
    d = np.array(image.load_img(img).dataobj)
    dil_tbss = dilation(d, cube(2))
    dil_tbss_img = image.new_img_like(img, dil_tbss)

    slice_nb = int(abs(((end - start) / float(step))))
    images = []

    for line in range(int(slice_nb/float(row_l) + 1)):
        opt = {'title': title if line == 0 else None,
               'colorbar':False,
               'black_bg':True,
               'display_mode':axis,
               'threshold':0.2,
               'cmap': cm.Greens,
               'cut_coords':range(start + line * row_l * step,
                                       start + (line+1) * row_l * step,
                                       step)}
        method = 'plot_stat_map'
        opt.update({'stat_map_img': mean_FA_skeleton})

        t = getattr(plotting, method)(**opt)


        try:
            # Add overlay
            t.add_overlay(dil_tbss_img, cmap=cm.hot, threshold=0.95, colorbar=True)
        except TypeError:
            print(img, 'probably empty tbss map')

        # Converting to PIL and appending it to the list
        buf = io.BytesIO()
        t.savefig(buf)
        buf.seek(0)
        im = Image.open(buf)
        images.append(im)

    # Joining the images
    imsize = images[0].size
    out = Image.new('RGBA', size=(imsize[0], len(images)*imsize[1]))
    for i, im in enumerate(images):
        box = (0, i * imsize[1], imsize[0], (i+1) * imsize[1])
        out.paste(im, box)

    if pngfile is None:
        import tempfile
        pngfile = tempfile.mkstemp(suffix='.png')[1]
    print('Saving to...', pngfile, '(%s)' % title)

    out.save(pngfile)
Example #11
def test_resample_stat_map():

    # Start with simple simulated data
    bg_img, data = _simulate_img()

    # Now double the voxel size and mess with the affine
    affine = 2 * np.eye(4)
    affine[3, 3] = 1
    affine[0, 1] = 0.1
    stat_map_img = Nifti1Image(data, affine)

    # Make a mask for the stat image
    mask_img = new_img_like(stat_map_img, data > 0, stat_map_img.affine)

    # Now run the resampling
    stat_map_img, mask_img = html_stat_map._resample_stat_map(
        stat_map_img, bg_img, mask_img, resampling_interpolation='nearest')

    # Check positive isotropic, near-diagonal affine
    _check_affine(stat_map_img.affine)
    _check_affine(mask_img.affine)

    # Check voxel size matches bg_img
    assert stat_map_img.affine[0, 0] == bg_img.affine[0, 0], (
        "stat_map_img was not resampled at the resolution of background")
    assert mask_img.affine[0, 0] == bg_img.affine[0, 0], (
        "mask_img was not resampled at the resolution of background")
Example #12
def test_new_img_like():
    # Give a list to new_img_like
    data = np.zeros((5, 6, 7))
    data[2:4, 1:5, 3:6] = 1
    affine = np.diag((4, 3, 2, 1))
    img = nibabel.Nifti1Image(data, affine=affine)
    img2 = new_img_like([img, ], data)
    np.testing.assert_array_equal(img.get_data(), img2.get_data())
Example #13
File: rsn_atlas.py  Project: Neurita/pypes
    def join_networks(self, network_indices):
        """Return a NiftiImage containing a binarised version of the sum of
        the RSN images of each of the `network_indices`."""
        oimg = self._get_img(network_indices[0]).get_data()
        for idx in network_indices[1:]:
            oimg += self._get_img(idx).get_data()

        return niimg.new_img_like(self._get_img(network_indices[0]),
                                  oimg.astype(bool))
Example #14
    def _run_interface(self, runtime):
        t1_img = nli.load_img(self.inputs.in_file)
        t1_data = t1_img.get_data()
        epi_data = nli.load_img(self.inputs.ref_file).get_data()

        # We assume the image is already masked
        mask = t1_data > 0

        t1_min, t1_max = np.unique(t1_data)[[1, -1]]
        epi_min, epi_max = np.unique(epi_data)[[1, -1]]
        scale_factor = (epi_max - epi_min) / (t1_max - t1_min)

        inv_data = mask * ((t1_max - t1_data) * scale_factor + epi_min)

        out_file = fname_presuffix(self.inputs.in_file, suffix='_inv', newpath=runtime.cwd)
        nli.new_img_like(t1_img, inv_data, copy_header=True).to_filename(out_file)
        self._results['out_file'] = out_file
        return runtime
Example #15
def inject_skullstripped(subjects_dir, subject_id, skullstripped):
    mridir = op.join(subjects_dir, subject_id, 'mri')
    t1 = op.join(mridir, 'T1.mgz')
    bm_auto = op.join(mridir, 'brainmask.auto.mgz')
    bm = op.join(mridir, 'brainmask.mgz')

    if not op.exists(bm_auto):
        img = nb.load(t1)
        mask = nb.load(skullstripped)
        bmask = new_img_like(mask, mask.get_data() > 0)
        resampled_mask = resample_to_img(bmask, img, 'nearest')
        masked_image = new_img_like(img, img.get_data() * resampled_mask.get_data())
        masked_image.to_filename(bm_auto)

    if not op.exists(bm):
        copyfile(bm_auto, bm, copy=True, use_hardlink=True)

    return subjects_dir, subject_id
Example #16
File: denoising.py  Project: xgrg/alfa
def rescale(source, target):
    import nibabel as nib
    import numpy as np
    from nilearn import image

    n = nib.load(source)
    d = np.array(n.dataobj)
    s = n.dataobj.slope
    i = image.new_img_like(n, d/s)
    i.to_filename(target)
    log.info('Rescaling done: %s rescaled to %s'%(source, target))
Example #17
def compute_confounds(imgs, mask_img, n_confounds=5, get_randomized_svd=False,
                      compute_not_mask=False):
    """
    """
    confounds = []
    if not isinstance(imgs, collections.abc.Iterable) or \
            isinstance(imgs, _basestring):
        imgs = [imgs, ]

    img = _utils.check_niimg_4d(imgs[0])
    shape = img.shape[:3]
    affine = get_affine(img)

    if isinstance(mask_img, _basestring):
        mask_img = _utils.check_niimg_3d(mask_img)

    if not _check_same_fov(img, mask_img):
        mask_img = resample_img(
            mask_img, target_shape=shape, target_affine=affine,
            interpolation='nearest')

    if compute_not_mask:
        print("Non mask based confounds extraction")
        not_mask_data = np.logical_not(mask_img.get_data().astype(int))
        whole_brain_mask = masking.compute_multi_epi_mask(imgs)
        not_mask = np.logical_and(not_mask_data, whole_brain_mask.get_data())
        mask_img = new_img_like(img, not_mask.astype(int), affine)

    for img in imgs:
        print("[Confounds Extraction] {0}".format(img))
        img = _utils.check_niimg_4d(img)
        print("[Confounds Extraction] high ariance confounds computation]")
        high_variance = high_variance_confounds(img, mask_img=mask_img,
                                                n_confounds=n_confounds)
        if compute_not_mask and get_randomized_svd:
            signals = masking.apply_mask(img, mask_img)
            non_constant = np.any(np.diff(signals, axis=0) != 0, axis=0)
            signals = signals[:, non_constant]
            signals = signal.clean(signals, detrend=True)
            print("[Confounds Extraction] Randomized SVD computation")
            U, s, V = randomized_svd(signals, n_components=n_confounds,
                                     random_state=0)
            if high_variance is not None:
                confound_ = np.hstack((U, high_variance))
            else:
                confound_ = U
        else:
            confound_ = high_variance
        confounds.append(confound_)

    return confounds
Example #18
File: utils.py  Project: Neurita/pypes
def get_largest_blobs(ic_maps):
    """ Generator for the largest blobs in each IC spatial map.
    These should be masked and thresholded.

    Parameters
    ----------
    ic_maps: sequence of niimg-like

    Returns
    -------
    blobs: generator of niimg-like
    """
    # store the average value of the blob in a list
    for i, icimg in enumerate(iter_img(ic_maps)):
        yield niimg.new_img_like(icimg, largest_connected_component(icimg.get_data()))
Example #19
def dicom2nii(filename):
    dicomObject = dicom.read_file(filename)
    niftiObject = dicomreaders.mosaic_to_nii(dicomObject)
    temp_data = niftiObject.get_data()
    output_image_correct = nib.orientations.apply_orientation(
        temp_data, ornt_transform)
    correct_object = new_img_like(templateFunctionalVolume,
                                  output_image_correct,
                                  copy_header=True)
    # print(nib.aff2axcodes(niftiObject.affine))
    splitList = filename.split('/')
    # fullNiftiFilename='/'+os.path.join(*splitList[0:-1] , splitList[-1].split('.')[0]+'.nii.gz')
    fullNiftiFilename = os.path.join(tmp_folder,
                                     splitList[-1].split('.')[0] + '.nii.gz')
    print('fullNiftiFilename=', fullNiftiFilename)
    correct_object.to_filename(fullNiftiFilename)
    return fullNiftiFilename
Example #20
def signal_fluctuation_sensitivity(img: niimg_like) -> nib.nifti1.Nifti1Image:
    """Compute voxelwise SFS given an img in MNI space and an array containing
    a CSF confounding regressor"""
    # first_term is the "first term" of the SFS, as referred to in the paper.
    meanimg = voxelwise_mean(img)
    first_term = meanimg.get_data() / global_mean_within_mni(img)

    # Similary, the second term is the "second term" in the paper.
    stdimg = voxelwise_std(img)
    second_term = stdimg.get_data() / confound_std(img)

    # SFS is the product of both terms. Here we return it as an image.
    sfs = first_term * second_term
    return image.new_img_like(img,
                              data=sfs,
                              affine=meanimg.affine,
                              copy_header=True)
Example #21
def _get_data_and_json_view(black_bg, cbar):
    # simple simulated data for stat_img and background
    bg_img, data = _simulate_img()
    stat_map_img, data = _simulate_img()

    # make a mask
    mask_img = new_img_like(stat_map_img, data > 0, stat_map_img.affine)

    # Get color bar and data ranges
    colors = colorscale('cold_hot', data.ravel(), threshold=0,
                        symmetric_cmap=True, vmax=1)

    # Build a sprite
    json_view = html_stat_map._json_view_data(
        bg_img, stat_map_img, mask_img, bg_min=0, bg_max=1, black_bg=black_bg,
        colors=colors, cmap='cold_hot', colorbar=cbar)
    return data, json_view
Example #22
def test_get_data():
    img, *_ = data_gen.generate_fake_fmri(shape=(10, 11, 12))
    data = get_data(img)
    assert data.shape == img.shape
    assert data is img._data_cache
    mask_img = new_img_like(img, data > 0)
    data = get_data(mask_img)
    assert data.dtype == np.dtype('int8')
    img_3d = index_img(img, 0)
    with tempfile.TemporaryDirectory() as tempdir:
        filename = os.path.join(tempdir, 'img_{}.nii.gz')
        img_3d.to_filename(filename.format('a'))
        img_3d.to_filename(filename.format('b'))
        data = get_data(filename.format('a'))
        assert len(data.shape) == 3
        data = get_data(filename.format('*'))
        assert len(data.shape) == 4
Example #23
def copy_header(in_file, data_file, out_file=None):
    """ Use nilearn.image.new_img_like to copy the header
    from `in_file` to `data_file` and return the result.

    Returns
    -------
    out_img : Nifti1Image
        The image from `data_file` with the header copied from `in_file`.
    """
    import nilearn.image as niimg

    img = niimg.load_img(data_file)

    return niimg.new_img_like(
        in_file,
        img.get_data(),
        affine=img.affine, copy_header=True
    )
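
A hedged usage sketch (the file names below are hypothetical):

# Rebuild 'stats.nii.gz' data with the header of 'reference.nii.gz':
fixed_img = copy_header('reference.nii.gz', 'stats.nii.gz')
fixed_img.to_filename('stats_fixed_header.nii.gz')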
Example #24
File: predict.py  Project: zbx99/3DUnetCNN
def single_volume_zstat_denoising(model_filename, model_name, n_features, filenames, window, prediction_dir,
                                  n_gpus=1, batch_size=1, model_kwargs=None, n_outputs=None,
                                  sequence_kwargs=None, spacing=None, sequence=None,
                                  strict_model_loading=True, metric_names=None,
                                  verbose=True, resample_predictions=False, **unused_kwargs):
    import torch
    model, dataset, basename = load_volumetric_model_and_dataset(model_name, model_filename, model_kwargs, n_outputs,
                                                                 n_features, strict_model_loading, n_gpus, sequence,
                                                                 sequence_kwargs, filenames, window, spacing,
                                                                 metric_names)
    dataset.extract_sub_volumes = False
    print("Dataset: ", len(dataset))
    with torch.no_grad():
        completed = set()
        batch = list()
        for idx in range(len(dataset)):
            x_filename, subject_id = get_feature_filename_and_subject_id(dataset, idx, verbose=False)
            while isinstance(x_filename, list):
                x_filename = x_filename[0]
            if x_filename in completed:
                continue
            if verbose:
                print("Reading:", x_filename)
            x_image, ref_image = load_images_from_dataset(dataset, idx, resample_predictions)
            if len(x_image.shape) == 4:
                volumes_per_image = x_image.shape[3]
                prediction_data = np.zeros(x_image.shape)
            else:
                volumes_per_image = 1
                prediction_data = np.zeros(x_image.shape + (volumes_per_image,))
            data = get_nibabel_data(x_image)
            for image_idx in range(volumes_per_image):
                batch.append(data[..., image_idx][..., None])
                if len(batch) >= batch_size or image_idx == volumes_per_image - 1:
                    prediction = pytorch_predict_batch_array(model, batch, n_gpus)
                    prediction = np.moveaxis(prediction, 0, -1).squeeze()
                    prediction_data[..., (image_idx - prediction.shape[-1] + 1):(image_idx + 1)] = prediction
                    batch = list()
            pred_image = new_img_like(ref_niimg=x_image, data=prediction_data)
            output_filename = os.path.join(prediction_dir, "_".join((subject_id,
                                                                     basename,
                                                                     os.path.basename(x_filename))))
            if verbose:
                print("Writing:", output_filename)
            pred_image.to_filename(output_filename)
            completed.add(x_filename)
Example #25
def resize(image, new_shape, interpolation="linear"):
    if image.shape == new_shape:
        return image
    else:
        image = reorder_img(image, resample=interpolation)
        zoom_level = np.divide([float(i) for i in new_shape],
                               [float(i) for i in image.shape])
        new_spacing = np.divide(image.header.get_zooms(), zoom_level)
        new_data = resample_to_spacing(image.get_data(),
                                       image.header.get_zooms(),
                                       new_spacing,
                                       interpolation=interpolation)
        new_affine = np.copy(image.affine)
        np.fill_diagonal(new_affine, new_spacing.tolist() + [1])
        new_affine[:3, 3] += calculate_origin_offset(new_spacing,
                                                     image.header.get_zooms())
        return new_img_like(image, new_data, affine=new_affine)
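
A hedged usage sketch, assuming the project's resample_to_spacing and calculate_origin_offset helpers are importable alongside nilearn's reorder_img and new_img_like:

import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.random.rand(32, 32, 32), affine=np.eye(4))
resized = resize(img, new_shape=(64, 64, 64))
assert resized.shape == (64, 64, 64)
# The voxel spacing is halved, so the field of view is preserved.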
Example #26
def gen_img_list(uatlas):
    """
    Return a list of boolean NIfTI masks, where each mask corresponds to a unique atlas label in the
    provided atlas parcellation. A path string to the Nifti1Image is the input.

    Parameters
    ----------
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.

    Returns
    -------
    img_list : list
        List of binarized Nifti1Images corresponding to ROI masks for each unique atlas label.
    """
    import gc
    import os.path as op
    from nilearn.image import new_img_like
    if not op.isfile(uatlas):
        raise ValueError('\nERROR: User-specified atlas input not found! Check that the file(s) specified with the -ua '
                         'flag exist(s)')

    bna_img = nib.load(uatlas)
    bna_data = np.around(np.asarray(bna_img.dataobj)).astype('uint16')

    # Get an array of unique parcels
    bna_data_for_coords_uniq = np.unique(bna_data)

    # Number of parcels:
    par_max = len(bna_data_for_coords_uniq) - 1
    img_stack = []
    for idx in range(1, par_max + 1):
        roi_img = bna_data == bna_data_for_coords_uniq[idx].astype('uint16')
        img_stack.append(roi_img.astype('uint16'))
    img_stack = np.array(img_stack)

    img_list = []
    for idy in range(par_max):
        img_list.append(new_img_like(bna_img, img_stack[idy]))

    del img_stack

    bna_img.uncache()
    gc.collect()

    return img_list
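
A hedged usage sketch (the atlas path is hypothetical):

rois = gen_img_list('/path/to/atlas_in_mni.nii.gz')
print(len(rois), 'binary ROI masks, one per non-zero atlas label')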
Example #27
def get_foreground_from_set_of_files(set_of_files,
                                     background_value=0,
                                     tolerance=0.00001,
                                     return_image=False):
    for i, image_file in enumerate(set_of_files):
        image = read_image(image_file)
        is_foreground = np.logical_or(
            image.get_data() < (background_value - tolerance),
            image.get_data() > (background_value + tolerance))
        if i == 0:
            foreground = np.zeros(is_foreground.shape, dtype=np.uint8)

        foreground[is_foreground] = 1
    if return_image:
        return new_img_like(image, foreground)
    else:
        return foreground
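
A hedged calling sketch (file names hypothetical, assuming the project's read_image helper loads a NIfTI image): the union of non-background voxels across all modalities becomes the foreground mask:

mask_img = get_foreground_from_set_of_files(['t1.nii.gz', 'flair.nii.gz'],
                                            return_image=True)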
Example #28
def resize(image, new_shape, interpolation='linear'):
    image = reorder_img(image, resample=interpolation)
    # print('reorder_img', image)
    # print(image.shape)
    # divide twice: first the shape ratio (zoom level), then the new voxel spacing
    zoom_level = np.divide(new_shape, image.shape)
    new_spacing = np.divide(image.header.get_zooms(), zoom_level)
    # print('new_spacing', new_spacing)
    new_data = resample_to_spacing(image.get_data(),
                                   image.header.get_zooms(),
                                   new_spacing,
                                   interpolation=interpolation)
    new_affine = np.copy(image.affine)
    np.fill_diagonal(new_affine, new_spacing.tolist() + [1])
    new_affine[:3, 3] += calculate_origin_offset(new_spacing,
                                                 image.header.get_zooms())
    return new_img_like(image, new_data, affine=new_affine)
Example #29
def test_nifti_spheres_masker_inverse_overlap():
    rng = np.random.RandomState(42)

    # Test overlapping data in inverse_transform
    affine = np.eye(4)
    shape = (5, 5, 5)

    data = rng.random_sample(shape + (5, ))
    fmri_img = nibabel.Nifti1Image(data, affine)

    # Apply mask image - to allow inversion
    mask_img = new_img_like(fmri_img, np.ones(shape))
    seeds = [(0, 0, 0), (2, 2, 2)]
    # Inverse data
    inv_data = rng.random_sample(len(seeds))

    overlapping_masker = NiftiSpheresMasker(seeds,
                                            radius=1,
                                            allow_overlap=True,
                                            mask_img=mask_img).fit()
    overlapping_masker.inverse_transform(inv_data)

    overlapping_masker = NiftiSpheresMasker(seeds,
                                            radius=2,
                                            allow_overlap=True,
                                            mask_img=mask_img).fit()

    overlap = overlapping_masker.inverse_transform(inv_data)

    # Test whether overlapping data is averaged
    assert_array_almost_equal(get_data(overlap)[1, 1, 1], np.mean(inv_data))

    noverlapping_masker = NiftiSpheresMasker(seeds,
                                             radius=1,
                                             allow_overlap=False,
                                             mask_img=mask_img).fit()

    noverlapping_masker.inverse_transform(inv_data)
    noverlapping_masker = NiftiSpheresMasker(seeds,
                                             radius=2,
                                             allow_overlap=False,
                                             mask_img=mask_img).fit()

    with pytest.raises(ValueError, match='Overlap detected'):
        noverlapping_masker.inverse_transform(inv_data)
Example #30
def test_img_to_signals_labels_non_float_type(target_dtype):
    fake_fmri_data = np.random.RandomState(0).rand(10, 10, 10, 10) > 0.5
    fake_affine = np.eye(4, 4).astype(np.float64)
    fake_fmri_img_orig = nibabel.Nifti1Image(
        fake_fmri_data.astype(np.float64),
        fake_affine,
    )
    fake_fmri_img_target_dtype = new_img_like(
        fake_fmri_img_orig, fake_fmri_data.astype(target_dtype))
    fake_mask_data = np.ones((10, 10, 10), dtype=np.uint8)
    fake_mask = nibabel.Nifti1Image(fake_mask_data, fake_affine)

    masker = NiftiLabelsMasker(fake_mask)
    masker.fit()
    timeseries_int = masker.transform(fake_fmri_img_target_dtype)
    timeseries_float = masker.transform(fake_fmri_img_orig)
    assert np.sum(timeseries_int) != 0
    assert np.allclose(timeseries_int, timeseries_float)
Example #31
def _merge_mask(in_files):
    in_imgs = [nib.load(in_file) for in_file in in_files]

    outshape = first(in_imgs).shape
    assert all(in_img.shape == outshape
               for in_img in in_imgs), "Mask shape mismatch"

    in_data = [
        np.asanyarray(in_img.dataobj).astype(bool) for in_img in in_imgs
    ]
    outarr = np.logical_and.reduce(in_data)

    outimg = new_img_like(first(in_imgs), outarr, copy_header=True)

    merged_file = _merge_fname(in_files)
    nib.save(outimg, merged_file)

    return merged_file
Example #32
def save2volbord_bci(data, outfile, bfp_path, smooth_std=0):
    '''Save output to brainordinates'''
    a = loadmat(join(bfp_path, 'supp_data', 'bord_ind.mat'))
    v = load_img(
        join(bfp_path, 'supp_data', 'BCI-DNI_brain.pvc.frac.3mm.nii.gz'))

    img_dat = np.zeros(v.shape)
    img_dat[np.unravel_index(a['ind'], img_dat.shape, order='F')] = data[:,
                                                                         None]

    if smooth_std > 0:
        img_dat = gaussian_filter(img_dat,
                                  sigma=(smooth_std, smooth_std, smooth_std),
                                  order=0)

    v2 = new_img_like(v, img_dat)

    v2.to_filename(outfile)
Example #33
def extract_clusters(folder):
    from nilearn.image import get_data, new_img_like
    
    os.chdir(folder)
    effects = [i.split('_where')[0] for i in os.listdir() if 'where' in i]

    for eff in effects:
        out_dir = f'{eff}_cluster_masks'
        mkdir(out_dir)

        cmap = f'{eff}_cluster_map.nii.gz'
        cluster_data = get_data(cmap)
        for clust in np.unique(cluster_data):
            if clust > 0:
                mask_data = np.zeros(cluster_data.shape)
                mask_data[np.where(cluster_data == clust)] = 1

                out_mask = new_img_like(nib.load(cmap), mask_data.astype(int), copy_header=True)
                nib.save(out_mask, f'{out_dir}/cluster_{clust}_mask.nii.gz')
Example #34
def saveAsNiftiImage(dicomDataObject, fullNiftiFilename, cfg, reference):
    """
    This function takes in a dicom data object written in bytes, what you expect
    the dicom file to be called (we will use the same name format for the nifti
    file), and the config file while will have (1) the axes transformation for the
    dicom file and (2) the header information from a reference scan.

    Used externally.
    """
    niftiObject = dicomreaders.mosaic_to_nii(dicomDataObject)
    temp_data = niftiObject.get_fdata()
    output_image_correct = nib.orientations.apply_orientation(
        temp_data, cfg.axesTransform)
    correct_object = new_img_like(reference,
                                  output_image_correct,
                                  copy_header=True)
    correct_object.to_filename(fullNiftiFilename)
    return fullNiftiFilename
Example #35
    def fit(self, X=None, y=None):  # noqa
        super(HemisphereMasker, self).fit(X, y)

        # x, y, z
        hemi_mask_data = reorder_img(self.mask_img_).get_data().astype(bool)

        xvals = hemi_mask_data.shape[0]
        midpt = int(np.ceil(xvals / 2.0))
        if self.hemi == "r":
            other_hemi_slice = slice(midpt, xvals)
        else:
            other_hemi_slice = slice(0, midpt)

        hemi_mask_data[other_hemi_slice] = False
        mask_data = self.mask_img_.get_data() * hemi_mask_data
        self.mask_img_ = new_img_like(self.mask_img_, data=mask_data)

        return self
Example #36
def test_vol_to_surf(kind, n_scans, use_mask):
    img, mask_img = data_gen.generate_mni_space_img(n_scans)
    if not use_mask:
        mask_img = None
    if n_scans == 1:
        img = image.new_img_like(img, image.get_data(img).squeeze())
    fsaverage = datasets.fetch_surf_fsaverage()
    mesh = surface.load_surf_mesh(fsaverage["pial_left"])
    inner_mesh = surface.load_surf_mesh(fsaverage["white_left"])
    center_mesh = np.mean([mesh[0], inner_mesh[0]], axis=0), mesh[1]
    proj = surface.vol_to_surf(
        img, mesh, kind="depth", inner_mesh=inner_mesh, mask_img=mask_img)
    other_proj = surface.vol_to_surf(
        img, center_mesh, kind=kind, mask_img=mask_img)
    correlation = pearsonr(proj.ravel(), other_proj.ravel())[0]
    assert correlation > .99
    with pytest.raises(ValueError, match=".*interpolation.*"):
        surface.vol_to_surf(img, mesh, interpolation="bad")
Example #37
    def _run_interface(self, runtime):
        self._merged_file = None

        if not isdefined(self.inputs.in_files):
            self._results["merged_file"] = False
            return runtime

        in_imgs = [nib.load(in_file) for in_file in self.inputs.in_files]

        if len(in_imgs) == 0:
            self._results["merged_file"] = False
            return runtime

        idim = dimensions.index(self.inputs.dimension)

        sizes = [niftidim(in_img, idim) for in_img in in_imgs]

        outshape = list(first(in_imgs).shape)
        while len(outshape) < idim + 1:
            outshape.append(1)

        outshape[idim] = sum(sizes)

        movd_shape = [outshape[idim], *outshape[:idim], *outshape[idim + 1:]]
        movd_outarr = np.zeros(movd_shape, dtype=np.float64)

        i = 0
        for in_img, size in zip(in_imgs, sizes):
            in_data = in_img.get_fdata()
            while len(in_data.shape) < idim + 1:
                in_data = np.expand_dims(in_data, len(in_data.shape))
            movd_outarr[i:i + size] = np.moveaxis(in_data, idim, 0)
            i += size

        outarr = np.moveaxis(movd_outarr, 0, idim)

        outimg = new_img_like(first(in_imgs), outarr, copy_header=True)

        merged_file = op.abspath("merged.nii.gz")
        nib.save(outimg, merged_file)

        self._results["merged_file"] = merged_file

        return runtime
Example #38
def _crop_img_to(img, slices, copy=True):
    """Crops image to a smaller size
    Crop img to size indicated by slices and adjust affine
    accordingly
    Parameters
    ----------
    img: Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        Img to be cropped. If slices has less entries than img
        has dimensions, the slices will be applied to the first len(slices)
        dimensions
    slices: list of slices
        Defines the range of the crop.
        E.g. [slice(20, 200), slice(40, 150), slice(0, 100)]
        defines a 3D cube
    copy: boolean
        Specifies whether cropped data is to be copied or not.
        Default: True
    Returns
    -------
    cropped_img: Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        Cropped version of the input image
    """

    img = check_niimg(img)

    data = img.get_data()
    affine = img.affine

    cropped_data = data[slices]
    if copy:
        cropped_data = cropped_data.copy()

    linear_part = affine[:3, :3]
    old_origin = affine[:3, 3]
    new_origin_voxel = np.array([s.start for s in slices])
    new_origin = old_origin + linear_part.dot(new_origin_voxel)

    new_affine = np.eye(4)
    new_affine[:3, :3] = linear_part
    new_affine[:3, 3] = new_origin

    return new_img_like(img, cropped_data, new_affine)
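
A minimal sketch of the crop and the accompanying affine shift (assuming nilearn's check_niimg and new_img_like are in scope, as in the snippet):

import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.random.rand(10, 10, 10), affine=np.eye(4))
cropped = _crop_img_to(img, [slice(2, 8), slice(2, 8), slice(2, 8)])
assert cropped.shape == (6, 6, 6)
# The origin moves by the crop offset: old_origin + linear_part @ (2, 2, 2)
assert np.array_equal(cropped.affine[:3, 3], [2, 2, 2])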
Example #39
File: utils.py  Project: dPys/PyNets
def segment_t1w(t1w, basename, nclass=3, beta=0.1, max_iter=100):
    """
    A function to use FSL's FAST to segment an anatomical
    image into GM, WM, and CSF prob maps.

    Parameters
    ----------
    t1w : str
        File path to an anatomical T1-weighted image.
    basename : str
        A basename to use for output files.

    Returns
    -------
    out : str
        File path to the probability map Nifti1Image consisting of GM, WM,
        and CSF in the 4th dimension.

    """
    from dipy.segment.tissue import TissueClassifierHMRF
    from nilearn.image import new_img_like

    print("Segmenting T1w...")

    t1w_img = nib.load(t1w)
    hmrf = TissueClassifierHMRF()
    PVE = hmrf.classify(t1w_img.get_fdata(), nclass, beta,
                        max_iter=max_iter)[2]

    new_img_like(t1w_img, PVE[..., 2]).to_filename(
        f"{os.path.dirname(os.path.abspath(t1w))}/{basename}_{'pve_0.nii.gz'}")

    new_img_like(t1w_img, PVE[..., 1]).to_filename(
        f"{os.path.dirname(os.path.abspath(t1w))}/{basename}_{'pve_1.nii.gz'}")

    new_img_like(t1w_img, PVE[..., 0]).to_filename(
        f"{os.path.dirname(os.path.abspath(t1w))}/{basename}_{'pve_2.nii.gz'}")

    out = {}  # the outputs
    out["wm_prob"] = f"{os.path.dirname(os.path.abspath(t1w))}/{basename}_" \
                     f"pve_0.nii.gz"
    out["gm_prob"] = f"{os.path.dirname(os.path.abspath(t1w))}/{basename}_" \
                     f"pve_1.nii.gz"
    out["csf_prob"] = f"{os.path.dirname(os.path.abspath(t1w))}/{basename}_" \
                      f"pve_2.nii.gz"

    del PVE
    t1w_img.uncache()
    gc.collect()

    return out
Example #40
def save2volgord_bci(data, out_dir, vol_name, bfp_path, fsl_path,
                     default_value):

    vol = load_img(
        join(bfp_path, 'supp_data', 'Vol_BCI_grayord32k.mask.nii.gz'))
    a = loadmat(join(bfp_path, 'supp_data', 'bci_grayordinates_vol_ind.mat'))

    ind = a['bci_vol_ind'] - 1
    gordvol = np.zeros(vol.shape) + default_value

    val_gind = ~np.isnan(ind)
    ind2 = np.int32(ind[val_gind])
    ind2 = np.unravel_index(np.int32(ind2), vol.shape, order='F')
    gordvol[ind2] = data[val_gind.squeeze()]

    grod = new_img_like(vol, gordvol)
    grod.set_data_dtype(np.float64)
    outfile = join(out_dir, vol_name + '.nii.gz')
    grod.to_filename(outfile)
Example #41
def _interpolate_frames(data: Union[Nifti1Image, npt.ArrayLike],
                        mask: npt.ArrayLike, censor: npt.ArrayLike,
                        t_r: float) -> Union[Nifti1Image, npt.ArrayLike]:
    '''
    Interpolates the frames listed in `censor` from the retained frames in `mask`
    using Lomb-Scargle non-uniform spectral interpolation

    Arguments:
        data: Input data to censor where last index denotes time-points
            [N1 x ,..., x T]
        mask: Non-censored array indices from uncensored data
        censor: Censored array indices that were removed from `img`
        t_r: Repetition time
    '''

    if not censor.any():
        return data

    is_nifti = False
    if isinstance(data, Nifti1Image):
        sgls = _image_to_signals(data)
        is_nifti = True
    else:
        sgls = data

    t_num_samples = len(censor) + len(mask)

    # Lombscargle interpolate expects already censored data
    if sgls.shape[1] == t_num_samples:
        sgls = sgls[:, mask]

    t = np.arange(0, t_num_samples) * t_r
    interp_vals = lombscargle_interpolate(t=t[mask], x=sgls, s=t)

    res = np.empty((sgls.shape[0], t_num_samples), dtype=sgls.dtype)
    res[:, mask] = sgls
    res[:, censor] = interp_vals[:, censor]
    res = res.reshape((*data.shape[:-1], t_num_samples))

    if is_nifti:
        return nimg.new_img_like(data, res, copy_header=True)

    return res
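
A hedged calling sketch with a plain array (lombscargle_interpolate comes from the same module; the censoring indices below are illustrative):

import numpy as np

t_r = 2.0
mask = np.setdiff1d(np.arange(100), np.arange(40, 50))  # retained frames
censor = np.arange(40, 50)                              # censored frames
data = np.random.rand(5, 100)                           # 5 signals x 100 frames
filled = _interpolate_frames(data, mask, censor, t_r)
assert filled.shape == (5, 100)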
Example #42
def split_bilateral_rois(maps_img):
    """Convenience function for splitting bilateral ROIs
    into two unilateral ROIs"""

    new_rois = []

    for map_img in iter_img(maps_img):
        for hemi in ["L", "R"]:
            hemi_mask = HemisphereMasker(hemisphere=hemi)
            hemi_mask.fit(map_img)
            if hemi_mask.mask_img_.get_data().sum() > 0:
                hemi_vectors = hemi_mask.transform(map_img)
                hemi_img = hemi_mask.inverse_transform(hemi_vectors)
                new_rois.append(hemi_img.get_data())

    new_maps_data = np.concatenate(new_rois, axis=3)
    new_maps_img = new_img_like(maps_img, data=new_maps_data, copy_header=True)
    print("Changed from %d ROIs to %d ROIs" % (maps_img.shape[-1], new_maps_img.shape[-1]))
    return new_maps_img
Example #43
def get_foreground_from_set_of_files(set_of_files,
                                     background_value=0,
                                     tolerance=0.00001,
                                     return_image=False):
    for i, image_file in enumerate(set_of_files):
        # "read_image" luc nay chi doc nguyen hinh, ko thuc hien xu ly gi ca !
        image = read_image(image_file)
        # La foreground khi gia tri khac 0.
        is_foreground = np.logical_or(
            image.get_data() < (background_value - tolerance),
            image.get_data() > (background_value + tolerance))
        if i == 0:
            foreground = np.zeros(is_foreground.shape, dtype=np.uint8)

        foreground[is_foreground] = 1
    if return_image:
        return new_img_like(image, foreground)
    else:
        return foreground
Example #44
    def __init__(
        self,
        sessions=None,
        smoothing_fwhm=None,
        standardize=False,
        detrend=False,
        low_pass=None,
        high_pass=None,
        t_r=None,
        target_affine=None,
        target_shape=None,
        mask_strategy="background",
        mask_args=None,
        sample_mask=None,
        memory_level=1,
        memory=Memory(cachedir=None),
        verbose=0,
    ):

        # Use grey matter mask computed for Neurovault analysis
        # ('https://github.com/NeuroVault/neurovault_analysis/')
        target_img = nib.load(fetch_grey_matter_mask())
        grey_voxels = (target_img.get_data() > 0).astype(int)
        mask_img = new_img_like(target_img, grey_voxels, copy_header=True)

        super(GreyMatterNiftiMasker, self).__init__(
            mask_img=mask_img,
            target_affine=mask_img.affine,
            target_shape=mask_img.shape,
            sessions=sessions,
            smoothing_fwhm=smoothing_fwhm,
            standardize=standardize,
            detrend=detrend,
            low_pass=low_pass,
            high_pass=high_pass,
            t_r=t_r,
            mask_strategy=mask_strategy,
            mask_args=mask_args,
            sample_mask=sample_mask,
            memory_level=memory_level,
            memory=memory,
            verbose=verbose,
        )
Example #45
def test_view_stat_map():
    mni = datasets.load_mni152_template()
    # Create a fake functional image by resampling the template
    img = image.resample_img(mni, target_affine=3 * np.eye(3))
    html = html_stat_map.view_stat_map(img)
    _check_html(html)
    html = html_stat_map.view_stat_map(img, threshold='95%')
    _check_html(html)
    html = html_stat_map.view_stat_map(img, bg_img=mni)
    _check_html(html)
    html = html_stat_map.view_stat_map(img, bg_img=None)
    _check_html(html)
    html = html_stat_map.view_stat_map(img, threshold=2., vmax=4.)
    _check_html(html)
    html = html_stat_map.view_stat_map(img, symmetric_cmap=False)
    img_4d = image.new_img_like(img, img.get_data()[:, :, :, np.newaxis])
    assert len(img_4d.shape) == 4
    html = html_stat_map.view_stat_map(img_4d, threshold=2., vmax=4.)
    _check_html(html)
Example #46
def get_ROI_from_parcel(parcel, ROI, threshold=0):
    """
    Extracts an ROI from a parcel.
    If a 4D probabilistic parcel, a threshold must be defined to determine the cutoff for the ROI.
    If a 3D parcel (atlas), threshold is ignored.

    Args:
        parcel: 3D or 4D parcel
        ROI: index of the ROI, 0-indexed. For 4D, extract slice ROI along the 4th dimension.
                If 3D, find values that are equal to ROI+1 (assumes 0 is used to reflect background)
    """
    if len(parcel.shape) == 4:
        # convert a probabilistic parcellation into an ROI mask
        roi_mask = parcel.get_fdata()[:, :, :, ROI] > threshold
    else:
        roi_mask = parcel.get_fdata() == (ROI + 1)
    assert np.sum(roi_mask) != 0, "ROI doesn't exist. Returned empty map"
    roi_mask = image.new_img_like(parcel, roi_mask)
    return roi_mask
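
A minimal sketch for the 3D (atlas) branch (assuming from nilearn import image, as in the snippet):

import numpy as np
import nibabel as nib

labels = np.zeros((8, 8, 8), dtype=np.int16)
labels[2:5, 2:5, 2:5] = 3
atlas = nib.Nifti1Image(labels, affine=np.eye(4))
roi = get_ROI_from_parcel(atlas, ROI=2)  # 0-indexed, so this selects label 3
assert roi.get_fdata().sum() == 27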
Example #47
    def _run_interface(self, runtime):
        mask_img = nib.load(self.inputs.mask_file)
        mask = np.asanyarray(mask_img.dataobj).astype(bool)
        mask = np.squeeze(mask)

        min_coverage = self.inputs.min_coverage
        if not isdefined(min_coverage) or not isinstance(min_coverage, float):
            min_coverage = 1.0
        self._min_coverage = min_coverage

        self._out_files = []
        self._coverage = []

        for in_file in self.inputs.in_files:
            in_img = nib.load(in_file)
            in_bool = np.asanyarray(in_img.dataobj).astype(bool)
            in_bool = np.squeeze(in_bool)

            unmasked_n_voxels = np.count_nonzero(in_bool)

            out_bool = np.logical_and(in_bool, mask)

            masked_n_voxels = np.count_nonzero(out_bool)

            coverage = float(masked_n_voxels) / float(unmasked_n_voxels)

            self._coverage.append(coverage)

            if coverage < min_coverage:
                self._out_files.append(None)

                continue

            stem, _ = split_ext(in_file)

            out_file = Path.cwd() / f"{stem}_masked.nii.gz"

            out_img = new_img_like(in_img, out_bool, copy_header=True)
            nib.save(out_img, out_file)

            self._out_files.append(out_file)

        return runtime
Example #48
File: merge.py  Project: HALFpipe/Atlases
    def write(self, out_prefix):
        ties = np.sum(self.masks.to_list(), axis=0, dtype=np.uint16) > 1

        if np.any(ties):
            warn(
                f"Atlases to be merged have {np.sum(ties):d} ties. "
                "Resolving by closest center of mass"
            )

            centers_of_mass = np.vstack([*map(center_of_mass, self.masks)])

            for coordinate in zip(*np.nonzero(ties)):
                candidates = [*map(lambda m: m[coordinate], self.masks)]
                distance = cdist(
                    np.asarray(coordinate, dtype=np.float64)[np.newaxis, :],
                    centers_of_mass[candidates, :]
                )
                indices = self.masks.index[candidates]
                winner_index = indices[np.argmin(distance)]
                warn(
                    f"Assigning coordinate {coordinate} to "
                    f"{self.labels.loc[winner_index]:s}"
                )
                for index in indices:
                    if index == winner_index:
                        continue
                    self.masks.loc[index][coordinate] = False

        assert np.all(
            np.sum(self.masks.to_list(), axis=0, dtype=np.uint16) <= 1
        )  # double check ties

        atlas = np.sum(
            [*map(lambda t: t[0] * t[1], self.masks.items())],
            axis=0,
            dtype=np.uint16
        )

        out_atlas_img = new_img_like(self.fixed_img, atlas, copy_header=True)
        out_atlas_img.header["descrip"] = f"Generated by HALFpipe/Atlases version {__version__}"
        nib.save(out_atlas_img, f"{out_prefix}.nii.gz")

        self.labels.to_csv(f"{out_prefix}.txt", sep="\t", header=False)
Example #49
def test_view_img_on_surf():
    img = _get_img()
    fsaverage = dict(fetch_surf_fsaverage())
    html = html_surface.view_img_on_surf(img, threshold='92.3%')
    check_html(html)
    html = html_surface.view_img_on_surf(img, threshold=0, surf_mesh=fsaverage)
    check_html(html)
    html = html_surface.view_img_on_surf(img, threshold=.4)
    check_html(html)
    html = html_surface.view_img_on_surf(
        img, threshold=.4, cmap='hot', black_bg=True)
    check_html(html)
    html = html_surface.view_img_on_surf(img, surf_mesh='fsaverage')
    check_html(html)
    assert_raises(DimensionError, html_surface.view_img_on_surf, [img, img])
    img_4d = image.new_img_like(img, img.get_data()[:, :, :, np.newaxis])
    assert len(img_4d.shape) == 4
    html = html_surface.view_img_on_surf(img, threshold='92.3%')
    check_html(html)
Example #50
    def __init__(
        self,
        sessions=None,
        smoothing_fwhm=None,
        standardize=False,
        detrend=False,
        low_pass=None,
        high_pass=None,
        t_r=None,
        target_affine=None,
        target_shape=None,
        mask_strategy="background",
        mask_args=None,
        sample_mask=None,
        memory_level=1,
        memory=Memory(cachedir=None),
        verbose=0,
    ):

        # Create grey matter mask from mni template
        target_img = datasets.load_mni152_template()
        grey_voxels = (target_img.get_data() > 0).astype(int)
        mask_img = new_img_like(target_img, grey_voxels, copy_header=True)

        super(MniNiftiMasker, self).__init__(
            mask_img=mask_img,
            target_affine=mask_img.affine,
            target_shape=mask_img.shape,
            sessions=sessions,
            smoothing_fwhm=smoothing_fwhm,
            standardize=standardize,
            detrend=detrend,
            low_pass=low_pass,
            high_pass=high_pass,
            t_r=t_r,
            mask_strategy=mask_strategy,
            mask_args=mask_args,
            sample_mask=sample_mask,
            memory_level=memory_level,
            memory=memory,
            verbose=verbose,
        )
Example #51
def create_parcel_atlas(parcel_list):
    """
    Create a 3D Nifti1Image atlas parcellation of consecutive integer intensities from an input list of ROIs.

    Parameters
    ----------
    parcel_list : list
        List of 3D boolean numpy arrays or binarized Nifti1Images corresponding to ROI masks.

    Returns
    -------
    net_parcels_map_nifti : Nifti1Image
        A nibabel-based nifti image consisting of a 3D array with integer voxel intensities corresponding to ROI
        membership.
    parcel_list_exp : list
        List of 3D boolean numpy arrays or binarized Nifti1Images corresponding to ROI masks, prepended with a
        background image of zeros.
    """
    import gc
    from nilearn.image import new_img_like, concat_imgs

    parcel_list_exp = [
        new_img_like(parcel_list[0], np.zeros(parcel_list[0].shape,
                                              dtype=bool))
    ] + parcel_list
    concatted_parcels = concat_imgs(parcel_list_exp, dtype=np.float32)
    parcel_list_exp = np.array(range(len(parcel_list_exp))).astype("float32")
    parcel_sum = np.sum(parcel_list_exp *
                        np.asarray(concatted_parcels.dataobj),
                        axis=3,
                        dtype=np.uint16)
    par_max = np.max(parcel_list_exp)
    outs = np.unique(parcel_sum[parcel_sum > par_max])
    # Set overlapping cases to zero.
    for out in outs:
        parcel_sum[parcel_sum == out] = 0
    net_parcels_map_nifti = nib.Nifti1Image(parcel_sum,
                                            affine=parcel_list[0].affine)
    del concatted_parcels, parcel_sum, parcel_list
    gc.collect()

    return net_parcels_map_nifti, parcel_list_exp
Example #52
def map_plane(estimates,
              atlas,
              path,
              suffix="",
              plane="z",
              cbar=False,
              annotate=False,
              vmin=None,
              vmax=None,
              cmaps=[],
              print_fig=True,
              verbose=False):

    from nilearn import image, plotting

    for f, feature in enumerate(estimates.columns):
        stat_map = image.copy_img(atlas).get_data()
        data = estimates[feature]
        if verbose:
            print("{:20s} Min: {:6.4f}  Mean: {:6.4f}  Max: {:6.4f}".format(
                feature, min(data), np.mean(data), max(data)))
        if not verbose and print_fig:
            print("\n{}".format(feature))
        for i, value in enumerate(data):
            stat_map[stat_map == i + 1] = value
        stat_map = image.new_img_like(atlas, stat_map)
        display = plotting.plot_stat_map(stat_map,
                                         display_mode=plane,
                                         symmetric_cbar=False,
                                         colorbar=cbar,
                                         cmap=cmaps[f],
                                         threshold=vmin,
                                         vmax=vmax,
                                         alpha=0.5,
                                         annotate=annotate,
                                         draw_cross=False)
        file_name = "{}/{}{}.png".format(path, feature, suffix)
        display.savefig(file_name, dpi=250)
        transparent_background(file_name)
        if print_fig:
            plotting.show()
        display.close()
Example #53
def gen_img_list(uatlas_select):
    from nilearn.image import new_img_like
    bna_img = nib.load(uatlas_select)
    bna_data = np.round(bna_img.get_data(), 1)
    # Get an array of unique parcels
    bna_data_for_coords_uniq = np.unique(bna_data)
    # Number of parcels:
    par_max = len(bna_data_for_coords_uniq) - 1
    bna_data = bna_data.astype('int16')
    img_stack = []
    for idx in range(1, par_max + 1):
        roi_img = bna_data == bna_data_for_coords_uniq[idx].astype('int16')
        roi_img = roi_img.astype('int16')
        img_stack.append(roi_img)
    img_stack = np.array(img_stack).astype('int16')
    img_list = []
    for idy in range(par_max):
        roi_img_nifti = new_img_like(bna_img, img_stack[idy])
        img_list.append(roi_img_nifti)
    return img_list
Example #54
def _region_extractor_labels_image(atlas, extract_type='connected_components',
                                   min_region_size=0):
    """ Function takes atlas image denoted as labels for each region
        and then imposes region extraction algorithm on the image to
        split them into regions apart.

    Parameters
    ----------
    atlas : 3D Nifti-like image
        An image contains labelled regions.

    extract_type : 'connected_components', 'local_regions'
        See nilearn.regions.connected_regions for full documentation

    min_region_size : in mm^3
        Minimum size of voxels in a region to be kept.

    """
    atlas_img = check_niimg(atlas)
    atlas_data = _safe_get_data(atlas_img)
    affine = atlas_img.affine

    n_labels = np.unique(np.asarray(atlas_data))

    reg_imgs = []
    for label_id in n_labels:
        if label_id == 0:
            continue
        print("[Region Extraction] Processing with label {0}".format(label_id))
        region = (atlas_data == label_id) * atlas_data
        reg_img = new_img_like(atlas_img, region)
        regions, _ = connected_regions(reg_img, extract_type=extract_type,
                                       min_region_size=min_region_size)
        reg_imgs.append(regions)
    regions_extracted = concat_niimgs(reg_imgs)

    return regions_extracted, n_labels
Example #55
def test_json_view_data():

    # simple simulated data for stat_img and background
    bg_img, data = _simulate_img()
    stat_map_img, data = _simulate_img()

    # make a mask
    mask_img = new_img_like(stat_map_img, data > 0, stat_map_img.affine)

    # Get color bar and data ranges
    colors = colorscale('cold_hot', data.ravel(), threshold=0,
                        symmetric_cmap=True, vmax=1)

    # Build a sprite
    json_view = html_stat_map._json_view_data(
        bg_img, stat_map_img, mask_img, bg_min=0, bg_max=1, colors=colors,
        cmap='cold_hot', colorbar=True)

    # Check the presence of critical fields
    assert isinstance(json_view['bg_base64'], _basestring)
    assert isinstance(json_view['stat_map_base64'], _basestring)
    assert isinstance(json_view['cm_base64'], _basestring)

    return json_view, data
Example #56
def plot_wholebrain_test_stat(path, roi_img, name, type='contrast', l1=0, l2=1,
                              plot=True, save=None, burn=0.5, verbose=False,
                              invert=False, **kwargs):
    ''' Plot cluster-by-cluster z-scores across the whole brain for a
    paired-samples contrast between levels l1 and l2 of the named factor.
    Args:
            path (str): path to all pickled PyMC3 trace results (one file per
                    region or cluster).
            roi_img (str): path to map containing cluster/region labels.
            name (str): name of factor whose levels we want to compare.
            type (str): whether to search for the named factor as a 'contrast'
                or as a 'term'.
            l1 (int): index of first level to compare
            l2 (int): index of second level to compare
            plot (bool): Whether or not to plot the generated image.
            save (str): Optional filename to write the resulting image to.
            burn (float or int): burn-in (i.e., number of samples to drop
                before comparison). If a float, interpreted as a proportion of
                all available samples. If an int, interpreted as the number of
                samples to drop.
            invert (bool): whether to invert the sign of the test statistic
            kwargs (dict): Any additional keywords to pass on to the nilearn
                    plot_stat_map function.
    '''

    roi_img = nb.load(roi_img)          # Load label image
    label_map = roi_img.get_data().round().astype(int)
    result = np.zeros(roi_img.shape)        # Empty volume for results

    # Loop over region trace files
    roi_traces = glob(path)

    for rt in roi_traces:

        label = int(re.search(r'(\d+)', basename(rt)).group(1))
        obj = pickle.load(open(rt, 'rb'))

        if hasattr(obj, 'trace'):

            trace = obj.trace

            if not hasattr(trace, name):
                raise AttributeError("The MultiTrace object does not contain the "
                                     "variable '%s'. Please make sure you have "
                                     "the right name." % name)

            if isinstance(burn, float):
                _burn = round(len(trace[name]) * burn)
            else:
                _burn = burn

            samp1 = trace[name][_burn:, l1]
            if len(samp1.shape) > 1:
                samp1 = samp1.mean(1)

            if l2 is None:
                mu = samp1
            else:
                samp2 = trace[name][_burn:, l2]
                if len(samp2.shape) > 1:
                    samp2 = samp2.mean(1)
                mu = samp1 - samp2

            z = mu.mean() / mu.std()

        else:
            # if name not in obj['contrasts']:
            if False:
                raise AttributeError("The trace summary object does not contain the "
                                     "variable '%s'. Please make sure you have "
                                     "the right name." % name)
            if type == 'contrast':
                z = obj['contrasts'][name][2]
            else:
                z = obj['terms'][name]['z']

        # Assign value to the correct voxels in the output image
        if invert:
            z *= -1
        result[label_map == label] = z

    # Write out image
    output_img = new_img_like(roi_img, result, roi_img.affine)
    if save is not None:
        output_img.to_filename(save)

    # Plot
    if plot:
        plotting.plot_stat_map(output_img, **kwargs)

    return output_img
Example #57
log_p_values = -np.log10(p_values)
# Set NaN values to zero and cap the scores at 10
log_p_values[np.isnan(log_p_values)] = 0.
log_p_values[log_p_values > 10.] = 10.

# Visualize statistical p-values using plotting function `plot_stat_map`
from nilearn.plotting import plot_stat_map

# Before visualizing, we transform the computed p-values to Nifti-like image
# using function `new_img_like` from nilearn.
from nilearn.image import new_img_like

# The first argument is a reference image and the second is the p-values data to
# convert into a new image as output. The new image will have the same header
# information as the reference image.
log_p_values_img = new_img_like(fmri_img, log_p_values)

# Now, we visualize the log p-values image on the functional mean image as
# background, with coordinates given manually and a colorbar on the right side
# of the plot (colorbar=True by default)
plot_stat_map(log_p_values_img, mean_img,
              title="p-values", cut_coords=cut_coords)

#############################################################################
# **Selecting features using f_classif**: Feature selection method is also
# available in the scikit-learn Python package, where it has been extended to
# several classes, using the `sklearn.feature_selection.f_classif` function.

##############################################################################
# Build a mask from this statistical map (Improving the quality of the mask)
# --------------------------------------------------------------------------
Example #58
l, n = find_region_names_using_cut_coords(dmn_coords, atlas_img,
                                          labels=labels)

# where 'l' contains the new labels, generated according to the given atlas
# labels and coordinates
new_labels = l

# where 'n' contains the names of the labelled brain regions, generated
# according to the given labels
region_names_involved = n

######################################################################
# Let's visualize
from nilearn.image import load_img
from nilearn import plotting
from nilearn.image import new_img_like

atlas_img = load_img(atlas_img)
affine = atlas_img.affine
atlas_data = atlas_img.get_data()

for i, this_label in enumerate(new_labels):
    this_data = (atlas_data == this_label)
    this_data = this_data.astype(int)
    this_img = new_img_like(atlas_img, this_data, affine)
    plotting.plot_roi(this_img, cut_coords=dmn_coords[i],
                      title=region_names_involved[i])

plotting.show()
Example #59
def cast_img(img, dtype=np.float32):
    """ Cast image to the specified dtype"""
    img = check_niimg(img)
    img_data = img.get_data().astype(dtype)
    return new_img_like(img, img_data, copy_header=True)
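
A minimal usage sketch (same nilearn imports as the snippet, and a nibabel version where get_data() is still available):

import numpy as np
import nibabel as nib

img64 = nib.Nifti1Image(np.arange(8, dtype=np.float64).reshape(2, 2, 2),
                        affine=np.eye(4))
img32 = cast_img(img64, dtype=np.float32)
assert np.asarray(img32.dataobj).dtype == np.float32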