Example #1
def test_mask_4d():
    # Dummy mask
    mask = np.zeros((10, 10, 10), dtype=int)
    mask[3:7, 3:7, 3:7] = 1
    mask_bool = mask.astype(bool)
    mask_img = Nifti1Image(mask, np.eye(4))

    # Dummy data
    data = np.zeros((10, 10, 10, 3), dtype=int)
    data[..., 0] = 1
    data[..., 1] = 2
    data[..., 2] = 3
    data_img_4d = Nifti1Image(data, np.eye(4))
    data_imgs = [index_img(data_img_4d, 0), index_img(data_img_4d, 1),
                 index_img(data_img_4d, 2)]

    # check whether transform is indeed selecting niimgs subset
    sample_mask = np.array([0, 2])
    masker = NiftiMasker(mask_img=mask_img, sample_mask=sample_mask)
    masker.fit()
    data_trans = masker.transform(data_imgs)
    data_trans_img = index_img(data_img_4d, sample_mask)
    data_trans_direct = data_trans_img.get_data()[mask_bool, :]
    data_trans_direct = np.swapaxes(data_trans_direct, 0, 1)
    assert_array_equal(data_trans, data_trans_direct)

    masker = NiftiMasker(mask_img=mask_img, sample_mask=sample_mask)
    masker.fit()
    data_trans2 = masker.transform(data_img_4d)
    assert_array_equal(data_trans2, data_trans_direct)
Example #2
    def _post_run_hook(self, runtime):
        self._fixed_image_label = "after"
        self._moving_image_label = "before"
        self._fixed_image = index_img(self.aggregate_outputs(runtime=runtime).out_corrected, 0)
        self._moving_image = index_img(self.inputs.in_files[0], 0)
        self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None
        NIWORKFLOWS_LOG.info('Report - setting corrected (%s) and warped (%s) images',
                             self._fixed_image, self._moving_image)

        return super(ApplyTOPUPRPT, self)._post_run_hook(runtime)
Example #3
def ica_vis(subj_num):
  # Use the mean as a background
  mean_img_1 = image.mean_img(BOLD_file_1)
  mean_img_2 = image.mean_img(BOLD_file_2)
  mean_img_3 = image.mean_img(BOLD_file_3)

  plot_stat_map(image.index_img(component_img_1, 5), mean_img_1,
                output_file=os.path.join(data_path, 'sub' + subj_num + '_BOLD',
                                         'task001_run001' + 'ica_1' + '.jpg'))
  plot_stat_map(image.index_img(component_img_1, 12), mean_img_1,
                output_file=os.path.join(data_path, 'sub' + subj_num + '_BOLD',
                                         'task001_run001' + 'ica_2' + '.jpg'))

  plot_stat_map(image.index_img(component_img_2, 5), mean_img_2,
                output_file=os.path.join(data_path, 'sub' + subj_num + '_BOLD',
                                         'task002_run001' + 'ica_1' + '.jpg'))
  plot_stat_map(image.index_img(component_img_2, 12), mean_img_2,
                output_file=os.path.join(data_path, 'sub' + subj_num + '_BOLD',
                                         'task002_run001' + 'ica_2' + '.jpg'))
Example #4
def __main__():
    volume = image.index_img(
        "E:\\Users\\Niall\\Documents\\Computer Science\\FinalYearProject"
        "\\data\\ds105_raw\\ds105\\sub001\\BOLD\\task001_run001\\bold.nii.gz",
        0)
    smoothed_img = image.smooth_img(volume, fwhm=5)

    # print("Read the images");

    plotting.plot_glass_brain(volume, title='plot_glass_brain',
                              black_bg=True, display_mode='xz')
    plotting.plot_glass_brain(volume, title='plot_glass_brain',
                              black_bg=False, display_mode='xz')

    plt.show()

    # print("Finished");



    # generate some numbers
    t = np.linspace(1, 10, 2000)  # 2000 points between 1 and 10

    # plot the graph
    plt.plot(t, np.cos(t))
    plt.ylabel('Subject Response')
    plt.show()
Example #5
def concat_RL(R_img, L_img, rl_idx_pair, rl_sign_pair=None):
    """
    Given R and L ICA images and their component index pairs, concatenate images to
    create bilateral image using the index pairs. Sign flipping can be specified in rl_sign_pair.

    """
    # Make sure images have same number of components and indices are less than the n_components
    assert R_img.shape == L_img.shape
    n_components = R_img.shape[3]
    assert np.max(rl_idx_pair) < n_components
    n_rl_imgs = len(rl_idx_pair[0])
    assert n_rl_imgs == len(rl_idx_pair[1])
    if rl_sign_pair:
        assert n_rl_imgs == len(rl_sign_pair[0])
        assert n_rl_imgs == len(rl_sign_pair[1])

    # Match index pairs and combine
    terms = R_img.terms.keys()
    rl_imgs = []
    rl_term_vals = []

    for i in range(n_rl_imgs):
        rci, lci = rl_idx_pair[0][i], rl_idx_pair[1][i]
        R_comp_img = index_img(R_img, rci)
        L_comp_img = index_img(L_img, lci)

        # sign flipping
        r_sign = rl_sign_pair[0][i] if rl_sign_pair else 1
        l_sign = rl_sign_pair[1][i] if rl_sign_pair else 1

        R_comp_img = math_img("%d*img" % (r_sign), img=R_comp_img)
        L_comp_img = math_img("%d*img" % (l_sign), img=L_comp_img)

        # combine images
        rl_imgs.append(math_img("r+l", r=R_comp_img, l=L_comp_img))

        # combine terms
        if terms:
            r_ic_terms, r_ic_term_vals = get_ic_terms(R_img.terms, rci, sign=r_sign)
            l_ic_terms, l_ic_term_vals = get_ic_terms(L_img.terms, lci, sign=l_sign)
            rl_term_vals.append((r_ic_term_vals + l_ic_term_vals) / 2)

    # Squash into single image
    concat_img = nib.concat_images(rl_imgs)
    if terms:
        concat_img.terms = dict(zip(terms, np.asarray(rl_term_vals).T))
    return concat_img
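# A minimal usage sketch for concat_RL with dummy data (the `.terms` handling
# is skipped by attaching empty term dicts). Assumes index_img, math_img, and
# nib are imported as in the function above.
import numpy as np
import nibabel as nib

rng = np.random.RandomState(0)
R_img = nib.Nifti1Image(rng.rand(4, 4, 4, 5), np.eye(4))
L_img = nib.Nifti1Image(rng.rand(4, 4, 4, 5), np.eye(4))
R_img.terms = {}  # no term data in this sketch
L_img.terms = {}

# Pair R component 0 with L component 0, and a sign-flipped R component 2
# with L component 3.
bilateral = concat_RL(R_img, L_img,
                      rl_idx_pair=([0, 2], [0, 3]),
                      rl_sign_pair=([1, -1], [1, 1]))
print(bilateral.shape)  # (4, 4, 4, 2)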
Example #6
    def openBold(self):
        file = self.onOpen([('NIFTI files', '*.nii.gz'), ('All files', '*')])
        print("anatomy file: " + file)

        bold = image.index_img(file, 0)

        plotting.plot_glass_brain(bold, title='glass_brain',
                                  black_bg=True, display_mode='ortho')
        plt.show()
Example #7
def mean_nodiff_fct(in_file, bval_file, nodiff_b=0):
    import os, numpy as np
    from nilearn import image
    bvals = np.loadtxt(bval_file)
    nodiff_index = bvals <= nodiff_b
    nodiff_img = image.index_img(in_file, nodiff_index)
    mean_nodiff_img = image.mean_img(nodiff_img)
    out_file = os.path.abspath('mean_nodiff.nii.gz')
    mean_nodiff_img.to_filename(out_file)
    return out_file
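# mean_nodiff_fct does its imports inside the function body, which is the
# self-contained style nipype's Function interface expects. A wrapping sketch
# (the node name and input value are made up here):
from nipype import Node
from nipype.interfaces.utility import Function

mean_nodiff = Node(
    Function(input_names=['in_file', 'bval_file', 'nodiff_b'],
             output_names=['out_file'],
             function=mean_nodiff_fct),
    name='mean_nodiff')
mean_nodiff.inputs.nodiff_b = 0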
Example #8
    def plotCorrelationResults(self, background, overlay, desired_resolution_file_location):
        image_to_resample = nibabel.load(background)
        image_to_use_for_sample = image.index_img(desired_resolution_file_location, 0)
        resampled_background = resample_img(
            image_to_resample,
            target_affine=image_to_use_for_sample.get_affine(),
            target_shape=image_to_use_for_sample.shape)

        mri_args = {
            'background' : resampled_background,
            'cmap_bg' : 'gray',
            'cmap_overlay' : 'PiYG', # YlOrRd_r # pl.cm.autumn
            'interactive' : cfg.getboolean('examples', 'interactive', True),
            }

        plot_lightbox(overlay=overlay, vlim=(-1.0, 1.0), do_stretch_colors=True, **mri_args)
Example #9
def run(idx, reduction, alpha, mask, raw, n_components, init, func_filenames):
    output_dir = join(trace_folder, 'experiment_%i' % idx)
    try:
        os.makedirs(output_dir)
    except OSError:
        pass
    dict_fact = SpcaFmri(mask=mask,
                         smoothing_fwhm=3,
                         batch_size=40,
                         shelve=not raw,
                         n_components=n_components,
                         replacement=False,
                         dict_init=fetch_atlas_smith_2009().rsn70 if
                         init else None,
                         reduction=reduction,
                         alpha=alpha,
                         random_state=0,
                         n_epochs=2,
                         l1_ratio=0.5,
                         backend='c',
                         memory=expanduser("~/nilearn_cache"), memory_level=2,
                         verbose=5,
                         n_jobs=1,
                         trace_folder=output_dir
                         )

    print('[Example] Learning maps')
    t0 = time.time()
    dict_fact.fit(func_filenames, raw=raw)
    t1 = time.time() - t0
    print('[Example] Dumping results')
    # Decomposition estimator embeds their own masker
    masker = dict_fact.masker_
    components_img = masker.inverse_transform(dict_fact.components_)
    components_img.to_filename(join(output_dir, 'components_final.nii.gz'))
    print('[Example] Run in %.2f s' % t1)
    # Show components from both methods using 4D plotting tools
    import matplotlib.pyplot as plt
    from nilearn.plotting import plot_prob_atlas, plot_stat_map, show

    print('[Example] Displaying')
    fig, axes = plt.subplots(2, 1)
    plot_prob_atlas(components_img, view_type="filled_contours",
                    axes=axes[0])
    plot_stat_map(index_img(components_img, 0),
                  axes=axes[1],
                  colorbar=False,
                  threshold=0)
    plt.savefig(join(output_dir, 'components.pdf'))
    show()
Example #10
def test_appy_realigment_and_extract_realignment_params_APIs():
    # setup
    n_scans = 10
    translation = np.array([1, 2, 3])  # mm
    rotation = np.array([3, 2, 1])  # degrees

    # create data
    affine = np.array([[-3., 0., 0., 96.],
                       [0., 3., 0., -96.],
                       [0., 0., 3., -69.],
                       [0., 0., 0., 1.]])
    film = create_random_image(shape=[16, 16, 16, n_scans], affine=affine)

    # there should be no motion
    for t in range(n_scans):
        np.testing.assert_array_equal(
            extract_realignment_params(index_img(film, t), index_img(film, 0)),
            get_initial_motion_params())

    # now introduce motion into other vols relative to the first vol
    rp = np.ndarray((n_scans, 12))
    for t in range(n_scans):
        rp[t, ...] = get_initial_motion_params()
        rp[t, :3] += _make_vol_specific_translation(translation, n_scans, t)
        rp[t, 3:6] += _make_vol_specific_rotation(rotation, n_scans, t)

    # apply motion (noise)
    film = apply_realignment(film, rp)

    # check that motion has been induced
    for t in range(n_scans):
        _tmp = get_initial_motion_params()
        _tmp[:3] += _make_vol_specific_translation(translation, n_scans, t)
        _tmp[3:6] += _make_vol_specific_rotation(rotation, n_scans, t)

        np.testing.assert_array_almost_equal(
            extract_realignment_params(film[t], film[0]), _tmp)
Example #11
def _3d_in_file(in_file):
    '''If self.inputs.in_file is 3D, return it.
    If 4D, pick an arbitrary volume and return that.

    If in_file is a list of files, return an arbitrary file from
    the list, and an arbitrary volume from that file.
    '''

    in_file = filemanip.filename_to_list(in_file)[0]

    try:
        in_file = nb.load(in_file)
    except AttributeError:
        in_file = in_file

    if in_file.get_data().ndim == 3:
        return in_file

    return nlimage.index_img(in_file, 0)
Example #12
    def display_results(self):

        # load the high-res anatomy - this needs to be downsampled to fit the resolution of our data
        original_anatomy = nibabel.load(self.processing_model.processing_request_model.anatomy_location)

        # we will use the first volume/sample from the bold data
        bold_data_sample = image.index_img(self.processing_model.processing_request_model.bold_location, 0)

        # resample the original anatomy to match the bold data
        resampled_anatomy = resample_img(original_anatomy,
                                         target_affine=bold_data_sample.get_affine(),
                                         target_shape=bold_data_sample.shape)

        mri_args = {
            'background' : resampled_anatomy,
            'cmap_bg' : 'gray',
            'cmap_overlay' : 'PiYG', # YlOrRd_r # pl.cm.autumn
            'interactive' : cfg.getboolean('examples', 'interactive', True),
            }

        self.result_view.plot_results(self.processing_model.result, mri_args)
Example #13
    def create_imaging_pic(file_info):
        """
        Creates the preview pic that will show in the imaging browser view session
        page. This pic will be stored in the data_dir/pic folder

        :param file_info: dictionary with file information (path, file_id, cand_id...)
         :type file_info: dict

        :return: path to the created pic
         :rtype: str
        """

        cand_id    = file_info['cand_id']
        file_path  = file_info['data_dir_path'] + file_info['file_rel_path']
        is_4d_data = file_info['is_4D_dataset']
        file_id    = file_info['file_id']

        pic_name     = os.path.basename(file_path)
        pic_name     = re.sub(r"\.nii(\.gz)?", '_' + str(file_id) + '_check.png', pic_name)
        pic_rel_path = str(cand_id) + '/' + pic_name

        # create the candID directory where the pic will go if it does not already exist
        pic_dir = file_info['data_dir_path'] + 'pic/' + str(cand_id)
        if not os.path.exists(pic_dir):
            os.mkdir(pic_dir)

        volume = image.index_img(file_path, 0) if is_4d_data else file_path

        nilearn.plotting.plot_anat(
            anat_img     = volume,
            output_file  = file_info['data_dir_path'] + 'pic/' + pic_rel_path,
            display_mode = 'ortho',
            black_bg     = 1,
            draw_cross   = 0,
            annotate     = 0
        )

        return pic_rel_path
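# An illustrative `file_info` payload (all values made up):
file_info = {
    'cand_id': 300001,
    'data_dir_path': '/data/project/data/',
    'file_rel_path': 'sub-300001_ses-V1_task-rest_bold.nii.gz',
    'is_4D_dataset': True,
    'file_id': 42,
}
# create_imaging_pic(file_info) would then write
# /data/project/data/pic/300001/sub-300001_ses-V1_task-rest_bold_42_check.png
# and return '300001/sub-300001_ses-V1_task-rest_bold_42_check.png'.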
Example #14
def get_roi_mask(atlas_maps, index_mask, labels, path=None, global_mask=None):
    """Return the Niftimasker object for a given ROI based on an atlas.
    Optionally resampled based on a global masker.  
    """
    if path is None:
        path = os.path.join(PROJECT_PATH, 'derivatives/fMRI/ROI_masks', labels[index_mask+1])
        check_folder(path)
    if os.path.exists(path + '.nii.gz') and os.path.exists(path + '.yml'):
        masker = load_masker(path)
    else:
        mask = math_img('img > 50', img=index_img(atlas_maps, index_mask))
        if global_mask:
            global_masker = nib.load(global_mask + '.nii.gz')
            mask = resample_to_img(mask, global_masker, interpolation='nearest')
            params = read_yaml(global_mask + '.yml')
            params['detrend'] = False
            params['standardize'] = False
        masker = NiftiMasker(mask)
        if global_mask:
            masker.set_params(**params)
        masker.fit()
        save_masker(masker, path)
    return masker
Example #15
def nii_to_image_converter(write_dir, label, **template_dict):
    """This function converts nifti to base64 string"""
    import nibabel as nib
    from nilearn import plotting, image
    import os, base64, glob

    file = glob.glob(os.path.join(write_dir, template_dict['display_nifti']))
    mask = image.index_img(file[0], int(
        (image.load_img(file[0]).shape[3]) / 2))
    new_data = mask.get_data()

    clipped_img = nib.Nifti1Image(new_data, mask.affine, mask.header)

    plotting.plot_anat(
        clipped_img,
        cut_coords=(0, 0, 0),
        annotate=False,
        draw_cross=False,
        output_file=os.path.join(write_dir,
                                 template_dict['display_image_name']),
        display_mode='ortho',
        title=label + ' ' + template_dict['display_pngimage_name'],
        colorbar=False)
Example #16
def _3d_in_file(in_file):
    """ if self.inputs.in_file is 3d, return it.
    if 4d, pick an arbitrary volume and return that.

    if in_file is a list of files, return an arbitrary file from
    the list, and an arbitrary volume from that file
    """
    from nipype.utils import filemanip

    if not isinstance(in_file, nib.filebasedimages.SerializableImage):
        in_file = filemanip.filename_to_list(in_file)[0]

    try:
        in_file = nib.load(in_file)
    except (AttributeError, TypeError):
        # in_file is already an in-memory image object
        pass

    if len(in_file.shape) == 3:
        return in_file

    return nimg.index_img(in_file, 0)
Example #17
def downsample_test_data(prefix):
    """Downsampling fmriprep output to create testdata.

    Resolution reduced to 33% of the original.
    Only the first 30 volumes are kept.
    The function is here for bookkeeping, as the real data is not included in
    the repository. The downsampling procedure is modified from the MAIN
    nilearn tutorial data https://osf.io/wjtyq/

    Parameters
    ----------
    prefix : str
        file prefix

    """
    data = (
        get_test_data_path() / "fmriprep" /
        f"{prefix}_task-rest_acq-645_space-MNI152NLin2009cAsym_res-2_desc-preproc_bold.nii.gz"
    )
    mask = (
        get_test_data_path() / "fmriprep" /
        f"{prefix}_task-rest_acq-645_space-MNI152NLin2009cAsym_res-2_desc-brain_mask.nii.gz"
    )
    mask = check_niimg(str(mask), atleast_4d=True)
    brain = image.index_img(str(data), slice(0, 30))
    brain = image.math_img("img1*img2", img1=brain, img2=mask)
    aff_orig = nb.load(str(data)).affine[:, -1]
    target_affine = np.column_stack([np.eye(4, 3) * 6, aff_orig])
    downsample_data = image.resample_img(brain,
                                         target_affine=target_affine,
                                         target_shape=(33, 39, 33))
    downsample_data.set_data_dtype("int8")
    nb.save(
        downsample_data,
        str(get_test_data_path() / "fmriprep" /
            f"{prefix}_task-rest_downsample.nii.gz"),
    )
Example #18
    def load_mask(self, mask_dir, identifier=None, mult_roi_atlases=None):
        '''
        Find and load ROI masks.

        mult_roi_atlases should take the form of a dict of dicts.
        Outer key: substring to identify atlas; inner key: frame within that 4D NIfTI; value: name of that ROI.
        A usage sketch follows this method.
        '''
        mask_dir = expanduser(mask_dir)
        if identifier is None:
            files = glob(join(mask_dir, '*.nii*'))
        else:
            files = glob(join(mask_dir, '*{}*.nii*'.format(identifier)))
        if mult_roi_atlases is not None:
            files = [
                r for r in files
                if all(z not in r for z in mult_roi_atlases.keys())
            ]
            for key, value in mult_roi_atlases.items():
                atlas = glob(join(mask_dir, '*{}*.nii*'.format(key)))
                for k, v in value.items():
                    self.masks[v] = image.index_img(atlas[0], k)
        for file in files:
            name = file[len(mask_dir) + 1:file.find('.nii')]
            self.masks[name] = image.load_img(file)
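# Usage sketch with a hypothetical `loader` instance and made-up atlas/ROI
# names: frames 0 and 1 of any mask file matching '*juelich*' are stored
# under explicit names, while the remaining '*.nii*' files in the directory
# are loaded whole under their filenames.
loader.load_mask(
    '~/masks',
    mult_roi_atlases={'juelich': {0: 'V1_left', 1: 'V1_right'}})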
Example #19
    def test_multi_subject_resampling(self):
        voxel_size = [3, 3, 3]

        # nilearn
        from nilearn.image import resample_img, index_img
        nilearn_resampled = resample_img(self.X[:3], interpolation='nearest', target_affine = np.diag(voxel_size))
        nilearn_resampled_img = [index_img(nilearn_resampled, i) for i in range(nilearn_resampled.shape[-1])]
        nilearn_resampled_array = np.moveaxis(nilearn_resampled.dataobj, -1, 0)

        # photon
        resampler = PipelineElement('ResampleImages', hyperparameters={}, voxel_size=voxel_size)
        resampled_img, _, _ = resampler.transform(self.X[:3])

        branch = NeuroBranch('NeuroBranch', output_img=True)
        branch += resampler
        branch_resampled_img, _, _ = branch.transform(self.X[:3])

        # assert
        self.assertIsInstance(resampled_img, np.ndarray)
        self.assertIsInstance(branch_resampled_img, list)
        self.assertIsInstance(branch_resampled_img[0], Nifti1Image)

        self.assertTrue(np.array_equal(nilearn_resampled_array, resampled_img))
        self.assertTrue(np.array_equal(branch_resampled_img[1].dataobj, nilearn_resampled_img[1].dataobj))
Example #20
  def get_vol_from_vfm(self, idx):
    """
    Convenience method to return nifti volume for an entry in the vfm table
    """
    nii_file = self.vfms.ix[idx]['nii_file']
    volnum = self.vfms.ix[idx]['4dvolind']

    if not os.path.isfile(nii_file):
      candidate = os.path.join(self.atlas_dir, nii_file)
      if os.path.isfile(candidate):
        nii_file = candidate
      else:
        raise Exception('File not found')

    if np.isnan(volnum) or volnum == 'nan':
      print('getting atlas entry %s: image file %s' % (idx, nii_file))
      img = nib.load(nii_file)
    else:
      print('getting atlas entry %s: volume %s from image file %s' % (idx, volnum, nii_file))
      img = index_img(nii_file, volnum)

    return img
Example #21
def _plot_glass_brain(
    nifti,
    pdfpath,
    index=1
):  # TODO: do we need a separate function for this? doesn't look more convenient than calling plot_glass_brain directly...
    """
    Plots nifti data

    Parameters
    ----------
    nifti : nifti image
        Nifti image to plot
    pdfpath : str or None
        If falsy, the plot is shown interactively instead of being saved
    index : int
        Index of the volume to plot

    Returns
    ----------
    results : nilearn plot_glass_brain
        plot data
    """
    nii = image.index_img(nifti, index)
    ni_plt.plot_glass_brain(nii)
    if not pdfpath:
        ni_plt.show()
Example #22
    def prepare_inputs(self, num_std_dev=1.5):
        """Helper function to create temporary niftis and prepare inputs
        for time-series extraction"""
        import os.path as op
        import nibabel as nib
        from nilearn.image import math_img, index_img, resample_to_img
        from nilearn.masking import intersect_masks

        if not op.isfile(self.func_file):
            raise FileNotFoundError(
                "\nFunctional data input not found! Check that the"
                " file(s) specified with the -i "
                "flag exist(s)")

        if self.conf:
            if not op.isfile(self.conf):
                raise FileNotFoundError(
                    "\nConfound regressor file not found! Check "
                    "that the file(s) specified with the -conf flag "
                    "exist(s)")

        self._func_img = nib.load(self.func_file)
        self._func_img.set_data_dtype(np.float32)

        func_vol_img = index_img(self._func_img, 1)
        func_vol_img.set_data_dtype(np.uint16)
        func_data = np.asarray(func_vol_img.dataobj, dtype=np.float32)
        func_int_thr = np.round(
            np.mean(func_data[func_data > 0]) -
            np.std(func_data[func_data > 0]) * num_std_dev,
            3,
        )
        hdr = self._func_img.header

        self._net_parcels_map_nifti = nib.load(self.net_parcels_nii_path,
                                               mmap=True)
        self._net_parcels_map_nifti.set_data_dtype(np.int16)

        if self.hpass:
            if len(hdr.get_zooms()) == 4:
                self._t_r = float(hdr.get_zooms()[-1])
            else:
                self._t_r = None
        else:
            self._t_r = None

        if self.hpass is not None:
            if float(self.hpass) > 0:
                self.hpass = float(self.hpass)
                self._detrending = False
            else:
                self.hpass = None
                self._detrending = True
        else:
            self.hpass = None
            self._detrending = True

        if self.mask is not None:
            # Ensure mask is binary and contains only voxels that also
            # overlap with the parcellation and first functional volume
            self._mask_img = intersect_masks(
                [
                    math_img(f"img > {func_int_thr}", img=func_vol_img),
                    math_img("img > 0.0001",
                             img=resample_to_img(nib.load(self.mask),
                                                 func_vol_img))
                ],
                threshold=1,
                connected=False,
            )
            self._mask_img.set_data_dtype(np.uint16)
        else:
            print("Warning: Proceeding to extract time-series without a "
                  "brain mask...")
            self._mask_img = None

        if self.smooth:
            if float(self.smooth) > 0:
                print(f"Smoothing FWHM: {self.smooth} mm\n")

        if self.hpass:
            print(f"Applying high-pass filter: {self.hpass} Hz\n")

        return
Example #23
def feature_spatial(mel_IC):
    """Extract the spatial feature scores.

    For each IC it determines the fraction of the mixture modeled thresholded
    Z-maps respectively located within the CSF or at the brain edges,
    using predefined standardized masks.

    Parameters
    ----------
    mel_IC : str or niimg_like
        Full path of the nii.gz file containing mixture-modeled thresholded
        (p<0.5) Z-maps, registered to the MNI152 2mm template

    Returns
    -------
    edge_fract : array_like
        Array of the edge fraction feature scores for the components of the
        mel_IC file
    csf_fract : array_like
        Array of the CSF fraction feature scores for the components of the
        mel_IC file
    """
    # Get the number of ICs
    mel_IC_img = load_niimg(mel_IC)
    num_ICs = mel_IC_img.shape[3]

    masks_dir = utils.get_resource_path()
    csf_mask = os.path.join(masks_dir, "mask_csf.nii.gz")
    edge_mask = os.path.join(masks_dir, "mask_edge.nii.gz")
    out_mask = os.path.join(masks_dir, "mask_out.nii.gz")

    # Loop over ICs
    edge_fract = np.zeros(num_ICs)
    csf_fract = np.zeros(num_ICs)
    for i in range(num_ICs):
        # Extract IC from the merged melodic_IC_thr2MNI2mm file
        temp_IC = image.index_img(mel_IC, i)

        # Change to absolute Z-values
        temp_IC = image.math_img("np.abs(img)", img=temp_IC)

        # Get sum of Z-values within the total Z-map (calculate via the mean
        # and number of non-zero voxels)
        temp_IC_data = temp_IC.get_fdata()
        tot_sum = np.sum(temp_IC_data)

        if tot_sum == 0:
            LGR.info("\t- The spatial map of component {} is empty. "
                     "Please check!".format(i + 1))

        # Get sum of Z-values of the voxels located within the CSF
        # (calculate via the mean and number of non-zero voxels)
        csf_data = masking.apply_mask(temp_IC, csf_mask)
        csf_sum = np.sum(csf_data)

        # Get sum of Z-values of the voxels located within the Edge
        # (calculate via the mean and number of non-zero voxels)
        edge_data = masking.apply_mask(temp_IC, edge_mask)
        edge_sum = np.sum(edge_data)

        # Get sum of Z-values of the voxels located outside the brain
        # (calculate via the mean and number of non-zero voxels)
        out_data = masking.apply_mask(temp_IC, out_mask)
        out_sum = np.sum(out_data)

        # Determine edge and CSF fraction
        if tot_sum != 0:
            edge_fract[i] = (out_sum + edge_sum) / (tot_sum - csf_sum)
            csf_fract[i] = csf_sum / tot_sum
        else:
            edge_fract[i] = 0
            csf_fract[i] = 0

    # Return feature scores
    return edge_fract, csf_fract
Example #24
# Plot results

# load fsaverage brain (Static)
t1_s_img = nib.load(t1_fsaverage)

# reslice Static
t1_s_img_res, t1_s_img_res_affine = reslice.reslice(
    t1_s_img.get_data(),
    t1_s_img.affine,
    t1_s_img.header.get_zooms()[:3],
    voxel_size)

t1_s_img_res = nib.Nifti1Image(t1_s_img_res, t1_s_img_res_affine)

# select image overlay
imgs = [index_img(img_vol_to_preC, 0), index_img(img_vol_to_drct, 0)]

# select anatomical background images
t1_imgs = [t1_s_img_res, t1_s_img_res]

# slices to show for Static volume
slices_s = (0, 0, 0)

slices = [slices_s, slices_s]

# define titles for plots
titles = ['fsaverage brain precomputed', 'fsaverage brain direct morph']

# plot results
figure, (axes1, axes2) = plt.subplots(2, 1)
figure.subplots_adjust(top=0.8, left=0.1, right=0.9, hspace=0.5)
Example #25
### Load Target labels ########################################################
import numpy as np
labels = np.recfromcsv(data_files.session_target[0], delimiter=" ")

### Split data into train and test samples ####################################
target = labels['labels']
condition_mask = np.logical_or(target == "face", target == "house")
condition_mask_train = np.logical_and(condition_mask, labels['chunks'] <= 6)
condition_mask_test = np.logical_and(condition_mask, labels['chunks'] > 6)

# make X (design matrix) and y (response variable)
import nibabel
from nilearn.image import index_img
niimgs = nibabel.load(data_files.func[0])
X_train = index_img(niimgs, condition_mask_train)
X_test = index_img(niimgs, condition_mask_test)
y_train = target[condition_mask_train]
y_test = target[condition_mask_test]

### Loop over Graph-Net and TV-L1 penalties ##################################
from nilearn.decoding import SpaceNetClassifier
import matplotlib.pyplot as plt
from nilearn.image import mean_img
from nilearn.plotting import plot_stat_map
background_img = mean_img(data_files.func[0])
for penalty in ['graph-net', 'tv-l1']:
    ### Fit model on train data and predict on test data ######################
    decoder = SpaceNetClassifier(memory="cache", penalty=penalty)
    decoder.fit(X_train, y_train)
    y_pred = decoder.predict(X_test)
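    # The loop body is cut off here; a plausible continuation (not part of
    # the original snippet) reports accuracy and plots the weights via
    # SpaceNetClassifier's fitted `coef_img_` attribute:
    accuracy = (y_pred == y_test).mean() * 100.
    print("%s: accuracy %g%%" % (penalty, accuracy))
    plot_stat_map(decoder.coef_img_, background_img,
                  title="%s weights" % penalty, display_mode="z")
plt.show()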
Example #26
from sklearn.decomposition import FastICA
n_components = 20
ica = FastICA(n_components=n_components, random_state=42)
components_masked = ica.fit_transform(data_masked.T).T

# Normalize estimated components, for thresholding to make sense
components_masked -= components_masked.mean(axis=0)
components_masked /= components_masked.std(axis=0)
# Threshold
components_masked[components_masked < .8] = 0

# Now invert the masking operation, going back to a full 3D
# representation
component_img = masker.inverse_transform(components_masked)

### Visualize the results #####################################################
# Show some interesting components
import matplotlib.pyplot as plt
from nilearn import image
from nilearn.plotting import plot_stat_map

# Use the mean as a background
mean_img = image.mean_img(func_filename)

plot_stat_map(image.index_img(component_img, 5), mean_img)

plot_stat_map(image.index_img(component_img, 12), mean_img)

plt.show()
Example #27
# print basic information on the dataset
print('Anatomical nifti image (3D) is located at: %s' % haxby_dataset.mask)
print('Functional nifti image (4D) is located at: %s' % haxby_dataset.func[0])

fmri_filename = haxby_dataset.func[0]
labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ")
y = labels['labels']
session = labels['chunks']

#########################################################################
# Restrict to faces and houses
from nilearn.image import index_img
condition_mask = np.logical_or(y == b'face', y == b'house')

fmri_img = index_img(fmri_filename, condition_mask)
y, session = y[condition_mask], session[condition_mask]

#########################################################################
# Prepare masks
#
# - mask_img is the original mask
# - process_mask_img is a subset of mask_img, it contains the voxels that
#   should be processed (we only keep the slice z = 26 and the back of the
#   brain to speed up computation)

mask_img = load_img(haxby_dataset.mask)

# .astype() makes a copy.
process_mask = mask_img.get_data().astype(np.int)
picked_slice = 29
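# The snippet is truncated here; the parallel Haxby searchlight examples
# later in this listing continue by zeroing the processing mask above the
# picked slice:
process_mask[..., (picked_slice + 1):] = 0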
Example #28
        gsc_nets = GraphLassoCV(verbose=2, alphas=20)
        gsc_nets.fit(FS_netproj)

        np.save('%i_nets_cov' % sub_id, gsc_nets.covariance_)
        np.save('%i_nets_prec' % sub_id, gsc_nets.precision_)
    except:
        pass

    ###############################################################################
    # dump region poolings
    ###############################################################################
    from nilearn.image import resample_img

    crad = ds.fetch_atlas_craddock_2012()
    # atlas_nii = index_img(crad['scorr_mean'], 19)  # Craddock 200 region atlas
    atlas_nii = index_img(crad['scorr_mean'], 9)  # Craddock 100 region atlas

    r_atlas_nii = resample_img(
        img=atlas_nii,
        target_affine=mask_file.get_affine(),
        target_shape=mask_file.shape,
        interpolation='nearest'
    )
    r_atlas_nii.to_filename('debug_ratlas.nii.gz')

    from nilearn.input_data import NiftiLabelsMasker
    nlm = NiftiLabelsMasker(
        labels_img=r_atlas_nii, mask_img=mask_file,
        standardize=True, detrend=True)

    nlm.fit()
Example #29
haxby_dataset = datasets.fetch_haxby_simple()

# print basic information on the dataset
print('Anatomical nifti image (3D) is located at: %s' % haxby_dataset.mask)
print('Functional nifti image (4D) is located at: %s' % haxby_dataset.func)

fmri_filename = haxby_dataset.func
fmri_img = nibabel.load(fmri_filename)
y, session = np.loadtxt(haxby_dataset.session_target).astype('int').T
conditions = np.recfromtxt(haxby_dataset.conditions_target)['f0']

### Restrict to faces and houses ##############################################
from nilearn.image import index_img
condition_mask = np.logical_or(conditions == b'face', conditions == b'house')

fmri_img = index_img(fmri_img, condition_mask)
y, session = y[condition_mask], session[condition_mask]
conditions = conditions[condition_mask]

### Prepare masks #############################################################
# - mask_img is the original mask
# - process_mask_img is a subset of mask_img, it contains the voxels that
#   should be processed (we only keep the slice z = 26 and the back of the
#   brain to speed up computation)

mask_img = nibabel.load(haxby_dataset.mask)

# .astype() makes a copy.
process_mask = mask_img.get_data().astype(np.int)
picked_slice = 27
process_mask[..., (picked_slice + 1):] = 0
Example #30
plot_roi(masker.mask_img_, miyawaki_mean_img,
         title="Mask from already masked data")


###############################################################################
# From raw EPI data

# Load NYU resting-state dataset
nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1)
nyu_filename = nyu_dataset.func[0]
nyu_img = nibabel.load(nyu_filename)

# Restrict nyu to 100 frames to speed up computation
from nilearn.image import index_img
nyu_img = index_img(nyu_img, slice(0, 100))

# To display the background
nyu_mean_img = image.mean_img(nyu_img)


# Simple mask extraction from EPI images
# We need to specify an 'epi' mask_strategy, as this is raw EPI data
masker = NiftiMasker(mask_strategy='epi')
masker.fit(nyu_img)
plot_roi(masker.mask_img_, nyu_mean_img, title='EPI automatic mask')

# Generate mask with strong opening
masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=10))
masker.fit(nyu_img)
plot_roi(masker.mask_img_, nyu_mean_img, title='EPI Mask with strong opening')
Example #31
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-vol-7-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'

snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)

# Load data
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src']

# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method)
stc.crop(0.0, 0.2)

# Export result as a 4D nifti object
img = stc.as_volume(src,
                    mri_resolution=False)  # set True for full MRI resolution

# Save it as a nifti file
# nib.save(img, 'mne_%s_inverse.nii.gz' % method)

t1_fname = data_path + '/subjects/sample/mri/T1.mgz'

# Plotting with nilearn ######################################################
plot_stat_map(index_img(img, 61), t1_fname, threshold=8.,
              title='%s (t=%.1f s.)' % (method, stc.times[61]))
plt.show()
Example #32
"""
Docstring.
"""
from nidata.multimodal import MyConnectome2015Dataset

# runs the member function 'fetch' from the class 'MyConnectome2015Dataset'
dataset = MyConnectome2015Dataset().fetch(data_types=['functional'],
                                          session_ids=range(2, 13))
print(dataset)

from nilearn.plotting import plot_anat
from nilearn.image import index_img
from matplotlib import pyplot as plt

plot_anat(index_img(dataset['functional'][0], 0))
plt.show()
Example #33
# Restrict to face and house conditions
target = labels["labels"]
condition_mask = np.logical_or(target == b"face", target == b"house")

# Split data into train and test samples, using the chunks
condition_mask_train = np.logical_and(condition_mask, labels["chunks"] <= 6)
condition_mask_test = np.logical_and(condition_mask, labels["chunks"] > 6)

# Apply this sample mask to X (fMRI data) and y (behavioral labels)
# Because the data is in one single large 4D image, we need to use
# index_img to do the split easily
from nilearn.image import index_img

func_filenames = data_files.func[0]
X_train = index_img(func_filenames, condition_mask_train)
X_test = index_img(func_filenames, condition_mask_test)
y_train = target[condition_mask_train]
y_test = target[condition_mask_test]

# Compute the mean epi to be used for the background of the plotting
from nilearn.image import mean_img

background_img = mean_img(func_filenames)

##############################################################################
# Fit SpaceNet with a Graph-Net penalty
from nilearn.decoding import SpaceNetClassifier

# Fit model on train data and predict on test data
decoder = SpaceNetClassifier(memory="nilearn_cache", penalty="graph-net")
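# The snippet stops after constructing the classifier; as in the
# Graph-Net/TV-L1 loop shown in an earlier example, fitting and prediction
# would follow:
decoder.fit(X_train, y_train)
y_pred = decoder.predict(X_test)
accuracy = (y_pred == y_test).mean() * 100.
print("Graph-net classification accuracy: %g%%" % accuracy)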
Example #34
    def _get_img(self, network_index):
        """Return one RSN given the index in the labels file."""
        img_idx = self._img_index(network_index)
        return niimg.index_img(self._img, img_idx)
Example #35
# As we can see from the targets above, the experiment contains many
# conditions. As a consequence, the data is quite big. Not all of this data
# has an interest to us for decoding, so we will keep only fmri signals
# corresponding to faces or cats. We create a mask of the samples belonging to
# the condition; this mask is then applied to the fmri data to restrict the
# classification to the face vs cat discrimination.
#
# The input data will become much smaller (i.e. fmri signal is shorter):
condition_mask = conditions.isin(['face', 'cat'])

###########################################################################
# Because the data is in one single large 4D image, we need to use
# index_img to do the split easily.
from nilearn.image import index_img

fmri_niimgs = index_img(fmri_filename, condition_mask)

###########################################################################
# We apply the same mask to the targets
conditions = conditions[condition_mask]
# Convert to numpy array
conditions = conditions.values
print(conditions.shape)

###########################################################################
# Decoding with Support Vector Machine
# ------------------------------------
#
# As a decoder, we use a Support Vector Classification, with a linear kernel.
# We first create it by using :class:`nilearn.decoding.Decoder`.
from nilearn.decoding import Decoder
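# The snippet ends at the import; a minimal construction and fit would look
# like this (`mask_filename` is assumed to be a brain-mask path supplied by
# the dataset):
decoder = Decoder(estimator='svc', mask=mask_filename, standardize=True)
decoder.fit(fmri_niimgs, conditions)
prediction = decoder.predict(fmri_niimgs)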
Example #36
# calculate mean image for the background
mean_func_img = mean_img(func_filename)

plot_roi(mask_img, mean_func_img, display_mode='y', cut_coords=4, title="Mask")

### Preprocess data ###########################################################
nifti_masker.fit(func_filename)
fmri_masked = nifti_masker.transform(func_filename)

### Run an algorithm ##########################################################
from sklearn.decomposition import FastICA

n_components = 20
ica = FastICA(n_components=n_components, random_state=42)
components_masked = ica.fit_transform(fmri_masked.T).T

### Reverse masking ###########################################################
components = nifti_masker.inverse_transform(components_masked)

### Show results ##############################################################
from nilearn.plotting import plot_stat_map
from nilearn.image import index_img

plot_stat_map(index_img(components, 0),
              mean_func_img,
              display_mode='y',
              cut_coords=4,
              title="Component 0")

plt.show()
Example #37
from nilearn.plotting import plot_roi
from nilearn.image.image import mean_img

# calculate mean image for the background
mean_func_img = mean_img(func_filename)

plot_roi(mask_img, mean_func_img, display_mode='y', cut_coords=4, title="Mask")


### Preprocess data ###########################################################
nifti_masker.fit(func_filename)
fmri_masked = nifti_masker.transform(func_filename)

### Run an algorithm ##########################################################
from sklearn.decomposition import FastICA
n_components = 20
ica = FastICA(n_components=n_components, random_state=42)
components_masked = ica.fit_transform(fmri_masked.T).T

### Reverse masking ###########################################################
components = nifti_masker.inverse_transform(components_masked)

### Show results ##############################################################
from nilearn.plotting import plot_stat_map
from nilearn.image import index_img

plot_stat_map(index_img(components, 0), mean_func_img, display_mode='y',
              cut_coords=4, title="Component 0")

plt.show()
Example #38
masker.fit(miyawaki_filename)

plot_roi(masker.mask_img_, miyawaki_mean_img,
         title="Mask from already masked data")


###############################################################################
# From raw EPI data

# Load NYU resting-state dataset
nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1)
nyu_filename = nyu_dataset.func[0]

# Restrict nyu to 100 frames to speed up computation
from nilearn.image import index_img
nyu_img = index_img(nyu_filename, slice(0, 100))

# To display the background
nyu_mean_img = image.mean_img(nyu_img)


# Simple mask extraction from EPI images
# We need to specify an 'epi' mask_strategy, as this is raw EPI data
masker = NiftiMasker(mask_strategy='epi')
masker.fit(nyu_img)
plot_roi(masker.mask_img_, nyu_mean_img, title='EPI automatic mask')

# Generate mask with strong opening
masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=10))
masker.fit(nyu_img)
plot_roi(masker.mask_img_, nyu_mean_img, title='EPI Mask with strong opening')
    print("[Example] Learning maps using %s model" % names[estimator])
    estimator.fit(func_filenames)
    print("[Example] Saving results")
    # Decomposition estimator embeds their own masker
    masker = estimator.masker_
    # Drop output maps to a Nifti file
    components_img = masker.inverse_transform(estimator.components_)
    components_img.to_filename("%s_resting_state.nii.gz" % names[estimator])
    components_imgs.append(components_img)

###############################################################################
# Visualize the results
from nilearn.plotting import plot_prob_atlas, find_xyz_cut_coords, show, plot_stat_map
from nilearn.image import index_img

# Selecting specific maps to display: maps were manually chosen to be similar
indices = {dict_learning: 1, canica: 31}
# We select relevant cut coordinates for displaying
cut_component = index_img(components_imgs[0], indices[dict_learning])
cut_coords = find_xyz_cut_coords(cut_component)
for estimator, components in zip(estimators, components_imgs):
    # 4D plotting
    plot_prob_atlas(
        components, view_type="filled_contours", title="%s" % names[estimator], cut_coords=cut_coords, colorbar=False
    )
    # 3D plotting
    plot_stat_map(
        index_img(components, indices[estimator]), title="%s" % names[estimator], cut_coords=cut_coords, colorbar=False
    )
show()
Example #40
report

###############################################################################
# Computing a mask from raw EPI data
###############################################################################
#
# From raw EPI data, there is no uniform background, and a different
# strategy is necessary

# Load movie watching based brain development fmri dataset
dataset = datasets.fetch_development_fmri(n_subjects=1)
epi_filename = dataset.func[0]

# Restrict to 100 frames to speed up computation
from nilearn.image import index_img
epi_img = index_img(epi_filename, slice(0, 100))

# To display the background
mean_img = image.mean_img(epi_img)
plot_epi(mean_img, title='Mean EPI image')

###############################################################################
# Simple mask extraction from EPI images
# We need to specify an 'epi' mask_strategy, as this is raw EPI data
masker = NiftiMasker(mask_strategy='epi')
masker.fit(epi_img)
report = masker.generate_report()
report

###############################################################################
# Generate mask with strong opening
# -----------------------------------
Example #41
###############################################################################
# We can download resting-state networks from the Smith 2009 study on
# correspondence between rest and task
rsn = datasets.fetch_atlas_smith_2009()['rsn10']
print(rsn)

###############################################################################
# It is a 4D nifti file. We load it into the memory to print its
# shape.
from nilearn import image
print(image.load_img(rsn).shape)

###############################################################################
# We can retrieve the first volume (note that Python indexing starts at 0):
first_rsn = image.index_img(rsn, 0)
print(first_rsn.shape)

###############################################################################
# first_rsn is a 3D image.
#
# We can then plot it
plotting.plot_stat_map(first_rsn)


###############################################################################
# Looping on all volumes in a 4D file
# -----------------------------------
#
# If we want to plot all the volumes in this 4D file, we can use iter_img
# to loop on them.
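# The snippet ends before the loop itself; with nilearn's image.iter_img it
# would look roughly like this:
for img in image.iter_img(rsn):
    # img is now an in-memory 3D volume
    plotting.plot_stat_map(img, threshold=3, display_mode="z",
                           cut_coords=1, colorbar=False)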
Example #42
def plot_melodic_components(melodic_dir,
                            in_file,
                            tr=None,
                            out_file='melodic_reportlet.svg',
                            compress='auto',
                            report_mask=None,
                            noise_components_file=None):
    from nilearn.image import index_img, iter_img
    import nibabel as nb
    import numpy as np
    import seaborn as sns
    from matplotlib.gridspec import GridSpec
    import os
    import re
    from io import StringIO
    sns.set_style("white")
    current_palette = sns.color_palette()
    in_nii = nb.load(in_file)
    if not tr:
        tr = in_nii.header.get_zooms()[3]
        units = in_nii.header.get_xyzt_units()
        if units:
            if units[-1] == 'msec':
                tr = tr / 1000.0
            elif units[-1] == 'usec':
                tr = tr / 1000000.0
            elif units[-1] != 'sec':
                NIWORKFLOWS_LOG.warning('Unknown repetition time units '
                                        'specified - assuming seconds')
        else:
            NIWORKFLOWS_LOG.warning(
                'Repetition time units not specified - assuming seconds')

    from nilearn.input_data import NiftiMasker
    from nilearn.plotting import cm

    if not report_mask:
        nifti_masker = NiftiMasker(mask_strategy='epi')
        nifti_masker.fit(index_img(in_nii, range(2)))
        mask_img = nifti_masker.mask_img_
    else:
        mask_img = nb.load(report_mask)

    mask_sl = []
    for j in range(3):
        mask_sl.append(transform_to_2d(mask_img.get_data(), j))

    timeseries = np.loadtxt(os.path.join(melodic_dir, "melodic_mix"))
    power = np.loadtxt(os.path.join(melodic_dir, "melodic_FTmix"))
    stats = np.loadtxt(os.path.join(melodic_dir, "melodic_ICstats"))
    n_components = stats.shape[0]
    Fs = 1.0 / tr
    Ny = Fs / 2
    f = Ny * (np.array(list(range(1, power.shape[0] + 1)))) / (power.shape[0])

    n_rows = int((n_components + (n_components % 2)) / 2)
    fig = plt.figure(figsize=(6.5 * 1.5, n_rows * 0.85))
    gs = GridSpec(n_rows * 2,
                  9,
                  width_ratios=[
                      1,
                      1,
                      1,
                      4,
                      0.001,
                      1,
                      1,
                      1,
                      4,
                  ],
                  height_ratios=[1.1, 1] * n_rows)

    noise_components = None
    if noise_components_file:
        noise_components = np.loadtxt(noise_components_file,
                                      dtype=int,
                                      delimiter=',',
                                      ndmin=1)

    for i, img in enumerate(
            iter_img(os.path.join(melodic_dir, "melodic_IC.nii.gz"))):

        col = i % 2
        row = int(i / 2)
        l_row = row * 2

        # Set default colors
        color_title = 'k'
        color_time = current_palette[0]
        color_power = current_palette[1]

        if noise_components is not None and noise_components.size > 0:
            # If a noise components list is provided, assign red/green
            color_title = color_time = color_power = ('r' if (
                i + 1) in noise_components else 'g')

        data = img.get_data()
        for j in range(3):
            ax1 = fig.add_subplot(gs[l_row:l_row + 2, j + col * 5])
            sl = transform_to_2d(data, j)
            m = np.abs(sl).max()
            ax1.imshow(sl,
                       vmin=-m,
                       vmax=+m,
                       cmap=cm.cold_white_hot,
                       interpolation="nearest")
            ax1.contour(mask_sl[j], levels=[0.5], colors='k', linewidths=0.5)
            plt.axis("off")
            ax1.autoscale_view('tight')
            if j == 0:
                ax1.set_title("C%d: Tot. var. expl. %.2g%%" %
                              (i + 1, stats[i, 1]),
                              x=0,
                              y=1.18,
                              fontsize=7,
                              horizontalalignment='left',
                              verticalalignment='top',
                              color=color_title)

        ax2 = fig.add_subplot(gs[l_row, 3 + col * 5])
        ax3 = fig.add_subplot(gs[l_row + 1, 3 + col * 5])

        ax2.plot(np.arange(len(timeseries[:, i])) * tr,
                 timeseries[:, i],
                 linewidth=min(200 / len(timeseries[:, i]), 1.0),
                 color=color_time)
        ax2.set_xlim([0, len(timeseries[:, i]) * tr])
        ax2.axes.get_yaxis().set_visible(False)
        ax2.autoscale_view('tight')
        ax2.tick_params(axis='both', which='major', pad=0)
        sns.despine(left=True, bottom=True)
        for tick in ax2.xaxis.get_major_ticks():
            tick.label.set_fontsize(6)
            tick.label.set_color(color_time)

        ax3.plot(f[0:],
                 power[0:, i],
                 color=color_power,
                 linewidth=min(100 / len(power[0:, i]), 1.0))
        ax3.set_xlim([f[0], f.max()])
        ax3.axes.get_yaxis().set_visible(False)
        ax3.autoscale_view('tight')
        ax3.tick_params(axis='both', which='major', pad=0)
        for tick in ax3.xaxis.get_major_ticks():
            tick.label.set_fontsize(6)
            tick.label.set_color(color_power)
        sns.despine(left=True, bottom=True)

    plt.subplots_adjust(hspace=0.5)

    image_buf = StringIO()
    fig.savefig(image_buf,
                dpi=300,
                format='svg',
                transparent=True,
                bbox_inches='tight',
                pad_inches=0.01)
    fig.clf()
    image_svg = image_buf.getvalue()

    if compress is True or compress == 'auto':
        image_svg = svg_compress(image_svg, compress)
    image_svg = re.sub(' height="[0-9]+[a-z]*"', '', image_svg, count=1)
    image_svg = re.sub(' width="[0-9]+[a-z]*"', '', image_svg, count=1)
    image_svg = re.sub(' viewBox',
                       ' preserveAspectRatio="xMidYMid meet" viewBox',
                       image_svg,
                       count=1)

    with open(out_file, 'w' if PY3 else 'wb') as f:
        f.write(image_svg)
Example #43
# For each train subject and for the template, the AP contrasts are sorted from
# 0 to 53, and then the PA contrasts from 53 to 106.
#

train_index = range(53)
test_index = range(53, 106)

# We input the mapping image target_train in a list, we could have input more
# than one subject for which we'd want to predict : [train_1, train_2 ...]

prediction_from_template = template_estim.transform([target_train],
                                                    train_index, test_index)

# As a baseline prediction, let's just take the average of activations across subjects.

prediction_from_average = index_img(average_subject, test_index)

###############################################################################
# Score the baseline and the prediction
# -------------------------------------
# We use a utility scoring function to measure the voxelwise correlation
# between the prediction and the ground truth. That is, for each voxel, we
# measure the correlation between its profile of activation without and with
# alignment, to see if alignment was able to predict a signal more like the ground truth.
#

from fmralign.metrics import score_voxelwise

# Now we use this scoring function to compare the correlation of predictions
# made from group average and from template with the real PA contrasts of sub-07
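# The comparison itself is cut off; a sketch based on fmralign's
# score_voxelwise, assuming `target_test` holds the ground-truth PA contrasts
# and `masker` is the fitted masker used earlier in the tutorial:
average_score = masker.inverse_transform(
    score_voxelwise(target_test, prediction_from_average, masker, loss='corr'))
template_score = masker.inverse_transform(
    score_voxelwise(target_test, prediction_from_template[0], masker, loss='corr'))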
Example #44
def spatclust(img, min_cluster_size, threshold=None, index=None, mask=None):
    """
    Spatially clusters `img`

    Parameters
    ----------
    img : str or img_like
        Image file or object to be clustered
    min_cluster_size : int
        Minimum cluster size (in voxels)
    threshold : float, optional
        Value at which to threshold `img` before clustering
    index : array_like, optional
        Indices of volumes to extract from `img` for clustering
    mask : (S,) array_like, optional
        Boolean array for masking resultant data array

    Returns
    -------
    clustered : :obj:`numpy.ndarray`
        Boolean array of clustered (and thresholded) `img` data
    """

    # we need a 4D image for `niimg.iter_img`, below
    img = niimg.copy_img(check_niimg(img, atleast_4d=True))

    # temporarily set voxel sizes to 1mm isotropic so that `min_cluster_size`
    # represents the minimum number of voxels we want to be in a cluster,
    # rather than the minimum size of the desired clusters in mm^3
    if not np.all(np.abs(np.diag(img.affine)) == 1):
        img.set_sform(np.sign(img.affine))

    # grab desired volumes from provided image
    if index is not None:
        if not isinstance(index, list):
            index = [index]
        img = niimg.index_img(img, index)

    # threshold image
    if threshold is not None:
        img = niimg.threshold_img(img, float(threshold))

    clout = []
    for subbrick in niimg.iter_img(img):
        # `min_region_size` is not inclusive (as in AFNI's `3dmerge`)
        # subtract one voxel to ensure we aren't hitting this thresholding issue
        try:
            clsts = connected_regions(subbrick,
                                      min_region_size=int(min_cluster_size) -
                                      1,
                                      smoothing_fwhm=None,
                                      extract_type='connected_components')[0]
        # if no clusters are detected we get a TypeError; create a blank 4D
        # image object as a placeholder instead
        except TypeError:
            clsts = niimg.new_img_like(subbrick,
                                       np.zeros(subbrick.shape + (1, )))
        # if multiple clusters detected, collapse into one volume
        clout += [niimg.math_img('np.sum(a, axis=-1)', a=clsts)]

    # convert back to data array and make boolean
    clustered = utils.load_image(niimg.concat_imgs(clout).get_data()) != 0

    # if mask provided, mask output
    if mask is not None:
        clustered = clustered[mask]

    return clustered
Example #45
    n_topics=10,
    n_regions=4,
    symmetric=True,
)
model.fit(n_iters=100, loglikely_freq=20)
model.save("gclda_model.pkl.gz")

# Let's remove the model now that you know how to generate it.
os.remove("gclda_model.pkl.gz")

###############################################################################
# Look at topics
# -----------------------------------------------------------------------------
topic_img_4d = masking.unmask(model.p_voxel_g_topic_.T, model.mask)
for i_topic in range(5):
    topic_img_3d = image.index_img(topic_img_4d, i_topic)
    plotting.plot_stat_map(
        topic_img_3d,
        draw_cross=False,
        colorbar=False,
        annotate=False,
        title=f"Topic {i_topic + 1}",
    )

###############################################################################
# Generate a pseudo-statistic image from text
# -----------------------------------------------------------------------------
text = "dorsal anterior cingulate cortex"
encoded_img, _ = decode.encode.gclda_encode(model, text)
plotting.plot_stat_map(encoded_img, draw_cross=False)
Example #46
#############################################################################
# Load the behavioral data
# -------------------------
import pandas as pd

# Load target information as string and give a numerical identifier to each

behavioral = pd.read_csv(haxby_dataset.session_target[0], sep=" ")
conditions = behavioral['labels']

# Restrict the analysis to faces and places
from nilearn.image import index_img
condition_mask = behavioral['labels'].isin(['face', 'house'])
conditions = conditions[condition_mask]
func_img = index_img(func_img, condition_mask)

# Confirm that we now have 2 conditions
print(conditions.unique())

# The number of the session is stored in the CSV file giving the behavioral
# data. We have to apply our condition mask, to select only faces and houses.
session_label = behavioral['chunks'][condition_mask]

#############################################################################
# ANOVA pipeline with :class:`nilearn.decoding.Decoder` object
# ------------------------------------------------------------
#
# The Nilearn Decoder object aims to provide a smooth user experience by acting
# as a pipeline of several tasks: preprocessing with NiftiMasker and reducing
# dimensionality by selecting only relevant features with ANOVA -- a classical
# univariate
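# The snippet is cut off above; a hedged sketch of the Decoder step it
# describes (parameter values are illustrative, not from the source): the
# Decoder chains NiftiMasker preprocessing, ANOVA feature screening, and a
# linear SVC.
from nilearn.decoding import Decoder

decoder = Decoder(estimator='svc', standardize=True,
                  screening_percentile=5)  # keep the 5% highest-F voxels
decoder.fit(func_img, conditions)
prediction = decoder.predict(func_img)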
Example No. 47
import numpy as np
import nibabel
from nilearn import datasets

haxby_dataset = datasets.fetch_haxby_simple()

fmri_filename = haxby_dataset.func
fmri_img = nibabel.load(fmri_filename)
y, session = np.loadtxt(haxby_dataset.session_target).astype('int').T
conditions = np.recfromtxt(haxby_dataset.conditions_target)['f0']

### Restrict to faces and houses ##############################################
from nilearn.image import index_img
condition_mask = np.logical_or(conditions == b'face', conditions == b'house')

fmri_img = index_img(fmri_img, condition_mask)
y, session = y[condition_mask], session[condition_mask]
conditions = conditions[condition_mask]

### Prepare masks #############################################################
# - mask_img is the original mask
# - process_mask_img is a subset of mask_img; it contains the voxels that
#   should be processed (we keep only one axial slice and the back of the
#   brain to speed up computation)

mask_img = nibabel.load(haxby_dataset.mask)

# .astype() makes a copy.
process_mask = mask_img.get_data().astype(int)  # np.int no longer exists in modern NumPy
picked_slice = 27
process_mask[..., (picked_slice + 1):] = 0
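# Hedged continuation of the truncated snippet (radius and CV settings are
# illustrative): wrap the trimmed array back into an image and hand both
# masks to nilearn's SearchLight.
from nilearn.decoding import SearchLight

process_mask_img = nibabel.Nifti1Image(process_mask, mask_img.affine)

searchlight = SearchLight(mask_img,
                          process_mask_img=process_mask_img,
                          radius=5.6,  # sphere radius in mm
                          n_jobs=1,
                          verbose=1,
                          cv=4)
searchlight.fit(fmri_img, y)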
Example No. 48
def sliced_gif_eff(nifti,
                   gif_path,
                   time_index=range(100, 200),
                   slice_index=range(-4, 52, 7),
                   name=None,
                   vmax=3.5,
                   duration=1000,
                   symmetric_cbar=False,
                   alpha=0.5,
                   **kwargs):
    """
    Plots series of nifti timepoints as nilearn plot_anat_brain in .png format

    :param nifti: Nifti object to be plotted
    :param gif_path: Directory to save .png files
    :param time_index: Time indices to be plotted
    :param slice_index: Coordinates to be plotted
    :param name: Name of output gif
    :param vmax: scale of colorbar (IMPORTANT! change if scale changes are necessary)
    :param kwargs: Other args to be passed to nilearn's plot_anat_brain
    :return:
    """

    images = []
    num_slice = len(slice_index)
    nrow = int(np.floor(np.sqrt(num_slice)))
    ncol = int(np.ceil(num_slice / nrow))

    fig, ax = plt.subplots(nrows=nrow, ncols=ncol,
                           figsize=(10, 10))  # move out of for loop, set ax
    ax = ax.reshape(-1)

    displays = []
    nifti = _utils.check_niimg_4d(nifti)
    for currax, loc in enumerate(slice_index):
        # base frame: use the first requested timepoint (the loop below
        # continues from time_index[1:])
        nii_i = image.index_img(nifti, time_index[0])
        if loc == slice_index[-1]:
            display = ni_plt.plot_stat_map(nii_i,
                                           cut_coords=[loc],
                                           colorbar=True,
                                           symmetric_cbar=symmetric_cbar,
                                           figure=fig,
                                           axes=ax[currax],
                                           vmax=vmax,
                                           alpha=alpha,
                                           **kwargs)
        else:
            display = ni_plt.plot_stat_map(nii_i,
                                           cut_coords=[loc],
                                           colorbar=False,
                                           symmetric_cbar=symmetric_cbar,
                                           figure=fig,
                                           axes=ax[currax],
                                           vmax=vmax,
                                           alpha=alpha,
                                           **kwargs)
        displays.append(display)

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    images.append(Image.open(buf))

    for i in time_index[1:]:
        nii_i = image.index_img(nifti, i)
        for display in displays:
            display.add_overlay(nii_i, colorbar=False, vmax=vmax, alpha=alpha)
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)
        images.append(Image.open(buf))

    plt.close()

    if name is None:
        gif_outfile = os.path.join(
            gif_path, 'gif_' + str(min(time_index)) + '_' +
            str(max(time_index)) + '.gif')
    else:
        gif_outfile = os.path.join(gif_path, str(name) + '.gif')

    # creates the gif from the frames
    images[0].save(gif_outfile,
                   format='GIF',
                   append_images=images[1:],
                   save_all=True,
                   duration=duration,
                   loop=0)
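# Hedged usage sketch (the file path and output name are placeholders):
import nibabel as nib

bold = nib.load('sub-01_task-rest_bold.nii.gz')
sliced_gif_eff(bold, gif_path='figures', time_index=range(0, 50),
               name='bold_animation', vmax=3.5)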
Example No. 49
    def create_clean_mask(self, num_std_dev=1.5):
        """
        Create a subject-refined version of the clustering mask.
        """
        import os
        import numpy as np
        import nibabel as nib
        from pynets.core import utils
        from nilearn.masking import intersect_masks
        from nilearn.image import index_img, math_img, resample_img

        mask_name = os.path.basename(self.clust_mask).split(".nii")[0]
        self.atlas = f"{mask_name}_{self.clust_type}_k{self.k}"
        print(f"\nCreating atlas using {self.clust_type} at cluster level"
              f" {str(self.k)} for {str(self.atlas)}...\n")
        self._dir_path = utils.do_dir_path(self.atlas, self.outdir)
        self.parcellation = f"{self._dir_path}/{mask_name}_" \
                            f"clust-{self.clust_type}" \
                            f"_k{str(self.k)}.nii.gz"

        # Load clustering mask
        self._func_img.set_data_dtype(np.float32)
        func_vol_img = index_img(self._func_img, 1)
        func_vol_img.set_data_dtype(np.uint16)
        clust_mask_res_img = resample_img(
            nib.load(self.clust_mask),
            target_affine=func_vol_img.affine,
            target_shape=func_vol_img.shape,
            interpolation="nearest",
        )
        clust_mask_res_img.set_data_dtype(np.uint16)
        func_data = np.asarray(func_vol_img.dataobj, dtype=np.float32)
        func_int_thr = np.round(
            np.mean(func_data[func_data > 0]) -
            np.std(func_data[func_data > 0]) * num_std_dev,
            3,
        )
        if self.mask is not None:
            self._mask_img = nib.load(self.mask)
            self._mask_img.set_data_dtype(np.uint16)
            mask_res_img = resample_img(
                self._mask_img,
                target_affine=func_vol_img.affine,
                target_shape=func_vol_img.shape,
                interpolation="nearest",
            )
            mask_res_img.set_data_dtype(np.uint16)
            self._clust_mask_corr_img = intersect_masks(
                [
                    math_img(f"img > {func_int_thr}", img=func_vol_img),
                    math_img("img > 0.01", img=clust_mask_res_img),
                    math_img("img > 0.01", img=mask_res_img),
                ],
                threshold=1,
                connected=False,
            )
            self._clust_mask_corr_img.set_data_dtype(np.uint16)
            self._mask_img.uncache()
            mask_res_img.uncache()
        else:
            self._clust_mask_corr_img = intersect_masks(
                [
                    math_img("img > " + str(func_int_thr), img=func_vol_img),
                    math_img("img > 0.01", img=clust_mask_res_img),
                ],
                threshold=1,
                connected=False,
            )
            self._clust_mask_corr_img.set_data_dtype(np.uint16)
        nib.save(self._clust_mask_corr_img,
                 f"{self._dir_path}/{mask_name}.nii.gz")

        del func_data
        func_vol_img.uncache()
        clust_mask_res_img.uncache()

        return self.atlas
Example No. 50
behavioral["Label"] = behavioral.replace(
    ['HF_LS_receipt', 'LF_HS_receipt', 'HF_HS_receipt'], 'yummy')

conditions = behavioral["Label"]

# Set targets
condition_mask = behavioral["Label"].isin(['yummy', 'yucky', 'rest'])

# Sanity check: the milkshake receipt labels should now all read "yummy"
print(conditions.unique())

# Split data into train and test samples, using the chunks
condition_mask_train = (condition_mask) & (behavioral['sess'] == 0)
condition_mask_test = (condition_mask) & (behavioral['sess'] == 1)

X_train = index_img(dataset, condition_mask_train)
X_test = index_img(dataset, condition_mask_test)
y_train = conditions[condition_mask_train]
y_test = conditions[condition_mask_test]

print(X_test.shape)

# Compute the mean epi to be used for the background of the plotting
from nilearn.image import mean_img
background_img = mean_img(dataset)

## SET & RUN CLASSIFIER - SPACENET WITH GRAPHNET

from nilearn.decoding import SpaceNetClassifier
# Fit model on train data and predict on test data
# The rest of the constructor call is truncated in the source; the GraphNet
# penalty is inferred from the heading above.
decoder = SpaceNetClassifier(penalty='graph-net')
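# Hedged continuation (not the source's exact code): fit on the training
# session and score on the held-out session.
decoder.fit(X_train, y_train)
y_pred = decoder.predict(X_test)
accuracy = (y_pred == y_test.values).mean()
print("GraphNet accuracy: %.2f" % accuracy)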
Example No. 51
    ]
    components = masker.transform(components_imgs)
    proj, proj_inv, rec = make_projection_matrix(components, scale_bases=True)
    dump(rec, join(get_output_dir(), 'benchmark', 'rec.pkl'))


def load_rec():
    return load(join(get_output_dir(), 'benchmark', 'rec.pkl'))


# compute_rec()

exp_dirs = join(get_output_dir(), 'single_exp', '8')
models = []
rec = load_rec()
mask_img = fetch_mask()
masker = MultiNiftiMasker(mask_img=mask_img).fit()

for exp_dir in [exp_dirs]:
    estimator = load(join(exp_dir, 'estimator.pkl'))
    transformer = load(join(exp_dir, 'transformer.pkl'))
    print([(dataset, this_class)
           for dataset, lbin in transformer.lbins_.items()
           for this_class in lbin.classes_])
    coef = estimator.coef_
    coef_rec = coef.dot(rec)
    print(join(exp_dir, 'maps.nii.gz'))
    imgs = masker.inverse_transform(coef_rec)
    imgs.to_filename(join(exp_dir, 'maps.nii.gz'))
plot_stat_map(index_img(imgs, 10))
plt.show()
Example No. 52

extraction.fit()
regions_img = extraction.regions_img_

################################################################################
# Visualization
# Show region extraction results by importing image & plotting utilities
from nilearn import plotting
from nilearn.image import index_img
from nilearn.plotting import find_xyz_cut_coords

# Showing region extraction results using 4D maps visualization tool
plotting.plot_prob_atlas(regions_img, display_mode='z', cut_coords=1,
                         view_type='contours', title="Regions extracted.")

# To reduce the complexity, we display only the regions
# extracted from network 3
import numpy as np

DMN_network = index_img(atlas_networks, 3)
plotting.plot_stat_map(DMN_network, display_mode='z', cut_coords=1,
                       title='Network 3', colorbar=False)

regions_indices_network3 = np.where(np.array(extraction.index_) == 3)
for index in regions_indices_network3[0]:
    cur_img = index_img(extraction.regions_img_, index)
    coords = find_xyz_cut_coords(cur_img)
    plotting.plot_stat_map(cur_img, display_mode='z', cut_coords=coords[2:3],
                           title="Blob of network3", colorbar=False)

plotting.show()
Example No. 53

# Then find the center of the regions and plot a connectome
regions_img = regions_extracted_img
coords_connectome = plotting.find_probabilistic_atlas_cut_coords(regions_img)

plotting.plot_connectome(mean_correlations, coords_connectome,
                         edge_threshold='90%', title=title)

################################################################################
# Plot regions extracted for only one specific network
# ----------------------------------------------------

# First, we plot a network of index=4 without region extraction (left plot)
from nilearn import image

img = image.index_img(components_img, 4)
coords = plotting.find_xyz_cut_coords(img)
display = plotting.plot_stat_map(img, cut_coords=coords, colorbar=False,
                                 title='Showing one specific network')

################################################################################
# Now, we plot (right side) the same network after region extraction, to show
# that connected regions are nicely separated.
# Each extracted brain region is identified by a separate color.

# For this, we take the indices of all the extracted regions related to the
# original network with index 4.
regions_indices_of_map4 = np.where(np.array(regions_index) == 4)

display = plotting.plot_anat(cut_coords=coords,
                             title='Regions from this network')
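# Hedged sketch of the truncated step: overlay each region extracted from
# network 4 on the anatomical display (colors are arbitrary).
colors = 'rgbcmyk'
for region_index, color in zip(regions_indices_of_map4[0], colors):
    display.add_overlay(image.index_img(regions_extracted_img, region_index),
                        cmap=plotting.cm.alpha_cmap(color))
plotting.show()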
Example No. 54
plotting.plot_epi(mean_func_img, cut_coords=cut_coords,
                  title='Original (%i voxels)' % original_voxels,
                  vmax=vmax, vmin=vmin, display_mode='xz')

# A reduced dataset can be created by taking the parcel-level average:
# Note that a Parcellations object, whatever its method, offers a `transform`
# call that modifies input features; here it reduces their dimensionality.
# Note that we `fit` before calling `transform`, so that the average signals
# are computed on the fitted brain parcellation.
fmri_reduced = ward.transform(dataset.func)

# Display the corresponding data, compressed through the parcellation
# (parcels=2000).
fmri_compressed = ward.inverse_transform(fmri_reduced)

plotting.plot_epi(index_img(fmri_compressed, 0),
                  cut_coords=cut_coords,
                  title='Ward compressed representation (2000 parcels)',
                  vmin=vmin, vmax=vmax, display_mode='xz')
# As you can see below, this approximation is quite close to the original,
# although there are only 2000 parcels instead of the original 60000 voxels

#########################################################################
# Brain parcellations with KMeans Clustering
# ------------------------------------------
#
# We use the same approach as for the Ward-clustering parcellation above.
# However, in the regime of a small number of clusters, standardization is
# usually desirable: with standardization and smoothing, the clusters form
# spatially coherent regions. A hedged sketch of this step follows.
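# A minimal sketch of the KMeans step, assuming the same Parcellations API
# used for Ward above (parameter values are illustrative):
from nilearn.regions import Parcellations

kmeans = Parcellations(method='kmeans', n_parcels=50,
                       standardize=True, smoothing_fwhm=10.,
                       memory='nilearn_cache', memory_level=1,
                       verbose=1)
kmeans.fit(dataset.func)
kmeans_labels_img = kmeans.labels_img_  # 3D label image of the parcellation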
Example No. 55
"""
############################################################################
# Fetching probabilistic atlas - MSDL atlas
# -----------------------------------------
from nilearn import datasets

atlas_data = datasets.fetch_atlas_msdl()
atlas_filename = atlas_data.maps

#############################################################################
# Visualizing a probabilistic atlas with plot_stat_map and add_overlay object
# ---------------------------------------------------------------------------
from nilearn import plotting, image

# First plot the map for the PCC: index 4 in the atlas
display = plotting.plot_stat_map(image.index_img(atlas_filename, 4),
                                 colorbar=False,
                                 title="DMN nodes in MSDL atlas")

# Now add as an overlay the maps for the ACC and the left and right
# parietal nodes
display.add_overlay(image.index_img(atlas_filename, 5),
                    cmap=plotting.cm.black_blue)
display.add_overlay(image.index_img(atlas_filename, 6),
                    cmap=plotting.cm.black_green)
display.add_overlay(image.index_img(atlas_filename, 3),
                    cmap=plotting.cm.black_pink)

plotting.show()

###############################################################################
Example No. 56
                     backend='c',
                     # trace_folder=trace_folder,
                     n_jobs=n_jobs,
                     )

print('[Example] Learning maps')
t0 = time.time()
dict_fact.fit(func_filenames)
print('[Example] Dumping results')
# Decomposition estimators embed their own masker
masker = dict_fact.masker_
components_img = masker.inverse_transform(dict_fact.components_)
components_img.to_filename(join(trace_folder, 'components.nii.gz'))
elapsed = time.time() - t0  # do not rebind `time`: it would shadow the module
print('[Example] Run in %.2f s' % elapsed)
# Show components from both methods using 4D plotting tools
import matplotlib.pyplot as plt
from nilearn.plotting import plot_prob_atlas, plot_stat_map, show
from nilearn.image import index_img

print('[Example] Displaying')
fig, axes = plt.subplots(2, 1)
plot_prob_atlas(components_img, view_type="filled_contours",
                axes=axes[0])
plot_stat_map(index_img(components_img, 0),
              axes=axes[1],
              colorbar=False,
              threshold=0)
plt.savefig(join(trace_folder, 'components.pdf'))
show()
Example No. 57
# load labels
import pandas as pd
labels = pd.read_csv(haxby_dataset.session_target[0], sep=" ")
stimuli = labels['labels']
# identify resting state labels in order to be able to remove them
task_mask = (stimuli != 'rest')

# find names of remaining active labels
categories = stimuli[task_mask].unique()

# extract tags indicating to which acquisition run a tag belongs
session_labels = labels["chunks"][task_mask]

# apply the task_mask to the fMRI data (func_filename)
from nilearn.image import index_img
task_data = index_img(func_filename, task_mask)

##########################################################################
# Decoding on the different masks
# --------------------------------
#
# The classifier used here is a support vector classifier (svc). We use
# :class:`nilearn.decoding.Decoder` and specify the classifier.
import numpy as np
from nilearn.decoding import Decoder

# Make a data splitting object for cross validation
from sklearn.model_selection import LeaveOneGroupOut, cross_val_score
cv = LeaveOneGroupOut()
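
# Hedged sketch of the decoding step this sets up (not the source's exact
# code): leave-one-run-out cross-validation with the Decoder.
decoder = Decoder(estimator='svc', cv=cv, standardize=True)
decoder.fit(task_data, stimuli[task_mask], groups=session_labels)
print(decoder.cv_scores_)  # per-class cross-validated scores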

##############################################################
Example No. 58
mean_func_img = mean_img(func_filename)

plot_roi(mask_img, mean_func_img, display_mode='y', cut_coords=4, title="Mask")


###########################################################################
# Preprocess data with the NiftiMasker
nifti_masker.fit(func_filename)
fmri_masked = nifti_masker.transform(func_filename)
# fmri_masked is now a 2D matrix, (n_time_points x n_voxels)

###########################################################################
# Run an algorithm
from sklearn.decomposition import FastICA
n_components = 20
ica = FastICA(n_components=n_components, random_state=42)
components_masked = ica.fit_transform(fmri_masked.T).T

###########################################################################
# Reverse masking, and display the corresponding map
components = nifti_masker.inverse_transform(components_masked)

# Visualize results
from nilearn.plotting import plot_stat_map, show
from nilearn.image import index_img

plot_stat_map(index_img(components, 0), mean_func_img,
              display_mode='y', cut_coords=4, title="Component 0")

show()
Example No. 59

    # Grab extracted components, unmasked back to a Nifti image.
    # Note: for nilearn versions older than 0.4.1, components_img_
    # is not implemented. See the Note section above for details.
    components_img = estimator.components_img_
    components_img.to_filename('%s_resting_state.nii.gz' %
                               names[estimator])
    components_imgs.append(components_img)

###############################################################################
# Visualize the results
# ----------------------
from nilearn.plotting import (plot_prob_atlas, find_xyz_cut_coords, show,
                              plot_stat_map)
from nilearn.image import index_img

# Selecting specific maps to display: maps were manually chosen to be similar
indices = {dict_learning: 25, canica: 33}
# We select relevant cut coordinates for displaying
cut_component = index_img(components_imgs[0], indices[dict_learning])
cut_coords = find_xyz_cut_coords(cut_component)
for estimator, components in zip(estimators, components_imgs):
    # 4D plotting
    plot_prob_atlas(components, view_type="filled_contours",
                    title="%s" % names[estimator],
                    cut_coords=cut_coords, colorbar=False)
    # 3D plotting
    plot_stat_map(index_img(components, indices[estimator]),
                  title="%s" % names[estimator],
                  cut_coords=cut_coords, colorbar=False)
show()