def get_ICA_parcellation(map_files,
                         mask_loc,
                         working_dir,
                         second_level_dir,
                         n_comps=20,
                         smoothing=4.4,
                         filename=None):
    try:
        map_files = flatten(map_files.values())
    except AttributeError:
        pass
    group_mask = nibabel.load(mask_loc)
    # get components
    canica = CanICA(
        mask=group_mask,
        n_components=n_comps,
        smoothing_fwhm=smoothing,
        memory=path.join(working_dir, "nilearn_cache"),
        memory_level=2,
        threshold=3.,
        verbose=10,
        random_state=0)  # multi-level components modeling across subjects
    canica.fit(map_files)
    masker = canica.masker_
    components_img = masker.inverse_transform(canica.components_)
    if filename is not None:
        prefix = filename + '_'
        components_img.to_filename(
            path.join(second_level_dir, 'parcellation',
                      '%scanica%s.nii.gz' % (prefix, n_comps)))
    return components_img
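# A minimal usage sketch for the function above (the paths and the per-subject
# z-map dict are placeholders; `flatten` is assumed to be a small helper that
# flattens the dict values into one list of filenames).
components_img = get_ICA_parcellation(
    map_files={'sub-01': ['/path/to/sub-01_zmap.nii.gz']},
    mask_loc='/path/to/group_mask.nii.gz',
    working_dir='/path/to/working_dir',
    second_level_dir='/path/to/second_level_dir',
    n_comps=20,
    smoothing=4.4,
    filename='task')  # saved as task_canica20.nii.gz under <second_level_dir>/parcellation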
Example #2
def run(args=None, config=None):
    parser = AnalysisParser('config')
    args = parser.parse_analysis_args(args)
    config = args.config

    subs = ['CoRe_011', 'CoRe_023', 'CoRe_054', 'CoRe_079', 'CoRe_082', 'CoRe_087', 'CoRe_094', 'CoRe_100',
            'CoRe_107', 'CoRe_155', 'CoRe_192', 'CoRe_195', 'CoRe_220', 'CoRe_235', 'CoRe_267', 'CoRe_268']

    base_path = '/media/sf_hcp/sleepdata/'


    for sb_i in np.arange(0, len(subs)):

        sb = subs[sb_i]
        day1_fmri = glob.glob(base_path + sb + '/proc/*nii')
        day1_vmrk = glob.glob(base_path + sb + '/proc/*Day*1*_N*vmrk')
        print(day1_fmri)
        
        fmri = nib.load(day1_fmri[0])
        
        canica = CanICA(n_components=40, smoothing_fwhm=6.,
                        threshold=None, verbose=10, random_state=0)
        
        
        fmri_info = helpers.fmri_info(day1_fmri[0])
        canica.fit(fmri)
        cimg = canica.components_img_.get_fdata()
        TR = fmri_info[0]
        tr_times = np.arange(0, 30, TR)
        hrf = helpers.get_hrf(tr_times)
        #        %matplotlib auto 

        for i in np.arange(0,40):
            plt.subplot(4,10,i+1)
            plt.imshow(np.max(cimg[:,:,:,i],axis=2))
            plt.title(str(i))
            
            
        # in order: DMN, auditory, visual, lingual, parietal, striatal, thalamic
        comps = [35,28,30,39,8,9,32]
        allcomp_ts = canica.transform([fmri])[0].transpose()
        comps_ts = allcomp_ts[comps,:]
        
        network_labs = ['DMN','auditory','visual','lingual',
                       'parietal','striatal','thalamic']
       
        for i in np.arange(0,len(comps)):
            plt.subplot(2,5,i+1)
            plt.imshow(np.max(cimg[:,:,:,comps[i]],axis=2))
            plt.title(network_labs[i])
            
        np.save(str.replace(day1_fmri[0],'.nii','_comps'), comps_ts)
Example #3
    def _estimate_atlas_weights(self, images, params):

        nii_images, nii_mask = convert_images_nifti(images)
        bg_offset = 1 if params.get('force_bg', False) else 0

        canica = CanICA(mask=nii_mask,
                        n_components=params.get('nb_labels') - bg_offset,  # - 1
                        mask_strategy='background',
                        threshold='auto',
                        n_init=5,
                        n_jobs=1,
                        verbose=0)
        canica.fit(nii_images)
        components = np.argmax(canica.components_, axis=0) + bg_offset  # + 1
        atlas = components.reshape(images[0].shape)

        atlas = segmentation.relabel_sequential(atlas)[0]

        weights = [weights_image_atlas_overlap_major(img, atlas) for img in self._images]
        weights = np.array(weights)

        return atlas, weights, None
Example #4
class roi_time_series:
	def __init__(self, canica=None):
		self.canica = canica

	def _set_ICA(self, imgs, n_components=20, verbose=0):
		self.canica = CanICA(n_components=n_components, smoothing_fwhm=6.,
							memory="nilearn_cache", memory_level=2,
							threshold=3., verbose=verbose, random_state=0)
		self._fit_ICA(imgs)

	def _fit_ICA(self, imgs):
		self.canica.fit(imgs)
        
	def get_mask(self, component=0):
		component_img = image.index_img(self.canica.components_img_, component)
		return np.argwhere(component_img.get_fdata().astype(bool))

	def get_ROI_time_series(self, imgs, component=0, n_components=20, verbose=False):

		#smooth image
		fmri_original = image.load_img(imgs)
		fmri_img = image.smooth_img(fmri_original, fwhm=6)

		#perform ICA and get components
		if self.canica is None:
			if verbose:
				print("New ICA computation")
			self._set_ICA(fmri_img, n_components=n_components)

		components_img = self.canica.components_img_

		#build masker from a binarized map of the selected component
		component_mask = image.math_img('img != 0',
							img=image.index_img(components_img, component))
		roi_masker = NiftiMasker(mask_img=component_mask,
							standardize=True,
							memory="nilearn_cache",
							smoothing_fwhm=8)

		#extract the ROI time series
		return roi_masker.fit_transform(fmri_original)
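# A minimal usage sketch for the class above (the functional image path is a
# placeholder; component 0 and 20 components are illustrative values only).
rts = roi_time_series()
roi_ts = rts.get_ROI_time_series('/path/to/func.nii.gz', component=0,
                                 n_components=20, verbose=True)
roi_voxels = rts.get_mask(component=0)  # voxel indices of the selected component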
Example #5
if (args.canica):
    from nilearn.decomposition import CanICA

    logger.info("Canica algorithm starting...")
    canica = CanICA(n_components=args.n_components,
                    smoothing_fwhm=args.fwhm,
                    memory_level=2,
                    threshold=3.,
                    verbose=args.verbose,
                    random_state=0,
                    n_jobs=args.n_jobs,
                    t_r=TR,
                    standardize=args.standarize,
                    high_pass=args.highpass,
                    low_pass=args.lowpass)
    canica.fit(func_filenames, confounds=confounds_components)

    # Retrieve the independent components in brain space
    components_img = canica.masker_.inverse_transform(canica.components_)
    # components_img is a Nifti Image object, and can be saved to a file with
    # the following line:
    canica_dir = data_dir + '/' + split + '/' + 'canica'
    create_dir(canica_dir)
    filename = canica_dir + '/' + fwhm + '_resting_state_all.nii.gz'
    # save components image
    components_img.to_filename(filename)
    # update configuration file
    experiment["files_path"]["brain_atlas"]["components_img"] = filename
    local_config["experiment"] = experiment

    # -----------------------------------------------------------------
Example #6
canica = CanICA(n_components=n_components,
                mask=dataset.mask,
                smoothing_fwhm=None,
                do_cca=True,
                threshold=3.,
                n_init=10,
                standardize=True,
                random_state=42,
                n_jobs=2,
                verbose=2)

CANICA_PATH = set_data_base_dir('Dynacomp/canica')
output_file = os.path.join(CANICA_PATH,
                           'canica_' + str(n_components) + '.nii.gz')

if not os.path.isfile(output_file):
    canica.fit(func)
    components_img = canica.masker_.inverse_transform(canica.components_)
    components_img.to_filename(output_file)
else:
    components_img = output_file

FIG_PATH = set_figure_base_dir('canica')

for i, cur_img in enumerate(iter_img(components_img)):
    output_file = os.path.join(FIG_PATH,
                               'canica_' + str(n_components) + '_IC%d' % i)
    plot_stat_map(cur_img,
                  display_mode="z",
                  title="IC %d" % i,
                  cut_coords=1,
                  colorbar=False,
                  output_file=output_file)
Example #7
func_filenames = subject_filename  # 4D functional image(s) for this subject

# print basic information on the dataset
print('First functional nifti image (4D) is at: %s' %
      func_filenames)  # 4D data

from nilearn.decomposition import CanICA

canica = CanICA(n_components=20,
                memory="nilearn_cache",
                memory_level=2,
                verbose=10,
                mask_strategy='template',
                random_state=0)
canica.fit(func_filenames)

# Retrieve the independent components in brain space. Directly
# accessible through the attribute `components_img_`.
canica_components_img = canica.components_img_
# components_img is a Nifti Image object, and can be saved to a file with
# the following line:
canica_components_img.to_filename(
    '/home/bk/Desktop/bkrest/canica_resting_state.nii.gz')

from nilearn.plotting import plot_prob_atlas

# Plot all ICA components together
plot_prob_atlas(canica_components_img, title='All ICA components')

from nilearn.image import iter_img
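# The iter_img import above is left dangling in this excerpt; a plausible
# continuation (mirroring the per-component loop used in the other examples
# below) plots each independent component separately.
from nilearn.plotting import plot_stat_map, show

for i, cur_img in enumerate(iter_img(canica_components_img)):
    plot_stat_map(cur_img, display_mode="z", title="IC %d" % i,
                  cut_coords=1, colorbar=False)
show()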
Example #8
                memory="nilearn_cache",
                memory_level=2,
                threshold=None,
                n_init=10,
                verbose=1,
                mask_strategy='epi',
                smoothing_fwhm=6,
                detrend=True,
                high_pass=0.008,
                t_r=0.72)

## Run ICA
start = time.time()
print("RUNNING ICA")

canica.fit(nifti_list)

end = time.time()
print('Elapsed time %f' % (end - start))

# Save-stuff
canica.components_img_.to_filename('wm_ica_components_d%i.nii.gz' %
                                   (n_components))
canica.mask_img_.to_filename('wm_ica_mask.nii.gz')

## Project all data into ICA space
print('Transforming data...')
start = time.time()
X = canica.transform(nifti_list)
end = time.time()
print('Elapsed time %f' % (end - start))
Example #9
adhd_dataset = datasets.fetch_adhd(n_subjects=30)
func_filenames = adhd_dataset.func  # list of 4D nifti files for each subject

# print basic information on the dataset
print('First functional nifti image (4D) is at: %s' %
      func_filenames[0])  # 4D data


####################################################################
# Here we apply CanICA on the data
from nilearn.decomposition import CanICA

canica = CanICA(n_components=20, smoothing_fwhm=6.,
                memory="nilearn_cache", memory_level=5,
                threshold=3., verbose=10, random_state=0)
canica.fit(func_filenames)

# Retrieve the independent components in brain space
components_img = canica.masker_.inverse_transform(canica.components_)
# components_img is a Nifti Image object, and can be saved to a file with
# the following line:
components_img.to_filename('canica_resting_state.nii.gz')


####################################################################
# To visualize we plot the outline of all components on one figure
from nilearn.plotting import plot_prob_atlas

# Plot all ICA components together
plot_prob_atlas(components_img, title='All ICA components')
Example #10
from nilearn.image import iter_img


dataset = load_dynacomp()
func = dataset.func1

n_components = 20
canica = CanICA(n_components=n_components, mask=dataset.mask,
                smoothing_fwhm=None, do_cca=True, threshold=3.,
                n_init=10, standardize=True, random_state=42,
                n_jobs=2, verbose=2)

CANICA_PATH = set_data_base_dir('Dynacomp/canica')
output_file = os.path.join(CANICA_PATH,
                           'canica_' + str(n_components) + '.nii.gz')

if not os.path.isfile(output_file):
    canica.fit(func)
    components_img = canica.masker_.inverse_transform(canica.components_)
    components_img.to_filename(output_file)
else:
    components_img = output_file

FIG_PATH = set_figure_base_dir('canica')

for i, cur_img in enumerate(iter_img(components_img)):
    output_file = os.path.join(FIG_PATH,
                               'canica_' + str(n_components) + '_IC%d' % i)
    plot_stat_map(cur_img, display_mode="z", title="IC %d" % i, cut_coords=1,
                  colorbar=False, output_file=output_file)
Example #11
    registrator.output_dir = os.path.join('ica', animal_id)
    registrator.fit_anat(anat)
    registrator.fit_modality(func,
                             'func',
                             t_r=1.,
                             voxel_size=(.3, .3, .3),
                             prior_rigid_body_registration=True)
    registered_funcs.append(registrator.registered_func_)

##############################################################################
# Run ICA
# -------
from nilearn.decomposition import CanICA

canica = CanICA(n_components=30, smoothing_fwhm=.3, n_jobs=-1)
canica.fit(registered_funcs)

##############################################################################
# Retrieve the independent components in brain space.
components_img = canica.masker_.inverse_transform(canica.components_)

##############################################################################
# Visualize the components
# ------------------------
# We can plot the outline of all components on one figure.
from nilearn import plotting

plotting.plot_prob_atlas(components_img,
                         bg_img=registrator.template_brain_,
                         display_mode='z',
                         title='All ICA components')
Example #12
# perform an ICA given the subset of the data. 
ica = CanICA(n_components=20,
             random_state=0)

ica.fit(img)
components_img = ica.components_img_
plot_prob_atlas(components_img, title='All ICA components')
plt.show()

components_img_1st = image.index_img(components_img, 0)
ica_time_series = create_mask(components_img_1st, img)
ica_cor_matrix = calc_correlation_matrix(ica_time_series)
plot_cor_matrix(ica_cor_matrix, 'ICA correlation matrix')




'''Now let's take a look at the Schizophrenic subject to compare'''
path_sz = '/Volumes/Byrgenwerth/Datasets/UCLA Consortium for Neuropsychiatric Phenomics LA5c Study/sub-50006/func/sub-50006_task-rest_bold.nii.gz'
img_sz = load_image(path_sz)
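# The excerpt ends here; a plausible continuation repeats the pipeline above on
# the schizophrenic subject's data (create_mask, calc_correlation_matrix and
# plot_cor_matrix are the same helpers used for the first subject).
ica_sz = CanICA(n_components=20, random_state=0)
ica_sz.fit(img_sz)
components_img_sz = ica_sz.components_img_
plot_prob_atlas(components_img_sz, title='All ICA components (SZ subject)')
plt.show()

components_img_sz_1st = image.index_img(components_img_sz, 0)
ica_time_series_sz = create_mask(components_img_sz_1st, img_sz)
ica_cor_matrix_sz = calc_correlation_matrix(ica_time_series_sz)
plot_cor_matrix(ica_cor_matrix_sz, 'ICA correlation matrix (SZ subject)')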
Example #13
def run(args=None, config=None):
    parser = AnalysisParser('config')
    args = parser.parse_analysis_args(args)
    config = args.config

    eeg_path = '/media/sf_shared/graddata/ica_denoised_raw.fif'
    fmri_path = '/media/sf_shared/CoRe_011/rfMRI/d2/11-BOLD_Sleep_BOLD_Sleep_20150824220820_11.nii'
    vmrk_path = '/media/sf_shared/CoRe_011/eeg/CoRe_011_Day2_Night_01.vmrk'
    event_ids, event_lats = helpers.read_vmrk(vmrk_path)

    event_lats = np.array(event_lats)
    grad_inds = [
        index for index, value in enumerate(event_ids) if value == 'R1'
    ]
    grad_inds = np.array(grad_inds)
    grad_lats = event_lats[grad_inds]
    grad_lats = grad_lats / 20  # resample from 5000Hz to 250Hz
    start_ind = int(grad_lats[0])
    end_ind = int(grad_lats[-1])

    canica = CanICA(n_components=40,
                    smoothing_fwhm=6.,
                    threshold=None,
                    verbose=10,
                    random_state=0)

    fmri = nib.load(fmri_path)
    # get TR, n_slices, and n_TRs
    fmri_info = helpers.fmri_info(fmri_path)
    canica.fit(fmri)
    cimg = canica.components_img_.get_fdata()
    TR = fmri_info[0]
    tr_times = np.arange(0, 30, TR)
    hrf = get_hrf(tr_times)

    # plot components
    for i in np.arange(0, 40):
        plt.subplot(4, 10, i + 1)
        plt.imshow(np.max(cimg[:, :, :, i], axis=2))

    # get the EEG
    raw = mne.io.read_raw_fif(eeg_path, preload=True)
    raw_data = raw.get_data()

    # get power spectrum for different sleep stages (BOLD)
    comps = canica.transform([fmri])[0].transpose()
    bold_srate = 1 / fmri_info[0]
    bold_epochl = int(7500 / (250 / bold_srate))

    #bold_pxx,bold_f = pxx_bold_component_epoch(comps, bold_srate, 250, bold_epochl, sleep_stages)
    #eeg_pxx,eeg_f = pxx_eeg_epochs(raw_data, sleep_stages, 7500)

    # concatenate the epochs, then compute the psd
    # 1) get triggers, 2) concatenate data, 3) compute psd

    def get_trigger_inds(trigger_name, event_ids):
        trig_inds = [
            index for index, value in enumerate(event_ids)
            if value == trigger_name
        ]
        return trig_inds

    def epoch_triggers(raw_data, lats, pre_samples, post_samples):
        epochs = np.zeros(
            (raw_data.shape[0], lats.shape[0], pre_samples + post_samples))
        for lat in np.arange(0, lats.shape[0]):
            epochs[:, lat, :] = raw_data[:, lats[lat] - pre_samples:lats[lat] +
                                         post_samples]

        return epochs

    trigger_names = ['wake', 'NREM1', 'NREM2', 'NREM3']
    """
    epoch BOLD and get power for different trigger types 
    what you actually want is single trial EEG and BOLD psd

    first get all the indices that are contained within the BOLD timeseries
    then, get the EEG power spectrum values within those same indices 
    """
    eeg_srate = 250
    bold_pre_samples = 15
    bold_post_samples = 25
    eeg_pre_samples = int(bold_pre_samples * fmri_info[0] * eeg_srate)
    eeg_post_samples = int(bold_post_samples * fmri_info[0] * eeg_srate)
    bold_conversion = eeg_srate / (1 / fmri_info[0])

    all_bold_epochs = []
    all_eeg_epochs = []
    for trig in np.arange(0, len(trigger_names)):
        trig_inds = get_trigger_inds(trigger_names[trig], event_ids)
        trig_lats = event_lats[trig_inds]
        bold_lats = ((trig_lats - start_ind) / bold_conversion).astype(int)
        bads = np.where((bold_lats - bold_pre_samples < 0)
                        | (bold_lats + bold_post_samples >= comps.shape[1]))

        bold_lats = np.delete(bold_lats, bads, axis=0)
        eeg_lats = np.delete(trig_lats, bads, axis=0)

        bold_epochs = epoch_triggers(comps, bold_lats, bold_pre_samples,
                                     bold_post_samples)
        eeg_epochs = epoch_triggers(raw_data, eeg_lats, eeg_pre_samples,
                                    eeg_post_samples)

        all_bold_epochs.append(bold_epochs)
        all_eeg_epochs.append(eeg_epochs)

    # compute power
    for i in np.arange(0, len(all_eeg_epochs)):
        eeg_epochs = all_eeg_epochs[i]
        bold_epochs = all_bold_epochs[i]
        bold_f, bold_pxx = signal.welch(bold_epochs)
        eeg_f, eeg_pxx = signal.welch(eeg_epochs)

    gauss = signal.gaussian(eeg_srate, 20)
    gauss = gauss / np.sum(gauss)

    # frequency band edges in Hz
    freqs = np.array([[1., 3.], [4., 7.], [8., 15.], [17., 30.], [30., 80.]])

    chan_freqs = filter_and_downsample(raw_data, comps, freqs, start_ind,
                                       end_ind)
    conved = convolve_chanfreqs(np.log(chan_freqs), hrf)

    # epoch all the hrf-convolved filtered EEG power
    all_conved_epochs = []
    for trig in np.arange(0, len(trigger_names)):
        trig_inds = get_trigger_inds(trigger_names[trig], event_ids)
        trig_lats = event_lats[trig_inds]
        bold_lats = ((trig_lats - start_ind) / bold_conversion).astype(int)
        bads = np.where((bold_lats - bold_pre_samples < 0)
                        | (bold_lats + bold_post_samples >= comps.shape[1]))
        bold_lats = np.delete(bold_lats, bads, axis=0)

        conved_epochs = np.zeros(
            (conved.shape[0], conved.shape[1], bold_lats.shape[0],
             bold_pre_samples + bold_post_samples))
        for i in np.arange(0, conved.shape[1]):
            conved_epochs[:, i, :] = epoch_triggers(conved[:, i, :], bold_lats,
                                                    bold_pre_samples,
                                                    bold_post_samples)

        all_conved_epochs.append(conved_epochs)

    sig1 = chan_freqs[3, 2, :]
    sig2 = comps[0, :]
    sig2 = butter_bandpass_filter(sig2, 0.005, 0.1, 1 / fmri_info[0])
    nlags = 50

    def xcorr(sig1, sig2, nlags):
        vec_l = sig1.shape[0] - nlags
        xcorrs = np.zeros(nlags)
        vec1 = sig1[int(sig1.shape[0] / 2 - vec_l / 2):int(sig1.shape[0] / 2 +
                                                           vec_l / 2)]
        start_p = 0
        for i in np.arange(0, nlags):
            vec2 = sig2[(start_p + i):(start_p + vec_l + i)]
            xcorrs[i] = np.corrcoef(vec1, vec2)[0, 1]

        return xcorrs

    all_xcorrs = []
    for i in np.arange(0, len(all_conved_epochs)):

        xc_i = np.zeros(
            (1, all_conved_epochs[i].shape[1], all_conved_epochs[i].shape[2],
             all_bold_epochs[i].shape[0], 20))

        for j in np.arange(0, 1):
            print(j)
            for k in np.arange(0, all_conved_epochs[i].shape[1]):
                for el in np.arange(0, all_conved_epochs[i].shape[2]):
                    for m in np.arange(0, all_bold_epochs[i].shape[0]):
                        xc_i[j, k, el,
                             m, :] = xcorr(all_conved_epochs[i][5, k, el, :],
                                           all_bold_epochs[i][m, el, :], 20)

        all_xcorrs.append(xc_i)

    plt.plot(np.mean(all_xcorrs[1][0, 1, :, 0, :], axis=0))
    plt.plot(np.mean(all_xcorrs[2][0, 1, :, 0, :], axis=0))
    plt.plot(np.mean(all_xcorrs[3][0, 1, :, 0, :], axis=0))

    # correlate power across different epochs
    """
Example #14
#########################################################################
# Preprocessing
#func_data_filename = os.path.join(data_dir, func_data_filename)
#sub_img = index_img(load_img(func_data_filename), slice(idx_start, None))
#sub_img.to_filename(os.path.join(data_dir,"func.nii"))

subject_data = do_subjects_preproc(jobfile, dataset_dir=data_dir)[0]
fmri_img = load_img(subject_data.func[0])

#########################################################################
# ICA analysis
ica_output_dir = os.path.join(output_dir, "ica")
if not os.path.exists(ica_output_dir):
    os.makedirs(ica_output_dir)
canica = CanICA(n_components=20,
                smoothing_fwhm=6.,
                memory="nilearn_cache",
                memory_level=2,
                threshold=0.6,
                verbose=10,
                random_state=random_state)
canica.fit(fmri_img)
components_img = canica.masker_.inverse_transform(canica.components_)
for i, cur_img in enumerate(iter_img(components_img)):
    display = niplt.plot_stat_map(cur_img,
                                  cut_coords=pcc_coords,
                                  title="IC {0}".format(i))
    display.savefig(os.path.join(ica_output_dir, "ica_n_{0}.png".format(i)))
    cur_img.to_filename(
        os.path.join(ica_output_dir, "ica_n_{0}.nii.gz".format(i)))
Example #15
def run_canica_subject(
        sub_id,
        cond_id='D1_D5',
        ds_dir='/data/BnB_USER/oliver/somato',
        out_basedir='/home/homeGlobal/oli/somato/scratch/ica/CanICA',
        ncomps=50,
        smoothing=3,
        caa=True,
        standard=True,
        detr=True,
        highpass=.01953125,
        tr=2.,
        masktype='epi',
        ninit=10,
        seed=42,
        verb=10):
    """
    Run Nilearn's CanICA on a single condition of a single subject.
    """
    # load example image
    bold_file = pjoin(ds_dir, sub_id, cond_id, 'data.nii.gz')
    bold_img = load_img(bold_file)
    # paths to output
    out_dir = pjoin(out_basedir, sub_id, cond_id)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_comp_nii = pjoin(out_dir, 'components.nii.gz')
    out_components_arr = pjoin(out_dir, 'components.npy')
    out_png = pjoin(out_dir, 'components_probatlas.png')

    # set up ica
    ica = CanICA(n_components=ncomps,
                 smoothing_fwhm=smoothing,
                 do_cca=caa,
                 standardize=standard,
                 detrend=detr,
                 mask_strategy=masktype,
                 high_pass=highpass,
                 t_r=tr,
                 n_init=ninit,
                 random_state=seed,
                 verbose=verb)
    # other arguments of interest:
    # mask_strategy (e.g. a template-based mask), mask_args (see nilearn.masking.compute_epi_mask), threshold=3.

    # fit ica
    ica.fit(bold_img)

    # save components as 4d nifti
    components_img = ica.components_img_
    components_img.to_filename(out_comp_nii)
    # plot components as prob atlas and save plot
    g = plot_prob_atlas(components_img, bg_img=mean_img(bold_img))
    g.savefig(out_png, dpi=300)
    # save components as 2d np array
    components_arr = ica.components_
    np.save(out_components_arr, components_arr)
    # save automatically generated epi mask
    if masktype == 'epi':
        mask_img = ica.mask_img_
        out_mask_img = pjoin(out_dir, 'mask_img.nii.gz')
        mask_img.to_filename(out_mask_img)

    return ica  # return ica object for later use
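# A minimal usage sketch for the function above (the subject and condition IDs
# are placeholders; the default dataset and output paths are kept).
if __name__ == '__main__':
    ica = run_canica_subject(sub_id='sub-01', cond_id='D1_D5', ncomps=50, verb=10)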
Example #16
      mean_C_matrix_sch_S2R1 = C_matrices_sch_S2R1.mean(axis=0)
      print('Mean correlation has shape {0}.'.format(mean_C_matrix_sch_S2R1.shape))
      mean_PC_matrix_yeo_S2R1 = PC_matrices_yeo_S2R1.mean(axis=0)
      print('Mean correlation has shape {0}.'.format(mean_PC_matrix_yeo_S2R1.shape))
      mean_PC_matrix_sch_S2R1 = PC_matrices_sch_S2R1.mean(axis=0)
      print('Mean correlation has shape {0}.'.format(mean_PC_matrix_sch_S2R1.shape))




##### Deriving our own ICA components #####
from nilearn.decomposition import CanICA
canica = CanICA(n_components=7, smoothing_fwhm=5.,
                memory="nilearn_cache", memory_level=2,
                threshold=5., verbose=10, random_state=0)
canica.fit(path_img_S2R1)
#Saving the components as Nifti
components_img = canica.components_img_
components_img.to_filename('/Volumes/Harddrive_CTS/MRI/Placebo_1_Analysis/canica_resting_state.nii.gz')
#Visualize all components on a plot
%matplotlib gtk
from nilearn.plotting import plot_prob_atlas
import matplotlib.pyplot as plt
from nilearn import plotting
plot_prob_atlas(components_img, title='All 7 ICA components')
# Mapping each ICA component separately
from nilearn.image import iter_img
from nilearn.plotting import plot_stat_map, show
for i, cur_img in enumerate(iter_img(components_img)):
    plot_stat_map(cur_img, display_mode = "z", title = "IC %d" % i,
                  cut_coords = 1, colorbar = False)
Example #17
    def _run_interface(self, runtime):

        from nilearn.decomposition import CanICA, DictLearning
        from nilearn.plotting import plot_stat_map, plot_prob_atlas
        from nilearn.image import iter_img
        from nilearn.input_data import NiftiMapsMasker
        import numpy as np

        canica = CanICA(n_components=self.inputs.n_components,
                        smoothing_fwhm=None,
                        low_pass=self.inputs.low_pass,
                        high_pass=self.inputs.high_pass,
                        t_r=self.inputs.tr,
                        do_cca=True,
                        detrend=True,
                        standardize=True,
                        threshold='auto',
                        random_state=0,
                        n_jobs=2,
                        memory_level=2,
                        memory="nilearn_cache")

        canica.fit(self.inputs.in_file, confounds=self.inputs.confounds_file)
        # canica.fit(self.inputs.in_file)

        components_img = canica.components_img_
        components_img.to_filename('descomposition_canica.nii.gz')

        # plot_prob_atlas(components_img, title='All ICA components')


        masker = NiftiMapsMasker(maps_img=components_img, standardize=True, memory='nilearn_cache', verbose=5)
        time_series_ica = masker.fit_transform(self.inputs.in_file)

        np.savetxt('time_series_ica.csv', np.asarray(time_series_ica), delimiter=',')

        for i, cur_img in enumerate(iter_img(components_img)):
            display = plot_stat_map(cur_img, display_mode="ortho", colorbar=True)
            display.savefig('ica_ic_' + str(i + 1) + '.png')
            display.close()

        dict_learning = DictLearning(n_components=self.inputs.n_components,
                                     smoothing_fwhm=None,
                                     low_pass=self.inputs.low_pass,
                                     high_pass=self.inputs.high_pass,
                                     t_r=self.inputs.tr,
                                     detrend=True,
                                     standardize=True,
                                     random_state=0,
                                     n_jobs=-2,
                                     memory_level=2,
                                     memory="nilearn_cache",
                                     mask_strategy = 'template')

        dict_learning.fit(self.inputs.in_file, confounds=self.inputs.confounds_file)

        components_img = dict_learning.components_img_
        components_img.to_filename('descomposition_dict.nii.gz')

        for i, cur_img in enumerate(iter_img(components_img)):
            display = plot_stat_map(cur_img, display_mode="ortho", colorbar=True)
            display.savefig('dic_ic_' + str(i + 1) + '.png')
            display.close()

        return runtime