def run(args=None, config=None):
    """Run CanICA decomposition on day-1 sleep fMRI for each CoRe subject.

    For every subject: load the preprocessed NIfTI, fit a 40-component
    CanICA, plot the components, pull out seven networks of interest from
    the component time series, and save them next to the input file as
    ``*_comps.npy``.

    Parameters
    ----------
    args : list | None
        Raw CLI arguments forwarded to ``AnalysisParser``.
    config : unused
        Kept for interface compatibility; it is overwritten from the
        parsed args below.
    """
    parser = AnalysisParser('config')
    args = parser.parse_analysis_args(args)
    config = args.config

    subs = ['CoRe_011', 'CoRe_023', 'CoRe_054', 'CoRe_079',
            'CoRe_082', 'CoRe_087', 'CoRe_094', 'CoRe_100',
            'CoRe_107', 'CoRe_155', 'CoRe_192', 'CoRe_195',
            'CoRe_220', 'CoRe_235', 'CoRe_267', 'CoRe_268']
    base_path = '/media/sf_hcp/sleepdata/'

    # BUG FIX: the original loop body immediately reassigned `sb_i = 1`
    # (leftover debugging), so every iteration re-processed subject index 1.
    # The override is removed so all subjects are processed.
    # NOTE(review): the source had lost its line structure; the assumption
    # that everything below runs per-subject (inside this loop) is a
    # reconstruction — verify against a pristine copy if available.
    for sb_i in range(len(subs)):
        sb = subs[sb_i]
        day1_fmri = glob.glob(base_path + sb + '/proc/*nii')
        day1_vmrk = glob.glob(base_path + sb + '/proc/*Day*1*_N*vmrk')
        print(day1_fmri)

        fmri = nib.load(day1_fmri[0])
        canica = CanICA(n_components=40, smoothing_fwhm=6., threshold=None,
                        verbose=10, random_state=0)
        fmri_info = helpers.fmri_info(day1_fmri[0])
        canica.fit(fmri)
        cimg = canica.components_img_.get_data()

        # fmri_info[0] holds the repetition time; build a 30 s HRF kernel
        # sampled at the TR (kernel currently unused later in this block).
        TR = fmri_info[0]
        tr_times = np.arange(0, 30, TR)
        hrf = helpers.get_hrf(tr_times)

        # %matplotlib auto
        # Quick-look plot: max projection over z for all 40 components.
        for i in np.arange(0, 40):
            plt.subplot(4, 10, i + 1)
            plt.imshow(np.max(cimg[:, :, :, i], axis=2))
            plt.title(str(i))

        # in order: DMN, auditory, visual, lingual, parietal, striatal, thalamic
        # NOTE(review): these component indices were evidently picked by eye
        # for this decomposition (random_state=0); confirm they still match
        # if the ICA inputs change.
        comps = [35, 28, 30, 39, 8, 9, 32]
        allcomp_ts = canica.transform([fmri])[0].transpose()
        comps_ts = allcomp_ts[comps, :]
        network_labs = ['DMN', 'auditory', 'visual', 'lingual',
                        'parietal', 'striatal', 'thalamic']
        for i in np.arange(0, len(comps)):
            plt.subplot(2, 5, i + 1)
            plt.imshow(np.max(cimg[:, :, :, comps[i]], axis=2))
            plt.title(network_labs[i])

        # Save the selected component time series beside the input NIfTI.
        np.save(day1_fmri[0].replace('.nii', '_comps'), comps_ts)
# NOTE(review): this chunk begins mid-expression — the opening
# `CanICA(...` call that these keyword arguments (detrend/high_pass/t_r)
# belong to is not visible in this part of the file, so the fragment is
# left byte-identical rather than reconstructed.
# Visible flow: fit CanICA on `nifti_list` (timing the fit), save the
# component image and mask as NIfTI files, project the data with
# canica.transform (timing it), and save the resulting time series plus
# the input list via sio.savemat.
detrend=True, high_pass=0.008, t_r=0.72) ## Run ICA start = time.time() print("RUNNING ICA") canica.fit(nifti_list) end = time.time() print('Elapsed time %f' % (end - start)) # Save-stuff canica.components_img_.to_filename('wm_ica_components_d%i.nii.gz' % (n_components)) canica.mask_img_.to_filename('wm_ica_mask.nii.gz') ## Project all data into ICA space print('Transforming data...') start = time.time() X = canica.transform(nifti_list) end = time.time() print('Elapsed time %f' % (end - start)) # Save result_dict = dict() result_dict['X'] = X result_dict['data_list'] = nifti_list sio.savemat('wm_ica_ts_d%i' % (n_components), result_dict)
def run(args=None, config=None):
    """EEG-fMRI sleep analysis for one subject (CoRe_011, day 2).

    Pipeline:
      1. Read EEG events from the BrainVision .vmrk file and locate the
         gradient ('R1') triggers that bracket the fMRI acquisition.
      2. Fit a 40-component CanICA on the fMRI run and keep its component
         time series.
      3. Epoch the BOLD component time series and the raw EEG around the
         sleep-stage triggers (wake / NREM1-3), dropping events whose
         window falls outside the BOLD run.
      4. Band-filter EEG power, convolve it with an HRF, epoch it the same
         way, and cross-correlate it with the BOLD epochs.

    NOTE(review): the source had lost its line structure; the statement
    nesting below is a reconstruction — verify against a pristine copy.

    Parameters
    ----------
    args : list | None
        Raw CLI arguments forwarded to ``AnalysisParser``.
    config : unused
        Kept for interface compatibility; overwritten from parsed args.
    """
    parser = AnalysisParser('config')
    args = parser.parse_analysis_args(args)
    config = args.config

    eeg_path = '/media/sf_shared/graddata/ica_denoised_raw.fif'
    fmri_path = '/media/sf_shared/CoRe_011/rfMRI/d2/11-BOLD_Sleep_BOLD_Sleep_20150824220820_11.nii'
    vmrk_path = '/media/sf_shared/CoRe_011/eeg/CoRe_011_Day2_Night_01.vmrk'

    event_ids, event_lats = helpers.read_vmrk(vmrk_path)
    event_lats = np.array(event_lats)
    # 'R1' markers are the MR gradient triggers; first/last delimit the scan.
    grad_inds = [index for index, value in enumerate(event_ids)
                 if value == 'R1']
    grad_inds = np.array(grad_inds)
    grad_lats = event_lats[grad_inds]
    grad_lats = grad_lats / 20  # resample from 5000Hz to 250Hz
    start_ind = int(grad_lats[0])
    end_ind = int(grad_lats[-1])

    canica = CanICA(n_components=40, smoothing_fwhm=6., threshold=None,
                    verbose=10, random_state=0)
    fmri = nib.load(fmri_path)
    # get TR, n_slices, and n_TRs
    fmri_info = helpers.fmri_info(fmri_path)
    canica.fit(fmri)
    cimg = canica.components_img_.get_data()

    TR = fmri_info[0]
    tr_times = np.arange(0, 30, TR)  # 30 s HRF kernel sampled at the TR
    hrf = get_hrf(tr_times)

    # plot components (max projection over z)
    for i in np.arange(0, 40):
        plt.subplot(4, 10, i + 1)
        plt.imshow(np.max(cimg[:, :, :, i], axis=2))

    # get the EEG
    raw = mne.io.read_raw_fif(eeg_path, preload=True)
    raw_data = raw.get_data()

    # get power spectrum for different sleep stages (BOLD)
    comps = canica.transform([fmri])[0].transpose()
    bold_srate = 1 / fmri_info[0]
    bold_epochl = int(7500 / (250 / bold_srate))
    #bold_pxx,bold_f = pxx_bold_component_epoch(comps, bold_srate, 250, bold_epochl, sleep_stages)
    #eeg_pxx,eeg_f = pxx_eeg_epochs(raw_data, sleep_stages, 7500)

    # concatenate the epochs, then compute the psd
    # 1) get triggers, 2) concatenate data, 3) compute psd
    def get_trigger_inds(trigger_name, event_ids):
        # Indices of all events whose id equals `trigger_name`.
        # BUG FIX: the original compared against the enclosing-scope
        # `trigger_names[trig]`, silently ignoring this parameter; it only
        # worked because both call sites pass exactly that value.
        trig_inds = [index for index, value in enumerate(event_ids)
                     if value == trigger_name]
        return trig_inds

    def epoch_triggers(raw_data, lats, pre_samples, post_samples):
        # Cut a (channels x events x samples) array of windows around
        # each latency in `lats`.
        epochs = np.zeros((raw_data.shape[0], lats.shape[0],
                           pre_samples + post_samples))
        for lat in np.arange(0, lats.shape[0]):
            epochs[:, lat, :] = raw_data[:, lats[lat] - pre_samples:
                                         lats[lat] + post_samples]
        return epochs

    trigger_names = ['wake', 'NREM1', 'NREM2', 'NREM3']
    # epoch BOLD and get power for different trigger types:
    # what you actually want is single trial EEG and BOLD psd — first get
    # all the indices that are contained within the BOLD timeseries, then
    # get the EEG power spectrum values within those same indices.
    eeg_srate = 250
    bold_pre_samples = 15
    bold_post_samples = 25
    eeg_pre_samples = int(bold_pre_samples * fmri_info[0] * eeg_srate)
    eeg_post_samples = int(bold_post_samples * fmri_info[0] * eeg_srate)
    bold_conversion = eeg_srate / (1 / fmri_info[0])  # EEG samples per TR

    all_bold_epochs = []
    all_eeg_epochs = []
    for trig in np.arange(0, len(trigger_names)):
        trig_inds = get_trigger_inds(trigger_names[trig], event_ids)
        trig_lats = event_lats[trig_inds]
        bold_lats = ((trig_lats - start_ind) / bold_conversion).astype(int)
        # Drop events whose epoch window falls outside the BOLD run.
        bads = np.where((bold_lats - bold_pre_samples < 0) |
                        (bold_lats + bold_post_samples >= comps.shape[1]))
        bold_lats = np.delete(bold_lats, bads, axis=0)
        eeg_lats = np.delete(trig_lats, bads, axis=0)
        bold_epochs = epoch_triggers(comps, bold_lats,
                                     bold_pre_samples, bold_post_samples)
        eeg_epochs = epoch_triggers(raw_data, eeg_lats,
                                    eeg_pre_samples, eeg_post_samples)
        all_bold_epochs.append(bold_epochs)
        all_eeg_epochs.append(eeg_epochs)

    # compute power (Welch PSD per sleep stage; results of earlier
    # iterations are overwritten — only the last stage's psd survives)
    for i in np.arange(0, len(all_eeg_epochs)):
        eeg_epochs = all_eeg_epochs[i]
        bold_epochs = all_bold_epochs[i]
        bold_f, bold_pxx = signal.welch(bold_epochs)
        eeg_f, eeg_pxx = signal.welch(eeg_epochs)

    # Normalized 1 s Gaussian smoothing kernel.
    gauss = signal.gaussian(eeg_srate, 20)
    gauss = gauss / np.sum(gauss)

    # Frequency bands (Hz): rows are presumably delta, theta, alpha,
    # beta, gamma — TODO confirm against filter_and_downsample.
    freqs = np.zeros((5, 2))
    freqs[0, 0] = 1
    freqs[0, 1] = 3
    freqs[1, 0] = 4
    freqs[1, 1] = 7
    freqs[2, 0] = 8
    freqs[2, 1] = 15
    freqs[3, 0] = 17
    freqs[3, 1] = 30
    freqs[4, 0] = 30
    freqs[4, 1] = 80

    chan_freqs = filter_and_downsample(raw_data, comps, freqs,
                                       start_ind, end_ind)
    conved = convolve_chanfreqs(np.log(chan_freqs), hrf)

    # epoch all the hrf-convolved filtered EEG power
    all_conved_epochs = []
    for trig in np.arange(0, len(trigger_names)):
        trig_inds = get_trigger_inds(trigger_names[trig], event_ids)
        trig_lats = event_lats[trig_inds]
        bold_lats = ((trig_lats - start_ind) / bold_conversion).astype(int)
        bads = np.where((bold_lats - bold_pre_samples < 0) |
                        (bold_lats + bold_post_samples >= comps.shape[1]))
        bold_lats = np.delete(bold_lats, bads, axis=0)
        conved_epochs = np.zeros((conved.shape[0], conved.shape[1],
                                  bold_lats.shape[0],
                                  bold_pre_samples + bold_post_samples))
        for i in np.arange(0, conved.shape[1]):
            conved_epochs[:, i, :] = epoch_triggers(
                conved[:, i, :], bold_lats,
                bold_pre_samples, bold_post_samples)
        all_conved_epochs.append(conved_epochs)

    sig1 = chan_freqs[3, 2, :]
    sig2 = comps[0, :]
    sig2 = butter_bandpass_filter(sig2, 0.005, 0.1, 1 / fmri_info[0])
    nlags = 50

    def xcorr(sig1, sig2, nlags):
        # Lagged Pearson correlation: the centered window of sig1 against
        # progressively shifted windows of sig2, one value per lag.
        vec_l = sig1.shape[0] - nlags
        xcorrs = np.zeros(nlags)
        vec1 = sig1[int(sig1.shape[0] / 2 - vec_l / 2):
                    int(sig1.shape[0] / 2 + vec_l / 2)]
        start_p = 0
        for i in np.arange(0, nlags):
            vec2 = sig2[(start_p + i):(start_p + vec_l + i)]
            xcorrs[i] = np.corrcoef(vec1, vec2)[0, 1]
        return xcorrs

    all_xcorrs = []
    for i in np.arange(0, len(all_conved_epochs)):
        xc_i = np.zeros((1, all_conved_epochs[i].shape[1],
                         all_conved_epochs[i].shape[2],
                         all_bold_epochs[i].shape[0], 20))
        for j in np.arange(0, 1):
            print(j)
            for k in np.arange(0, all_conved_epochs[i].shape[1]):
                for el in np.arange(0, all_conved_epochs[i].shape[2]):
                    for m in np.arange(0, all_bold_epochs[i].shape[0]):
                        # NOTE(review): channel index 5 is hard-coded while
                        # j only spans 0 — looks like leftover exploration;
                        # confirm the intended channel before trusting this.
                        xc_i[j, k, el, m, :] = xcorr(
                            all_conved_epochs[i][5, k, el, :],
                            all_bold_epochs[i][m, el, :], 20)
        all_xcorrs.append(xc_i)

    plt.plot(np.mean(all_xcorrs[1][0, 1, :, 0, :], axis=0))
    plt.plot(np.mean(all_xcorrs[2][0, 1, :, 0, :], axis=0))
    plt.plot(np.mean(all_xcorrs[3][0, 1, :, 0, :], axis=0))
    # correlate power across different epochs
    # (a stray opening triple-quote followed here in the collapsed source;
    # removed, as it would start an unterminated string literal)