def test_hkernel():
    """ test the hrf computation """
    tr = 2.0
    h = _hrf_kernel('spm', tr)
    assert_almost_equal(h[0], spm_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel('spm + derivative', tr)
    assert_almost_equal(h[1], spm_time_derivative(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel('spm + derivative + dispersion', tr)
    assert_almost_equal(h[2], spm_dispersion_derivative(tr))
    assert_equal(len(h), 3)
    h = _hrf_kernel('glover', tr)
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel('glover + derivative', tr)
    assert_almost_equal(h[1], glover_time_derivative(tr))
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel('fir', tr, fir_delays=np.arange(4))
    assert_equal(len(h), 4)
    for dh in h:
        assert_equal(dh.sum(), 50.)
    h = _hrf_kernel(None, tr)
    assert_equal(len(h), 1)
    assert_almost_equal(h[0], np.hstack((1, np.zeros(49))))
def preproc_file(deriv_dir, sub_metadata, deriv_bold_fname=deriv_bold_fname):
    deriv_bold = deriv_dir.ensure(deriv_bold_fname)
    with open(str(sub_metadata), 'r') as md:
        bold_metadata = json.load(md)
    tr = bold_metadata["RepetitionTime"]
    # time points
    tp = 200
    ix = np.arange(tp)
    # create voxel timeseries
    task_onsets = np.zeros(tp)
    # add activations at every 40 time points
    # waffles
    task_onsets[0::40] = 1
    # fries
    task_onsets[3::40] = 1.5
    # milkshakes
    task_onsets[6::40] = 2
    signal = np.convolve(task_onsets, spm_hrf(tr))[0:len(task_onsets)]
    # csf
    csf = np.cos(2 * np.pi * ix * (50 / tp)) * 0.1
    # white matter
    wm = np.sin(2 * np.pi * ix * (22 / tp)) * 0.1
    # voxel time series (signal and noise)
    voxel_ts = signal + csf + wm
    # a 4d matrix with 2 identical timeseries
    img_data = np.array([[[voxel_ts, voxel_ts]]])
    # make a nifti image
    img = nib.Nifti1Image(img_data, np.eye(4))
    # save the nifti image
    img.to_filename(str(deriv_bold))
    return deriv_bold
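# A minimal usage sketch for preproc_file (assumptions: pytest's ``tmpdir``
# fixture provides the py.path.local API used above, the metadata file name is
# hypothetical, and ``deriv_bold_fname`` is passed explicitly in case the
# module-level default is not defined):
import json

def test_preproc_file(tmpdir):
    sub_metadata = tmpdir.join("sub-01_task-example_bold.json")
    sub_metadata.write(json.dumps({"RepetitionTime": 2.0}))
    out = preproc_file(tmpdir, sub_metadata,
                       deriv_bold_fname="sub-01_task-example_desc-preproc_bold.nii.gz")
    assert out.check(file=1)  # the simulated bold file was written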
def _get_hrf_model(hrf_model=None, hrf_length=25., dt=1., normalize=False):
    """Returns the HRF created with model hrf_model. If hrf_model is None,
    a zero vector is returned.

    Parameters
    ----------
    hrf_model : str
    hrf_length : float
    dt : float
    normalize : bool

    Returns
    -------
    hrf_0 : hrf
    """
    if hrf_model == 'glover':
        hrf_0 = glover_hrf(tr=1., oversampling=1. / dt, time_length=hrf_length)
    elif hrf_model == 'spm':
        hrf_0 = spm_hrf(tr=1., oversampling=1. / dt, time_length=hrf_length)
    elif hrf_model == 'gamma':
        hrf_0 = _gamma_difference_hrf(1., oversampling=1. / dt,
                                      time_length=hrf_length, onset=0.,
                                      delay=6, undershoot=16., dispersion=1.,
                                      u_dispersion=1., ratio=0.167)
    elif hrf_model == 'bezier':
        # Bezier curves: the peak and undershoot positions can be specified
        hrf_0 = bezier_hrf(hrf_length=hrf_length, dt=dt, pic=[6, 1], picw=2,
                           ushoot=[15, -0.2], ushootw=3, normalize=normalize)
    elif hrf_model == 'physio':
        # Balloon model; by default uses the parameters of Khalidov11
        hrf_0 = physio_hrf(hrf_length=hrf_length, dt=dt, normalize=normalize)
    else:
        # zero vector if no recognized hrf_model is specified
        # (cast to int: np.zeros requires an integer shape)
        hrf_0 = np.zeros(int(hrf_length / dt))
        warnings.warn("The HRF model is not recognized, setting it to None")
    if normalize and hrf_model is not None:
        hrf_0 = hrf_0 / np.linalg.norm(hrf_0)
    return hrf_0
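# Example usage (a sketch, assuming nistats-style spm_hrf where the number of
# returned samples is time_length / dt):
hrf = _get_hrf_model('spm', hrf_length=25., dt=0.1, normalize=True)
assert len(hrf) == 250                        # 25 s sampled every 0.1 s
assert np.isclose(np.linalg.norm(hrf), 1.0)   # unit norm after normalization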
def test_hkernel():
    """ test the hrf computation """
    tr = 2.0
    h = _hrf_kernel('spm', tr)
    assert_almost_equal(h[0], spm_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel('spm_time', tr)
    assert_almost_equal(h[1], spm_time_derivative(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel('spm_time_dispersion', tr)
    assert_almost_equal(h[2], spm_dispersion_derivative(tr))
    assert_equal(len(h), 3)
    h = _hrf_kernel('canonical', tr)
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel('canonical with derivative', tr)
    assert_almost_equal(h[1], glover_time_derivative(tr))
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel('fir', tr, fir_delays=np.arange(4))
    assert_equal(len(h), 4)
    for dh in h:
        assert_equal(dh.sum(), 16.)
def test_hkernel():
    """ test the hrf computation """
    tr = 2.0
    h = _hrf_kernel("spm", tr)
    assert_almost_equal(h[0], spm_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel("spm + derivative", tr)
    assert_almost_equal(h[1], spm_time_derivative(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel("spm + derivative + dispersion", tr)
    assert_almost_equal(h[2], spm_dispersion_derivative(tr))
    assert_equal(len(h), 3)
    h = _hrf_kernel("glover", tr)
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel("glover + derivative", tr)
    assert_almost_equal(h[1], glover_time_derivative(tr))
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel("fir", tr, fir_delays=np.arange(4))
    assert_equal(len(h), 4)
    for dh in h:
        assert_equal(dh.sum(), 16.0)
def test_spm_hrf():
    """ test that the spm_hrf is correctly normalized and has correct length """
    h = spm_hrf(2.0)
    assert_almost_equal(h.sum(), 1)
    # default time_length is 32 s at dt = tr / oversampling = 2.0 / 50 = 0.04 s,
    # hence 32 / 0.04 = 800 samples
    assert_equal(len(h), 800)
def test_beta_series():
    # base directory
    base_dir = os.path.join(os.getcwd(), 'tmp')
    os.makedirs(base_dir, exist_ok=True)
    bold_file = os.path.join(base_dir, 'bold.nii.gz')
    mask_file = os.path.join(base_dir, 'mask.nii.gz')
    events_file = os.path.join(base_dir, 'events.tsv')
    confounds_file = os.path.join(base_dir, 'confounds.tsv')
    selected_confounds = ['WhiteMatter', 'CSF']
    # repetition time 2 seconds
    tr = 2
    bold_metadata = {"RepetitionTime": tr, "TaskName": "whodis"}
    # time points
    tp = 200
    ix = np.arange(tp)
    # the selected hrf model
    hrf_model = 'spm'
    # create voxel timeseries
    task_onsets = np.zeros(tp)
    # add activations at every 40 time points
    task_onsets[0::40] = 1
    signal = np.convolve(task_onsets, spm_hrf(tr))[0:len(task_onsets)]
    # csf
    csf = np.cos(2 * np.pi * ix * (50 / tp)) * 0.1
    # white matter
    wm = np.sin(2 * np.pi * ix * (22 / tp)) * 0.1
    # voxel time series (signal and noise)
    voxel_ts = signal + csf + wm
    # make the confounds tsv
    confounds_df = pd.DataFrame({'WhiteMatter': wm, 'CSF': csf})
    confounds_df.to_csv(confounds_file, index=False, sep='\t')
    # a 4d matrix with 2 identical timeseries
    img_data = np.array([[[voxel_ts, voxel_ts]]])
    # make a nifti image
    img = nib.Nifti1Image(img_data, np.eye(4))
    # save the nifti image
    img.to_filename(bold_file)
    # make the mask file
    bm_data = np.array([[[1, 1]]], dtype=np.int16)
    bm_img = nib.Nifti1Image(bm_data, np.eye(4))
    bm_img.to_filename(mask_file)
    # create events tsv
    onsets = np.multiply(np.where(task_onsets == 1), tr).reshape(5)
    durations = [1] * onsets.size
    trial_types = ['testCond'] * onsets.size
    events_df = pd.DataFrame.from_dict({'onset': onsets,
                                        'duration': durations,
                                        'trial_type': trial_types})
    # reorder columns
    events_df = events_df[['onset', 'duration', 'trial_type']]
    # save the events_df to file
    events_df.to_csv(events_file, index=False, sep='\t')
    beta_series = BetaSeries(bold_file=bold_file,
                             bold_metadata=bold_metadata,
                             mask_file=mask_file,
                             events_file=events_file,
                             confounds_file=confounds_file,
                             selected_confounds=selected_confounds,
                             hrf_model=hrf_model,
                             smoothing_kernel=None,
                             low_pass=None)
    res = beta_series.run()
    assert os.path.isfile(res.outputs.beta_maps)
    # clean up files
    if isinstance(res.outputs.beta_maps, list):
        for f in res.outputs.beta_maps:
            os.remove(f)
    else:
        os.remove(res.outputs.beta_maps)
    shutil.rmtree(base_dir)
def _generate_X(self):
    """ Generates X (design matrix). """
    single_trial = self.single_trial
    # Generate I trials for P conditions
    conds = np.tile(np.arange(self.P), self.I)
    conds = np.random.permutation(conds)  # shuffle trials
    if len(conds) % len(self.ISIs) != 0:
        raise ValueError(
            "Please choose ISIs which can spread across trials evenly.")
    # Generate ISIs and shuffle (integer division: repeats must be an int)
    ISIs = np.repeat(self.ISIs, len(conds) // len(self.ISIs))
    ISIs = np.random.permutation(ISIs)
    run_dur = int(np.sum(ISIs) + self.I_dur * len(conds))  # run duration
    osf = 10  # oversampling factor for onsets/hrf
    if single_trial:
        # nr of regressors = conditions * trials
        X = np.zeros((run_dur * osf, self.P * self.I))
    else:
        # nr of regressors = nr of conditions
        X = np.zeros((run_dur * osf, self.P))
    current_onset = 0
    # start creating onsets
    for i, trial in enumerate(conds):
        if single_trial:
            X[current_onset:(current_onset + self.I_dur * osf), i] = 1
        else:
            X[current_onset:(current_onset + self.I_dur * osf), trial] = 1
        this_ITI = self.I_dur * osf + ISIs[i] * osf
        current_onset += int(this_ITI)  # slice indices must be integers
    # Define HRF
    if self.hrf is None:
        hrf = spm_hrf(tr=self.TR, oversampling=self.TR * osf,
                      time_length=32.0, onset=0.0)
        hrf = hrf / np.max(hrf)  # scale HRF, peak = 1
    else:
        hrf = self.hrf
    # If a confound model is given, create a regressor based on the confound
    # and add it to X so it is convolved as well (to be used later when
    # "controlling" for its influence)
    if self.conf_params is not None:
        if self.conf is None:
            conf = self._generate_conf(conds)
            self.run_iter = 0
            self.conf = conf
        else:
            conf = self.conf  # for run-wise
            self.run_iter += 2
    else:
        conf = np.zeros(conds.size)
        self.conf = conf
        self.run_iter = 0
    conf_pred = np.zeros(run_dur * osf)
    if self.single_trial:
        conf_pred[X.sum(axis=1) != 0] = np.repeat(conf, repeats=osf)
    else:
        for condition in range(self.P):
            n_trials = np.sum(X[:, condition] != 0)
            this_value = conf[self.run_iter + condition]
            conf_pred[X[:, condition] != 0] = np.repeat(this_value,
                                                        repeats=n_trials)
    X = np.c_[X, conf_pred]
    # Convolve regressors with HRF
    X = np.hstack([np.convolve(X[:, i], hrf)[:run_dur * osf, np.newaxis]
                   for i in range(X.shape[1])])
    X = X[::int(self.TR * osf), :]  # downsample to the TR grid (int step)
    X = np.c_[np.ones(X.shape[0]), X]  # stack intercept
    self.X = X
    self.conds = conds
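# A standalone sketch of the same design-matrix construction (all names here
# are illustrative, not part of the class above): place impulses on an
# oversampled grid, convolve with an HRF, and downsample back to the TR grid.
import numpy as np

def make_design_column(onsets, n_scans, tr, hrf, osf=10):
    """onsets: onset times in seconds for a single condition."""
    n_high = n_scans * osf                      # oversampled run length
    boxcar = np.zeros(n_high)
    for onset in onsets:
        boxcar[int(onset / tr * osf)] = 1.0     # impulse at each onset
    conv = np.convolve(boxcar, hrf)[:n_high]    # convolve, trim to run length
    return conv[::osf]                          # one sample per TR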