def test_oversampling():
    events = basic_paradigm()
    frame_times = np.linspace(0, 127, 128)
    X1 = make_first_level_design_matrix(frame_times, events, drift_model=None)
    X2 = make_first_level_design_matrix(frame_times, events, drift_model=None,
                                        oversampling=50)
    X3 = make_first_level_design_matrix(frame_times, events, drift_model=None,
                                        oversampling=10)

    # oversampling = 50 is the default, so X2 = X1; X3 differs from X1 but
    # stays close to X2
    assert_almost_equal(X1.values, X2.values)
    assert_almost_equal(X2.values, X3.values, 0)
    assert_true(np.linalg.norm(X2.values - X3.values) /
                np.linalg.norm(X2.values) > 1.e-4)

    # with the fir model, oversampling is forced to 1
    X4 = make_first_level_design_matrix(frame_times, events, hrf_model='fir',
                                        drift_model=None,
                                        fir_delays=range(0, 4),
                                        oversampling=1)
    X5 = make_first_level_design_matrix(frame_times, events, hrf_model='fir',
                                        drift_model=None,
                                        fir_delays=range(0, 4),
                                        oversampling=3)
    assert_almost_equal(X4.values, X5.values)
def test_first_level_model_design_creation(): # Test processing of FMRI inputs with InTemporaryDirectory(): shapes = ((7, 8, 9, 10), ) mask, FUNCFILE, _ = _write_fake_fmri_data(shapes) FUNCFILE = FUNCFILE[0] func_img = load(FUNCFILE) # basic test based on basic_paradigm and glover hrf t_r = 10.0 slice_time_ref = 0. events = basic_paradigm() model = FirstLevelModel(t_r, slice_time_ref, mask_img=mask, drift_model='polynomial', drift_order=3) model = model.fit(func_img, events) frame1, X1, names1 = check_design_matrix(model.design_matrices_[0]) # check design computation is identical n_scans = get_data(func_img).shape[3] start_time = slice_time_ref * t_r end_time = (n_scans - 1 + slice_time_ref) * t_r frame_times = np.linspace(start_time, end_time, n_scans) design = make_first_level_design_matrix(frame_times, events, drift_model='polynomial', drift_order=3) frame2, X2, names2 = check_design_matrix(design) assert_array_equal(frame1, frame2) assert_array_equal(X1, X2) assert_array_equal(names1, names2) # Delete objects attached to files to avoid WindowsError when deleting # temporary directory (in Windows) del FUNCFILE, mask, model, func_img
def design_matrix(n_scans, tr, onsets, conditions, durations=None,
                  hrf_model='spm', drift_model='cosine'):
    """ Builds a first-level design matrix for the given stimuli.

    Parameters
    ----------
    n_scans: int
        number of scans in the session

    tr: float
        repetition time for the BOLD data

    onsets: array of shape [n_stimuli]
        onset times for stimuli in the session

    conditions: array of shape [n_stimuli]
        labels for stimuli in the session

    durations: array of shape [n_stimuli], optional
        durations for stimuli in the session

    hrf_model: {'spm', 'spm + derivative', 'spm + derivative + dispersion',
                'glover', 'glover + derivative',
                'glover + derivative + dispersion', 'fir'}
        HRF model to be used for creating the design matrix

    drift_model: {'polynomial', 'cosine', 'blank'}
        drift model to be used for creating the design matrix

    Returns
    -------
    design: pandas.DataFrame of shape [n_scans, n_regressors]
        design matrix for the given stimuli
    """
    frame_times = np.arange(n_scans) * tr
    paradigm = {}
    paradigm['onset'] = onsets
    paradigm['trial_type'] = conditions
    if durations is not None:
        paradigm['duration'] = durations
    paradigm = pd.DataFrame(paradigm)

    design = make_first_level_design_matrix(frame_times, paradigm,
                                            hrf_model=hrf_model,
                                            drift_model=drift_model)
    return design
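# A minimal usage sketch (not from the original source) for the
# design_matrix() wrapper above; the onsets, condition labels and durations
# are made-up values, and numpy is assumed to be imported as np.
def _example_design_matrix_usage():
    design = design_matrix(n_scans=100,
                           tr=2.0,
                           onsets=np.array([10., 40., 70.]),
                           conditions=np.array(['face', 'house', 'face']),
                           durations=np.array([1., 1., 1.]),
                           hrf_model='spm',
                           drift_model='cosine')
    # one row per scan; columns are the conditions, the drift terms and the
    # constant regressor
    print(design.shape, list(design.columns))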
def test_design_matrix0d(): # test design matrix creation when regressors are provided manually tr = 1.0 frame_times = np.linspace(0, 127 * tr, 128) ax = np.random.randn(128, 4) _, X, names = check_design_matrix(make_first_level_design_matrix( frame_times, drift_model='polynomial', drift_order=3, add_regs=ax)) assert_equal(len(names), 8) assert_equal(X.shape[1], 8)
def test_design_matrix0(): # Test design matrix creation when no experimental paradigm is provided tr = 1.0 frame_times = np.linspace(0, 127 * tr, 128) _, X, names = check_design_matrix(make_first_level_design_matrix( frame_times, drift_model='polynomial', drift_order=3)) assert_equal(len(names), 4) x = np.linspace(- 0.5, .5, 128) assert_almost_equal(X[:, 0], x)
def _make_dummy_contrasts_dmtx(): frame_times = np.linspace(0, 127 * 1., 128) dmtx = make_first_level_design_matrix( frame_times, drift_model='polynomial', drift_order=3, ) contrast = {'test': np.ones(4)} return contrast, dmtx
def test_high_pass(): """ test that high-pass values lead to reasonable design matrices""" n_frames = 128 tr = 2.0 frame_times = np.arange(0, tr * n_frames, tr) X = make_first_level_design_matrix(frame_times, drift_model='Cosine', high_pass=1.) assert X.shape[1] == n_frames
def _run_interface(self, runtime): import nibabel as nb from nistats import design_matrix as dm info = self.inputs.session_info img = nb.load(self.inputs.bold_file) vols = img.shape[3] drop_missing = bool(self.inputs.drop_missing) if info['sparse'] not in (None, 'None'): sparse = pd.read_hdf(info['sparse'], key='sparse').rename(columns={ 'condition': 'trial_type', 'amplitude': 'modulation' }) sparse = sparse.dropna(subset=['modulation']) # Drop NAs else: sparse = None if info['dense'] not in (None, 'None'): dense = pd.read_hdf(info['dense'], key='dense') missing_columns = dense.isna().all() if drop_missing: # Remove columns with NaNs dense = dense[dense.columns[missing_columns == False]] elif missing_columns.any(): missing_names = ', '.join( dense.columns[missing_columns].tolist()) raise RuntimeError( f'The following columns are empty: {missing_names}. ' 'Use --drop-missing to drop before model fitting.') column_names = dense.columns.tolist() drift_model = None if (('cosine00' in column_names) | ('cosine_00' in column_names)) else 'cosine' if dense.empty: dense = None column_names = None else: dense = None column_names = None drift_model = 'cosine' mat = dm.make_first_level_design_matrix( frame_times=np.arange(vols) * info['repetition_time'], events=sparse, add_regs=dense, add_reg_names=column_names, drift_model=drift_model, ) mat.to_csv('design.tsv', sep='\t') self._results['design_matrix'] = os.path.join(runtime.cwd, 'design.tsv') return runtime
def test_simple_design_matrix(self): reg = Regressor(name='test', frame_times=np.arange(10) * 2, onset=[0]) dm, conditions = my_make_first_level_design_matrix([reg]) # create true design matrix events = pd.DataFrame(columns=['onset', 'duration', 'trial_type'], data=[[0, 0, 'test']]) dm_true = design_matrix.make_first_level_design_matrix( frame_times=np.arange(10) * 2, events=events, hrf_model='spm') self.assertTrue(dm_true.equals(dm)) self.assertEqual(len(conditions), 1) self.assertTrue((conditions['test'] == np.array([1, 0])).all())
def design_matrix_light( frame_times, events=None, hrf_model='glover', drift_model='cosine', period_cut=128, drift_order=1, fir_delays=[0], add_regs=None, add_reg_names=None, min_onset=-24, path=None): """ Idem make_first_level_design_matrix, but only returns the computed matrix and associated names """ dmtx = make_first_level_design_matrix(frame_times, events, hrf_model, drift_model, period_cut, drift_order, fir_delays, add_regs, add_reg_names, min_onset) _, matrix, names = check_design_matrix(dmtx) return matrix, names
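# Hypothetical call to design_matrix_light() above; the events table is
# fabricated and numpy/pandas are assumed to be imported as np/pd.
def _example_design_matrix_light_usage():
    frame_times = np.arange(128) * 1.0
    events = pd.DataFrame({'trial_type': ['c0', 'c1'],
                           'onset': [10., 60.],
                           'duration': [1., 1.]})
    matrix, names = design_matrix_light(frame_times, events,
                                        drift_model='polynomial',
                                        drift_order=3)
    print(matrix.shape, names)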
def add_design_matrix(self, hrf_model, drift_model='cosine', high_pass=.01): self.design = make_first_level_design_matrix( frame_times=self.frame_times, events=self.events, hrf_model=hrf_model, drift_model=drift_model, high_pass=high_pass, add_regs=self.regressors) return self
def get_condition_column(events, tr = 2, n_scans = 340): """converts events file to pd dataframe with column representing each condition""" frame_times = np.arange(n_scans) * tr box = make_first_level_design_matrix(frame_times, events, hrf_model = None) box = box.reset_index() x = box.iloc[:,1:4] > 0.8 y = x.astype('int') col = pd.DataFrame(y.idxmax(axis=1), columns = ['condition']) return col
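# Sketch of how get_condition_column() above might be called; the events
# table is fabricated and assumes exactly three trial types, matching the
# iloc[:, 1:4] slice used in the function.
def _example_get_condition_column_usage():
    events = pd.DataFrame({'trial_type': ['a', 'b', 'c', 'a'],
                           'onset': [0., 80., 160., 240.],
                           'duration': [40., 40., 40., 40.]})
    col = get_condition_column(events, tr=2, n_scans=340)
    print(col['condition'].value_counts())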
def test_show_design_matrix(): # test that the show code indeed (formally) runs frame_times = np.linspace(0, 127 * 1., 128) dmtx = make_first_level_design_matrix( frame_times, drift_model='polynomial', drift_order=3) ax = plot_design_matrix(dmtx) assert (ax is not None) with InTemporaryDirectory(): ax = plot_design_matrix(dmtx, output_file='dmtx.png') assert os.path.exists('dmtx.png') assert (ax is None) plot_design_matrix(dmtx, output_file='dmtx.pdf') assert os.path.exists('dmtx.pdf')
def _hcp_regions_selection(fmri_img, subject_id, n_voxels=10, n_jobs=1,
                           verbose=False):
    """GLM on HCP dataset."""
    paradigm, t_frames = get_paradigm_hcp(subject_id)
    d_m = make_first_level_design_matrix(t_frames, paradigm, hrf_model='spm',
                                         drift_model='Cosine',
                                         period_cut=2 * 2 * EPOCH_DUR_HCP)
    glm = FirstLevelModel(t_r=TR_HCP, slice_time_ref=0.0, noise_model='ols',
                          min_onset=10.0, signal_scaling=False,
                          smoothing_fwhm=6.0, standardize=False,
                          memory_level=1, memory='./.cachedir',
                          minimize_memory=False, n_jobs=n_jobs)
    glm.fit(run_imgs=fmri_img, design_matrices=d_m)

    _, _, names = check_design_matrix(d_m)
    n_names = len(names)
    c_val = dict([(n, c) for n, c in zip(names, np.eye(n_names))])
    c_val['rh-lh'] = c_val['rh'] - c_val['lh']
    c_val['lh-rh'] = c_val['lh'] - c_val['rh']
    z_maps = dict([(n, glm.compute_contrast(c, output_type='z_score'))
                   for n, c in c_val.items()])
    z_maps = {
        'rh': z_maps['rh-lh'],
        'lh': z_maps['lh-rh'],
        'cue': z_maps['cue'],
    }

    region_mask_imgs = {}
    for name, _ in [('rh', 'rh-lh'), ('lh', 'lh-rh'), ('cue', 'cue')]:
        z_map_vector_mask = glm.masker_.transform(z_maps[name]).flatten()
        z_region_vector_mask = mask_n_max(z_map_vector_mask, n_voxels)
        z_region_vector_mask = z_region_vector_mask.astype(float)
        region_mask_imgs[name] = \
            glm.masker_.inverse_transform(z_region_vector_mask)
    return d_m, z_maps, region_mask_imgs
def test_design_matrix0c(): # test design matrix creation when regressors are provided manually tr = 1.0 frame_times = np.linspace(0, 127 * tr, 128) ax = np.random.randn(128, 4) _, X, names = check_design_matrix( make_first_level_design_matrix(frame_times, drift_model='polynomial', drift_order=3, add_regs=ax)) assert_almost_equal(X[:, 0], ax[:, 0]) ax = np.random.randn(127, 4) with pytest.raises( AssertionError, match="Incorrect specification of additional regressors:."): make_first_level_design_matrix(frame_times, add_regs=ax) ax = np.random.randn(128, 4) with pytest.raises( ValueError, match="Incorrect number of additional regressor names."): make_first_level_design_matrix(frame_times, add_regs=ax, add_reg_names='')
def test_spm_2(): # Check that the nistats design matrix is close enough to the SPM one # (it cannot be identical, because the hrf shape is different) frame_times = np.linspace(0, 99, 100) conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2'] onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60] durations = 10 * np.ones(9) events = pd.DataFrame({'trial_type': conditions, 'onset': onsets, 'duration': durations}) X1 = make_first_level_design_matrix(frame_times, events, drift_model=None) spm_design_matrix = DESIGN_MATRIX['arr_1'] _, matrix, _ = check_design_matrix(X1) assert_true(((spm_design_matrix - matrix) ** 2).sum() / (spm_design_matrix ** 2).sum() < .1)
def my_make_first_level_design_matrix(regressors: list):
    '''Turn arbitrary number of regressors into first level design matrix.

    This function wraps the make_first_level_design_matrix function from the
    nistats.design_matrix module to create a design matrix from a list of
    Regressor objects. Note that this design matrix lacks confounds
    regressors. If you want to include confounds, pass them to the
    FirstLevelModel.fit method.

    Args:
        regressors: list of Regressor objects

    Returns (2-tuple):
        Final GLM design matrix as DataFrame and dictionary with condition
        contrast vectors for all specified regressors.
    '''
    if not isinstance(regressors, list) or not regressors:
        raise TypeError('regressors should be a non-empty list')
    if not all(isinstance(reg, Regressor) for reg in regressors):
        raise TypeError(f'regressors should be a list of {Regressor}')
    if not all([(r.frame_times == regressors[0].frame_times).all()
                for r in regressors]):
        raise ValueError('frame_times for all regressors should be equal')
    frame_times = regressors[0].frame_times

    # Filter empty regressors (i.e. miss regressor for subjects with no misses)
    regressors = [r for r in regressors if not r.is_empty]

    # Combine regressors into dataframe
    joined_regs_names = [r.name for r in regressors]
    joined_regs = pd.DataFrame(
        data=np.hstack([r.values for r in regressors]),
        index=frame_times,
        columns=joined_regs_names
    )

    # Compute design matrix
    dm = design_matrix.make_first_level_design_matrix(
        frame_times=frame_times,
        add_regs=joined_regs,
        add_reg_names=joined_regs_names
    )

    # Create condition vectors for all regressors of interest
    conditions = {r.name: np.zeros(dm.shape[1]) for r in regressors}
    for condition_name in conditions:
        conditions[condition_name][list(dm.columns).index(condition_name)] = 1

    return (dm, conditions)
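# Hypothetical usage of my_make_first_level_design_matrix(); the Regressor
# calls mirror the (name, frame_times, onset) signature used in the tests
# above, and the condition names are made up.
def _example_my_make_first_level_design_matrix_usage():
    frame_times = np.arange(100) * 2
    regs = [Regressor('go', frame_times, onset=[0, 40]),
            Regressor('stop', frame_times, onset=[20, 60])]
    dm, conditions = my_make_first_level_design_matrix(regs)
    print(dm.columns.tolist())
    print(conditions['go'])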
def test_csv_io():
    # test the csv io on design matrices
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    events = modulated_event_paradigm()
    DM = make_first_level_design_matrix(frame_times, events,
                                        hrf_model='glover',
                                        drift_model='polynomial',
                                        drift_order=3)
    path = 'design_matrix.csv'
    with InTemporaryDirectory():
        DM.to_csv(path)
        DM2 = pd.read_csv(path, index_col=0)

    _, matrix, names = check_design_matrix(DM)
    _, matrix_, names_ = check_design_matrix(DM2)
    assert_almost_equal(matrix, matrix_)
    assert_equal(names, names_)
def test_design_matrix_with_duration_and_modulation(self): frame_times = np.arange(10) * 2 reg = Regressor('test', frame_times, onset=[0, 10], duration=[.2, .3], modulation=[2, 4]) dm, conditions = my_make_first_level_design_matrix([reg]) # true design matrix (used demeaned modulation) events = pd.DataFrame( columns=['onset', 'duration', 'trial_type', 'modulation'], data=[[0, .2, 'test', -1], [10, .3, 'test', 1]]) dm_true = design_matrix.make_first_level_design_matrix( frame_times=np.arange(10) * 2, events=events, hrf_model='spm') self.assertTrue(dm_true.equals(dm)) self.assertEqual(len(conditions), 1) self.assertTrue((conditions['test'] == np.array([1, 0])).all())
def report_flm_adhd_dmn(): # pragma: no cover t_r = 2. slice_time_ref = 0. n_scans = 176 pcc_coords = (0, -53, 26) adhd_dataset = nilearn.datasets.fetch_adhd(n_subjects=1) seed_masker = NiftiSpheresMasker([pcc_coords], radius=10, detrend=True, standardize=True, low_pass=0.1, high_pass=0.01, t_r=2., memory='nilearn_cache', memory_level=1, verbose=0) seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0]) frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans) design_matrix = make_first_level_design_matrix(frametimes, hrf_model='spm', add_regs=seed_time_series, add_reg_names=["pcc_seed"]) dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1)) contrasts = {'seed_based_glm': dmn_contrast} first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref) first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0], design_matrices=design_matrix) report = make_glm_report( first_level_model, contrasts=contrasts, title='ADHD DMN Report', cluster_threshold=15, height_control='bonferroni', min_distance=8., plot_type='glass', report_dims=(1200, 'a'), ) output_filename = 'generated_report_flm_adhd_dmn.html' output_filepath = os.path.join(REPORTS_DIR, output_filename) report.save_as_html(output_filepath) report.get_iframe()
def test_complex_design_matrix_with_duration(self): frame_times = np.arange(100) * 2 reg1 = Regressor('test1', frame_times, onset=[0, 100]) reg2 = Regressor('test2', frame_times, onset=[50, 150], duration=[.1, .1]) dm, conditions = my_make_first_level_design_matrix([reg1, reg2]) # true design matrix events = pd.DataFrame(columns=['onset', 'duration', 'trial_type'], data=[[0, 0, 'test1'], [100, 0, 'test1'], [50, 0.1, 'test2'], [150, 0.1, 'test2']]) dm_true = design_matrix.make_first_level_design_matrix( frame_times=np.arange(100) * 2, events=events, hrf_model='spm') self.assertTrue(dm_true.equals(dm)) self.assertEqual({'test1', 'test2'}, set(conditions.keys())) self.assertEqual(conditions['test1'][0], 1) self.assertEqual(np.sum(conditions['test1']), 1) self.assertEqual(conditions['test2'][1], 1) self.assertEqual(np.sum(conditions['test2']), 1)
def make_dmtx(events, n_scans, t_r=2.): from pandas import read_csv frame_times = np.arange(n_scans) * t_r events = read_csv(events, sep='\t') complexs = [ 'complex_sentence_objclef', 'complex_sentence_objrel', 'complex_sentence_subjrel' ] simples = [ 'simple_sentence_adj', 'simple_sentence_coord', 'simple_sentence_cvp' ] for complex_ in complexs: events = events.replace(complex_, 'complex') for simple_ in simples: events = events.replace(simple_, 'simple') dmtx = make_first_level_design_matrix(frame_times, events=events, hrf_model='spm', drift_model=None) dmtx.drop(columns='constant', inplace=True) return dmtx # remove the constant regressor
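# Hypothetical invocation of make_dmtx() above; the events path is a
# placeholder for a BIDS-style *_events.tsv file.
def _example_make_dmtx_usage():
    dmtx = make_dmtx('sub-01_task-lang_events.tsv', n_scans=200, t_r=2.)
    # complex_/simple_ trial types have been collapsed and the constant
    # column dropped
    print(dmtx.columns.tolist())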
from nistats.design_matrix import make_first_level_design_matrix

design_matrices = []

#########################################################################
# loop over the two sessions
for idx, img in enumerate(fmri_img, start=1):
    # Build experimental paradigm
    n_scans = img.shape[-1]
    events = pd.read_table(subject_data['events{}'.format(idx)])
    # Define the sampling times for the design matrix
    frame_times = np.arange(n_scans) * tr
    # Build design matrix with the previously defined parameters
    design_matrix = make_first_level_design_matrix(
        frame_times,
        events,
        hrf_model=hrf_model,
        drift_model=drift_model,
        period_cut=period_cut,
    )

    # put the design matrices in a list
    design_matrices.append(design_matrix)

#########################################################################
# We can specify basic contrasts (to get beta maps).
# We start by specifying canonical contrasts that isolate design matrix columns.
contrast_matrix = np.eye(design_matrix.shape[1])
basic_contrasts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])

#########################################################################
# Prepare seed pcc_coords = (0, -53, 26) ######################################################################### # Estimate contrasts # ------------------ # Specify the contrasts seed_masker = NiftiSpheresMasker([pcc_coords], radius=10, detrend=True, standardize=True, low_pass=0.1, high_pass=0.01, t_r=2., memory='nilearn_cache', memory_level=1, verbose=0) seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0]) frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans) design_matrix = make_first_level_design_matrix(frametimes, hrf_model='spm', add_regs=seed_time_series, add_reg_names=["pcc_seed"]) dmn_contrast = np.array([1] + [0]*(design_matrix.shape[1]-1)) contrasts = {'seed_based_glm': dmn_contrast} ######################################################################### # Perform first level analysis # ---------------------------- # Setup and fit GLM first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref) first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0], design_matrices=design_matrix) ######################################################################### # contrast estimation print('Contrast seed_based_glm computed.')
#########################################################################
# Empty lists in which we are going to store activation values.
z_scores_right = []
z_scores_left = []
for (fmri_img, confound, events) in zip(
        models_run_imgs, models_confounds, models_events):
    texture = surface.vol_to_surf(fmri_img[0], fsaverage.pial_right)
    n_scans = texture.shape[1]
    frame_times = t_r * (np.arange(n_scans) + .5)

    # Create the design matrix
    #
    # We specify an hrf model containing the Glover model and its time
    # derivative; the drift model is implicitly a cosine basis with a
    # period cutoff of 128s.
    design_matrix = make_first_level_design_matrix(
        frame_times, events=events[0], hrf_model='glover + derivative',
        add_regs=confound[0])

    # contrast specification
    contrast_values = (design_matrix.columns == 'language') * 1.0 -\
                      (design_matrix.columns == 'string')

    # Setup and fit GLM.
    # Note that the output consists of two variables: `labels` and `estimates`.
    # `labels` tags voxels according to noise autocorrelation.
    # `estimates` contains the parameter estimates.
    # We input them for contrast computation.
    labels, estimates = run_glm(texture.T, design_matrix.values)
    contrast = compute_contrast(labels, estimates, contrast_values,
                                contrast_type='t')
    # we present the Z-transform of the t map
def make_dmtx(events, fmri, mask_img, confounds=None, t_r=2, compcorr=False,
              task='audio', normalize=True):
    """
    Generates the design matrix to fit the single GLM approach to a
    particular fmri session

    Parameters
    ----------
    events: tsv file
        contains information about the onset, duration and condition of the
        images

    fmri: 4D nifti file
        neuroimaging data

    mask_img: nifti-like object
        mask image to compute high variance confounds

    confounds: txt file, default=None
        file with information about the confounds

    t_r: int, default=2
        repetition time of the acquisition in seconds

    compcorr: bool, default=False
        whether to estimate high variance confounds or not

    task: str, default='audio'
        name of the task; the 'audio' task uses a custom sampling grid

    normalize: bool, default=True
        If True, normalize the stim (i.e., give them arbitrary numbers
        from 0 to n)

    Returns
    -------
    design_matrix: pandas.DataFrame object
        design matrix with one trial per column
    """
    n_scans = nib.load(fmri).shape[3]

    # define the time stamps for different images
    frame_times = np.linspace(0, (n_scans - 1) * t_r, n_scans)
    if task == 'audio':
        mask = np.array([1, 0, 1, 1, 0, 1, 1, 0, 1, 1])
        n_cycles = 28
        cycle_duration = 20
        t_r = 2
        cycle = np.arange(0, cycle_duration, t_r)[mask > 0]
        frame_times = np.tile(cycle, n_cycles) +\
            np.repeat(np.arange(n_cycles) * cycle_duration, mask.sum())
        frame_times = frame_times[:-2]  # for some reason...

    paradigm = read_csv(events, sep='\t')
    if normalize:
        paradigm['trial_type'] = [condition.split('_')[0]
                                  for condition in paradigm['trial_type']]

    if confounds:
        motion = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
        conf = np.loadtxt(confounds)
        if compcorr:
            hv_conf = high_variance_confounds(fmri, mask_img=mask_img)
            conf = np.hstack((hv_conf, conf))
            motion = ['conf_%d' % i for i in range(5)] + motion
    else:
        # also clear the regressor names, otherwise `motion` would be
        # referenced before assignment below
        conf = None
        motion = None

    if normalize:
        trial_type = paradigm["trial_type"].values
        for condition in set(trial_type):
            n_conds = (trial_type == condition).sum()
            trial_type[trial_type == condition] = ['%s_%02d' % (condition, i)
                                                   for i in range(n_conds)]
        paradigm["trial_type"] = trial_type

    dmtx = make_first_level_design_matrix(frame_times, events=paradigm,
                                          hrf_model='spm', add_regs=conf,
                                          add_reg_names=motion)
    return dmtx
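# Hypothetical call to make_dmtx() above; every path is a placeholder, and
# the mask image is assumed to match the functional image geometry.
def _example_single_glm_dmtx_usage():
    dmtx = make_dmtx(events='sub-01_events.tsv',
                     fmri='sub-01_bold.nii.gz',
                     mask_img='sub-01_mask.nii.gz',
                     confounds='sub-01_motion.txt',
                     t_r=2, compcorr=True, task='audio', normalize=True)
    # one column per (numbered) trial, plus compcorr/motion regressors,
    # drifts and the constant term
    print(dmtx.shape)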
def make_dmtxs(events, fmri, confounds=None, t_r=2, mumford=True,
               task='audio'):
    """
    Generates the design matrices to fit a GLMs approach to a particular
    fmri session. Every design matrix contains one regressor for the trial
    of interest, and another regressor that sums every other trial

    Parameters
    ----------
    events: tsv file
        contains information about the onset, duration and condition of the
        images

    fmri: 4D nifti file
        neuroimaging data

    confounds: txt file, default=None
        file with information about the confounds

    t_r: int, default=2
        repetition time of the acquisition in seconds

    mumford: bool, default=True
        variable grouping criteria for each design matrix. If True, each
        trial will keep its labeling name and every other trial will be
        grouped in a 'nuisance' regressor. If False, each trial will keep a
        unique name, all the other trials of its same category will be
        grouped in another regressor, and each other category will be
        modeled separately

    Returns
    -------
    design_matrix_list: list of pandas.DataFrame objects
        one design matrix per trial, with said trial as the regressor of
        interest and all other trials as nuisance regressors

    trial_names: list of str
        Original names of the trials. Used to generate spectrograms
    """
    n_scans = nib.load(fmri).shape[3]

    # define the time stamps for different images
    frame_times = np.linspace(0, (n_scans - 1) * t_r, n_scans)
    if task == 'audio':
        mask = np.array([1, 0, 1, 1, 0, 1, 1, 0, 1, 1])
        n_cycles = 28
        cycle_duration = 20
        t_r = 2
        cycle = np.arange(0, cycle_duration, t_r)[mask > 0]
        frame_times = np.tile(cycle, n_cycles) +\
            np.repeat(np.arange(n_cycles) * cycle_duration, mask.sum())
        frame_times = frame_times[:-2]  # for some reason...

    paradigm = read_csv(events, sep='\t')
    split_trials = [condition.split('_')[0]
                    for condition in paradigm['trial_type']]

    if confounds:
        motion = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
        conf = np.loadtxt(confounds)
    else:
        # also clear the regressor names, otherwise `motion` would be
        # referenced before assignment below
        conf = None
        motion = None

    design_matrix_list = []
    trial_names = []
    trial_n = len(paradigm.index)
    for i in range(trial_n):
        paradigm_copy = paradigm.copy()
        trial_type = paradigm_copy['trial_type']
        this_trial = trial_type.iloc[i]
        if mumford:
            paradigm_copy['trial_type'] = np.where(trial_type.index == i,
                                                   trial_type, 'nuisance')
        else:
            paradigm_copy['trial_type'] = np.where(
                trial_type.index == i,
                "{}_00".format(trial_type[i].split("_")[0]),
                split_trials)
        dmtx = make_first_level_design_matrix(frame_times,
                                              events=paradigm_copy,
                                              hrf_model='spm',
                                              add_regs=conf,
                                              add_reg_names=motion)
        design_matrix_list.append(dmtx)
        trial_names.append(this_trial)
    return design_matrix_list, trial_names
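# Hypothetical call to make_dmtxs() above, illustrating the
# one-design-matrix-per-trial ("Mumford-style") output; paths are
# placeholders.
def _example_make_dmtxs_usage():
    dmtx_list, trial_names = make_dmtxs(events='sub-01_events.tsv',
                                        fmri='sub-01_bold.nii.gz',
                                        confounds='sub-01_motion.txt',
                                        t_r=2, mumford=True, task='audio')
    print(len(dmtx_list), trial_names[:3])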
def denoise(img_file, tsv_file, out_path, col_names=False, hp_filter=False, lp_filter=False, out_figure_path=False): nii_ext = '.nii.gz' FD_thr = [.5] sc_range = np.arange(-1, 3) constant = 'constant' # read in files img = load_niimg(img_file) # get file info img_name = os.path.basename(img.get_filename()) file_base = img_name[0:img_name.find('.')] save_img_file = pjoin(out_path, file_base + \ '_NR' + nii_ext) data = img.get_data() df_orig = pandas.read_csv(tsv_file, '\t', na_values='n/a') df = copy.deepcopy(df_orig) Ntrs = df.as_matrix().shape[0] print('# of TRs: ' + str(Ntrs)) assert (Ntrs == data.shape[len(data.shape) - 1]) # select columns to use as nuisance regressors if col_names: df = df[col_names] str_append = ' [SELECTED regressors in CSV]' else: col_names = df.columns.tolist() str_append = ' [ALL regressors in CSV]' # fill in missing nuisance values with mean for that variable for col in df.columns: if sum(df[col].isnull()) > 0: print('Filling in ' + str(sum(df[col].isnull())) + ' NaN value for ' + col) df[col] = df[col].fillna(np.mean(df[col])) print('# of Confound Regressors: ' + str(len(df.columns)) + str_append) # implement HP filter in regression TR = img.header.get_zooms()[-1] frame_times = np.arange(Ntrs) * TR if hp_filter: hp_filter = float(hp_filter) assert (hp_filter > 0) period_cutoff = 1. / hp_filter df = make_first_level_design_matrix(frame_times, period_cut=period_cutoff, add_regs=df.as_matrix(), add_reg_names=df.columns.tolist()) # fn adds intercept into dm hp_cols = [col for col in df.columns if 'drift' in col] print('# of High-pass Filter Regressors: ' + str(len(hp_cols))) else: # add in intercept column into data frame df[constant] = 1 print('No High-pass Filter Applied') dm = df.as_matrix() # prep data data = np.reshape(data, (-1, Ntrs)) data_mean = np.mean(data, axis=1) Nvox = len(data_mean) # setup and run regression model = regression.OLSModel(dm) results = model.fit(data.T) if not hp_filter: results_orig_resid = copy.deepcopy(results.resid) # save for rsquared computation # apply low-pass filter if lp_filter: # input to butterworth fn is time x voxels low_pass = float(lp_filter) Fs = 1. 
/ TR if low_pass >= Fs / 2: raise ValueError('Low pass filter cutoff if too close to the Nyquist frequency (%s)' % (Fs / 2)) temp_img_file = pjoin(out_path, file_base + \ '_temp' + nii_ext) temp_img = nb.Nifti1Image(np.reshape(results.resid.T + np.reshape(data_mean, (Nvox, 1)), img.shape).astype('float32'), img.affine, header=img.header) temp_img.to_filename(temp_img_file) results.resid = butterworth(results.resid, sampling_rate=Fs, low_pass=low_pass, high_pass=None) print('Low-pass Filter Applied: < ' + str(low_pass) + ' Hz') # add mean back into data clean_data = results.resid.T + np.reshape(data_mean, (Nvox, 1)) # add mean back into residuals # save out new data file print('Saving output file...') clean_data = np.reshape(clean_data, img.shape).astype('float32') new_img = nb.Nifti1Image(clean_data, img.affine, header=img.header) new_img.to_filename(save_img_file) ######### generate Rsquared map for confounds only if hp_filter: # first remove low-frequency information from data hp_cols.append(constant) model_first = regression.OLSModel(df[hp_cols].as_matrix()) results_first = model_first.fit(data.T) results_first_resid = copy.deepcopy(results_first.resid) del results_first, model_first # compute sst - borrowed from matlab sst = np.square(np.linalg.norm(results_first_resid - np.mean(results_first_resid, axis=0), axis=0)) # now regress out 'true' confounds to estimate their Rsquared nr_cols = [col for col in df.columns if 'drift' not in col] model_second = regression.OLSModel(df[nr_cols].as_matrix()) results_second = model_second.fit(results_first_resid) # compute sse - borrowed from matlab sse = np.square(np.linalg.norm(results_second.resid, axis=0)) del results_second, model_second, results_first_resid elif not hp_filter: # compute sst - borrowed from matlab sst = np.square(np.linalg.norm(data.T - np.mean(data.T, axis=0), axis=0)) # compute sse - borrowed from matlab sse = np.square(np.linalg.norm(results_orig_resid, axis=0)) del results_orig_resid # compute rsquared of nuisance regressors zero_idx = scipy.logical_and(sst == 0, sse == 0) sse[zero_idx] = 1 sst[zero_idx] = 1 # would be NaNs - become rsquared = 0 rsquare = 1 - np.true_divide(sse, sst) rsquare[np.isnan(rsquare)] = 0 ######### Visualizing DM & outputs fontsize = 12 fontsize_title = 14 def_img_size = 8 if not out_figure_path: out_figure_path = save_img_file[0:save_img_file.find('.')] + '_figures' if not os.path.isdir(out_figure_path): os.mkdir(out_figure_path) png_append = '_' + img_name[0:img_name.find('.')] + '.png' print('Output directory: ' + out_figure_path) # DM corr matrix cm = df[df.columns[0:-1]].corr() curr_sz = copy.deepcopy(def_img_size) if cm.shape[0] > def_img_size: curr_sz = curr_sz + ((cm.shape[0] - curr_sz) * .3) mtx_scale = curr_sz * 100 mask = np.zeros_like(cm, dtype=np.bool) mask[np.triu_indices_from(mask)] = True fig, ax = plt.subplots(figsize=(curr_sz, curr_sz)) cmap = sns.diverging_palette(220, 10, as_cmap=True) sns.heatmap(cm, mask=mask, cmap=cmap, center=0, vmax=cm[cm < 1].max().max(), vmin=cm[cm < 1].min().min(), square=True, linewidths=.5, cbar_kws={"shrink": .6}) ax.set_xticklabels(ax.get_xticklabels(), rotation=60, ha='right', fontsize=fontsize) ax.set_yticklabels(cm.columns.tolist(), rotation=-30, va='bottom', fontsize=fontsize) ax.set_title('Nuisance Corr. 
Matrix', fontsize=fontsize_title) plt.tight_layout() file_corr_matrix = 'Corr_matrix_regressors' + png_append fig.savefig(pjoin(out_figure_path, file_corr_matrix)) plt.close(fig) del fig, ax # DM of Nuisance Regressors (all) tr_label = 'TR (Volume #)' fig, ax = plt.subplots(figsize=(curr_sz - 4.1, def_img_size)) x_scale_html = ((curr_sz - 4.1) / def_img_size) * 890 reporting.plot_design_matrix(df, ax=ax) ax.set_title('Nuisance Design Matrix', fontsize=fontsize_title) ax.set_xticklabels(ax.get_xticklabels(), rotation=60, ha='right', fontsize=fontsize) ax.set_yticklabels(ax.get_yticklabels(), fontsize=fontsize) ax.set_ylabel(tr_label, fontsize=fontsize) plt.tight_layout() file_design_matrix = 'Design_matrix' + png_append fig.savefig(pjoin(out_figure_path, file_design_matrix)) plt.close(fig) del fig, ax # FD timeseries plot FD = 'FD' poss_names = ['FramewiseDisplacement', FD, 'framewisedisplacement', 'fd'] fd_idx = [df_orig.columns.__contains__(i) for i in poss_names] if np.sum(fd_idx) > 0: FD_name = poss_names[fd_idx == True] if sum(df_orig[FD_name].isnull()) > 0: df_orig[FD_name] = df_orig[FD_name].fillna(np.mean(df_orig[FD_name])) y = df_orig[FD_name].as_matrix() Nremove = [] sc_idx = [] for thr_idx, thr in enumerate(FD_thr): idx = y >= thr sc_idx.append(copy.deepcopy(idx)) for iidx in np.where(idx)[0]: for buffer in sc_range: curr_idx = iidx + buffer if curr_idx >= 0 and curr_idx <= len(idx): sc_idx[thr_idx][curr_idx] = True Nremove.append(np.sum(sc_idx[thr_idx])) Nplots = len(FD_thr) sns.set(font_scale=1.5) sns.set_style('ticks') fig, axes = plt.subplots(Nplots, 1, figsize=(def_img_size * 1.5, def_img_size / 2), squeeze=False) sns.despine() bound = .4 fd_mean = np.mean(y) for curr in np.arange(0, Nplots): axes[curr, 0].plot(y) axes[curr, 0].plot((-bound, Ntrs + bound), FD_thr[curr] * np.ones((1, 2))[0], '--', color='black') axes[curr, 0].scatter(np.arange(0, Ntrs), y, s=20) if Nremove[curr] > 0: info = scipy.ndimage.measurements.label(sc_idx[curr]) for cluster in np.arange(1, info[1] + 1): temp = np.where(info[0] == cluster)[0] axes[curr, 0].axvspan(temp.min() - bound, temp.max() + bound, alpha=.5, color='red') axes[curr, 0].set_ylabel('Framewise Disp. 
(' + FD + ')') axes[curr, 0].set_title(FD + ': ' + str(100 * Nremove[curr] / Ntrs)[0:4] + '% of scan (' + str(Nremove[curr]) + ' volumes) would be scrubbed (FD thr.= ' + str(FD_thr[curr]) + ')') plt.text(Ntrs + 1, FD_thr[curr] - .01, FD + ' = ' + str(FD_thr[curr]), fontsize=fontsize) plt.text(Ntrs, fd_mean - .01, 'avg = ' + str(fd_mean), fontsize=fontsize) axes[curr, 0].set_xlim((-bound, Ntrs + 8)) plt.tight_layout() axes[curr, 0].set_xlabel(tr_label) file_fd_plot = FD + '_timeseries' + png_append fig.savefig(pjoin(out_figure_path, file_fd_plot)) plt.close(fig) del fig, axes print(FD + ' timeseries plot saved') else: print(FD + ' not found: ' + FD + ' timeseries not plotted') file_fd_plot = None # Carpet and DVARS plots - before & after nuisance regression # need to create mask file to input to DVARS function mask_file = pjoin(out_figure_path, 'mask_temp.nii.gz') nifti_masker = NiftiMasker(mask_strategy='epi', standardize=False) nifti_masker.fit(img) nifti_masker.mask_img_.to_filename(mask_file) # create 2 or 3 carpet plots, depending on if LP filter is also applied Ncarpet = 2 total_sz = int(16) carpet_scale = 840 y_labels = ['Input (voxels)', 'Output \'cleaned\''] imgs = [img, new_img] img_files = [img_file, save_img_file] color = ['red', 'salmon'] labels = ['input', 'cleaned'] if lp_filter: Ncarpet = 3 total_sz = int(20) carpet_scale = carpet_scale * (9/8) y_labels = ['Input', 'Clean Pre-LP', 'Clean LP'] imgs.insert(1, temp_img) img_files.insert(1, temp_img_file) color.insert(1, 'firebrick') labels.insert(1, 'clean pre-LP') labels[-1] = 'clean LP' dvars = [] print('Computing dvars...') for in_file in img_files: temp = nac.compute_dvars(in_file=in_file, in_mask=mask_file)[1] dvars.append(np.hstack((temp.mean(), temp))) del temp small_sz = 2 fig = plt.figure(figsize=(def_img_size * 1.5, def_img_size + ((Ncarpet - 2) * 1))) row_used = 0 if np.sum(fd_idx) > 0: # if FD data is available row_used = row_used + small_sz ax0 = plt.subplot2grid((total_sz, 1), (0, 0), rowspan=small_sz) ax0.plot(y) ax0.scatter(np.arange(0, Ntrs), y, s=10) curr = 0 if Nremove[curr] > 0: info = scipy.ndimage.measurements.label(sc_idx[curr]) for cluster in np.arange(1, info[1] + 1): temp = np.where(info[0] == cluster)[0] ax0.axvspan(temp.min() - bound, temp.max() + bound, alpha=.5, color='red') ax0.set_ylabel(FD) for side in ["top", "right", "bottom"]: ax0.spines[side].set_color('none') ax0.spines[side].set_visible(False) ax0.set_xticks([]) ax0.set_xlim((-.5, Ntrs - .5)) ax0.spines["left"].set_position(('outward', 10)) ax_d = plt.subplot2grid((total_sz, 1), (row_used, 0), rowspan=small_sz) for iplot in np.arange(len(dvars)): ax_d.plot(dvars[iplot], color=color[iplot], label=labels[iplot]) ax_d.set_ylabel('DVARS') for side in ["top", "right", "bottom"]: ax_d.spines[side].set_color('none') ax_d.spines[side].set_visible(False) ax_d.set_xticks([]) ax_d.set_xlim((-.5, Ntrs - .5)) ax_d.spines["left"].set_position(('outward', 10)) ax_d.legend(fontsize=fontsize - 2) row_used = row_used + small_sz st = 0 carpet_each = int((total_sz - row_used) / Ncarpet) for idx, img_curr in enumerate(imgs): ax_curr = plt.subplot2grid((total_sz, 1), (row_used + st, 0), rowspan=carpet_each) fig = plotting.plot_carpet(img_curr, figure=fig, axes=ax_curr) ax_curr.set_ylabel(y_labels[idx]) for side in ["bottom", "left"]: ax_curr.spines[side].set_position(('outward', 10)) if idx < len(imgs)-1: ax_curr.spines["bottom"].set_visible(False) ax_curr.set_xticklabels('') ax_curr.set_xlabel('') st = st + carpet_each file_carpet_plot = 'Carpet_plots' + 
png_append fig.savefig(pjoin(out_figure_path, file_carpet_plot)) plt.close() del fig, ax0, ax_curr, ax_d, dvars os.remove(mask_file) print('Carpet/DVARS plots saved') if lp_filter: os.remove(temp_img_file) del temp_img # Display T-stat maps for nuisance regressors # create mean img img_size = (img.shape[0], img.shape[1], img.shape[2]) mean_img = nb.Nifti1Image(np.reshape(data_mean, img_size), img.affine) mx = [] for idx, col in enumerate(df.columns): if not 'drift' in col and not constant in col: con_vector = np.zeros((1, df.shape[1])) con_vector[0, idx] = 1 con = results.Tcontrast(con_vector) mx.append(np.max(np.absolute([con.t.min(), con.t.max()]))) mx = .8 * np.max(mx) t_png = 'Tstat_' file_tstat = [] for idx, col in enumerate(df.columns): if not 'drift' in col and not constant in col: con_vector = np.zeros((1, df.shape[1])) con_vector[0, idx] = 1 con = results.Tcontrast(con_vector) m_img = nb.Nifti1Image(np.reshape(con, img_size), img.affine) title_str = col + ' Tstat' fig = plotting.plot_stat_map(m_img, mean_img, threshold=3, colorbar=True, display_mode='z', vmax=mx, title=title_str, cut_coords=7) file_temp = t_png + col + png_append fig.savefig(pjoin(out_figure_path, file_temp)) file_tstat.append({'name': col, 'file': file_temp}) plt.close() del fig, file_temp print(title_str + ' map saved') # Display R-sq map for nuisance regressors m_img = nb.Nifti1Image(np.reshape(rsquare, img_size), img.affine) title_str = 'Nuisance Rsq' mx = .95 * rsquare.max() fig = plotting.plot_stat_map(m_img, mean_img, threshold=.2, colorbar=True, display_mode='z', vmax=mx, title=title_str, cut_coords=7) file_rsq_map = 'Rsquared' + png_append fig.savefig(pjoin(out_figure_path, file_rsq_map)) plt.close() del fig print(title_str + ' map saved') ######### html report templateLoader = jinja2.FileSystemLoader(searchpath="/") templateEnv = jinja2.Environment(loader=templateLoader) templateVars = {"img_file": img_file, "save_img_file": save_img_file, "Ntrs": Ntrs, "tsv_file": tsv_file, "col_names": col_names, "hp_filter": hp_filter, "lp_filter": lp_filter, "file_design_matrix": file_design_matrix, "file_corr_matrix": file_corr_matrix, "file_fd_plot": file_fd_plot, "file_rsq_map": file_rsq_map, "file_tstat": file_tstat, "x_scale": x_scale_html, "mtx_scale": mtx_scale, "file_carpet_plot": file_carpet_plot, "carpet_scale": carpet_scale } TEMPLATE_FILE = pjoin(os.getcwd(), "report_template.html") template = templateEnv.get_template(TEMPLATE_FILE) outputText = template.render(templateVars) html_file = pjoin(out_figure_path, img_name[0:img_name.find('.')] + '.html') with open(html_file, "w") as f: f.write(outputText) print('') print('HTML report: ' + html_file) return new_img
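# Hypothetical call to the denoise() function above; the image, confounds
# file and output directory are placeholders, and the listed confound
# columns are assumed to exist in the TSV.
def _example_denoise_usage():
    cleaned_img = denoise('sub-01_bold.nii.gz',
                          'sub-01_confounds.tsv',
                          '/tmp/denoise_out',
                          col_names=['tx', 'ty', 'tz', 'rx', 'ry', 'rz'],
                          hp_filter=0.008,
                          lp_filter=0.08)
    # the cleaned 4D image is also written to <out_path>/<base>_NR.nii.gz
    print(cleaned_img.shape)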
######################################################################### # Create design matrices # ------------------------------------- # The same parameters allow us to obtain a variety of design matrices # We first create an events object import pandas as pd events = pd.DataFrame({'trial_type': conditions, 'onset': onsets, 'duration': duration}) ######################################################################### # We sample the events into a design matrix, also including additional regressors hrf_model = 'glover' from nistats.design_matrix import make_first_level_design_matrix X1 = make_first_level_design_matrix( frame_times, events, drift_model='polynomial', drift_order=3, add_regs=motion, add_reg_names=add_reg_names, hrf_model=hrf_model) ######################################################################### # Now we compute a block design matrix. We add duration to create the blocks. # For this we first define an event structure that includes the duration parameter duration = 7. * np.ones(len(conditions)) events = pd.DataFrame({'trial_type': conditions, 'onset': onsets, 'duration': duration}) ######################################################################### # Then we sample the design matrix X2 = make_first_level_design_matrix(frame_times, events, drift_model='polynomial', drift_order=3, hrf_model=hrf_model) #########################################################################
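#########################################################################
# As a quick visual check (not part of the original tutorial text), the
# event-related matrix X1 and the block matrix X2 built above can be plotted
# side by side; this sketch assumes matplotlib is available.
import matplotlib.pyplot as plt
from nistats.reporting import plot_design_matrix

fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 6))
plot_design_matrix(X1, ax=ax1)
ax1.set_title('Event-related design')
plot_design_matrix(X2, ax=ax2)
ax2.set_title('Block design')
plt.show()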
from nistats.design_matrix import make_first_level_design_matrix

design_matrices = []

#########################################################################
# Loop over the two sessions.
for idx, img in enumerate(fmri_img, start=1):
    # Build experimental paradigm
    n_scans = img.shape[-1]
    events = pd.read_table(subject_data['events{}'.format(idx)])
    # Define the sampling times for the design matrix
    frame_times = np.arange(n_scans) * tr
    # Build design matrix with the previously defined parameters
    design_matrix = make_first_level_design_matrix(
        frame_times,
        events,
        hrf_model=hrf_model,
        drift_model=drift_model,
        high_pass=high_pass,
    )

    # put the design matrices in a list
    design_matrices.append(design_matrix)

#########################################################################
# We can specify basic contrasts (to get beta maps).
# We start by specifying canonical contrasts that isolate design matrix columns.
contrast_matrix = np.eye(design_matrix.shape[1])
basic_contrasts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])

#########################################################################
all_subs = [sj] data = make_median_soma_sub(all_subs,file_extension,out_dir,median_gii=median_gii) events_avg = make_median_soma_events(all_subs) # make DM TR = analysis_params["TR"] # specifying the timing of fMRI frames frame_times = TR * (np.arange(data.shape[-1])) # Create the design matrix, hrf model containing Glover model design_matrix = make_first_level_design_matrix(frame_times, events=events_avg, hrf_model='glover' ) # plot design matrix and save just to check if everything fine plot = plot_design_matrix(design_matrix) fig = plot.get_figure() fig.savefig(os.path.join(out_dir,'design_matrix.svg'), dpi=100,bbox_inches = 'tight') print('fitting GLM to %d vertices'%data.shape[0]) soma_params = Parallel(n_jobs=16)(delayed(fit_glm)(vert, design_matrix.values) for _,vert in enumerate(data)) soma_params = np.vstack(soma_params) # save estimates in dir estimates_filename = os.path.join(out_dir,'sub-{sj}_ses-01_task-soma_run-median_space-fsaverage_hemi-both_{ext}'.format(sj=sj,ext=file_extension)) estimates_filename = estimates_filename.replace('.func.gii','_estimates.npz')
#
# This involves computing the design matrix and fitting the model.
# We start by specifying the timing of fMRI frames

import numpy as np
n_scans = texture.shape[1]
frame_times = t_r * (np.arange(n_scans) + .5)

#########################################################################
# Create the design matrix
#
# We specify an hrf model containing the Glover model and its time
# derivative; the drift model is implicitly a cosine basis with a period
# cutoff of 128s.
from nistats.design_matrix import make_first_level_design_matrix
design_matrix = make_first_level_design_matrix(frame_times,
                                               events=events,
                                               hrf_model='glover + derivative'
                                               )

#########################################################################
# Setup and fit GLM.
# Note that the output consists of two variables: `labels` and `estimates`.
# `labels` tags voxels according to noise autocorrelation.
# `estimates` contains the parameter estimates.
# We keep them for later contrast computation.
from nistats.first_level_model import run_glm
labels, estimates = run_glm(texture.T, design_matrix.values)

#########################################################################
# Estimate contrasts
# ------------------
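#########################################################################
# Sketch of the contrast step that typically follows (mirroring the surface
# analysis loop earlier in this file); the condition names 'language' and
# 'string' are assumptions about this events table.
from nistats.contrasts import compute_contrast

contrast_values = (design_matrix.columns == 'language') * 1.0 \
    - (design_matrix.columns == 'string')
contrast = compute_contrast(labels, estimates, contrast_values,
                            contrast_type='t')
z_map = contrast.z_score()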