Example #1
def test_oversampling():
    events = basic_paradigm()
    frame_times = np.linspace(0, 127, 128)
    X1 = make_first_level_design_matrix(frame_times, events, drift_model=None)
    X2 = make_first_level_design_matrix(frame_times,
                                        events,
                                        drift_model=None,
                                        oversampling=50)
    X3 = make_first_level_design_matrix(frame_times,
                                        events,
                                        drift_model=None,
                                        oversampling=10)

    # oversampling is 50 by default, so X2 == X1; X3 != X1 but stays close to X2
    assert_almost_equal(X1.values, X2.values)
    assert_almost_equal(X2.values, X3.values, 0)
    assert (np.linalg.norm(X2.values - X3.values) / np.linalg.norm(X2.values) >
            1.e-4)

    # fir model, oversampling is forced to 1
    X4 = make_first_level_design_matrix(frame_times,
                                        events,
                                        hrf_model='fir',
                                        drift_model=None,
                                        fir_delays=range(0, 4),
                                        oversampling=1)
    X5 = make_first_level_design_matrix(frame_times,
                                        events,
                                        hrf_model='fir',
                                        drift_model=None,
                                        fir_delays=range(0, 4),
                                        oversampling=10)
    assert_almost_equal(X4.values, X5.values)
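basic_paradigm() is a fixture from nilearn's test suite, so the snippet above is not runnable on its own. Below is a minimal, self-contained sketch of the same oversampling comparison; the events table is a hypothetical stand-in for basic_paradigm(), not the original fixture.

import numpy as np
import pandas as pd
from nilearn.glm.first_level import make_first_level_design_matrix

# Illustrative three-condition paradigm (stand-in for basic_paradigm())
events = pd.DataFrame({
    'trial_type': ['c0', 'c1', 'c2'] * 3,
    'onset': [1., 10., 20., 34., 50., 65., 80., 95., 110.],
    'duration': np.ones(9),
})
frame_times = np.linspace(0, 127, 128)

# A higher oversampling samples the HRF on a finer grid before
# downsampling to frame_times, so the matrices differ only slightly.
X_hi = make_first_level_design_matrix(frame_times, events,
                                      drift_model=None, oversampling=50)
X_lo = make_first_level_design_matrix(frame_times, events,
                                      drift_model=None, oversampling=10)
rel_diff = (np.linalg.norm(X_hi.values - X_lo.values)
            / np.linalg.norm(X_hi.values))
print('relative difference: %.2e' % rel_diff)  # small, but non-zero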
Example #2
def test_first_level_design_creation():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        model = FirstLevelModel(t_r,
                                slice_time_ref,
                                mask_img=mask,
                                drift_model='polynomial',
                                drift_order=3)
        model = model.fit(func_img, events)
        frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])
        # check design computation is identical
        n_scans = get_data(func_img).shape[3]
        start_time = slice_time_ref * t_r
        end_time = (n_scans - 1 + slice_time_ref) * t_r
        frame_times = np.linspace(start_time, end_time, n_scans)
        design = make_first_level_design_matrix(frame_times,
                                                events,
                                                drift_model='polynomial',
                                                drift_order=3)
        frame2, X2, names2 = check_design_matrix(design)
        assert_array_equal(frame1, frame2)
        assert_array_equal(X1, X2)
        assert_array_equal(names1, names2)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del FUNCFILE, mask, model, func_img
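The frame_times arithmetic above is the step worth internalizing: volume timestamps start at slice_time_ref * t_r and are spaced by t_r. A tiny sketch of just that computation, with illustrative values:

import numpy as np

t_r, slice_time_ref, n_scans = 10.0, 0.5, 10   # illustrative values
start_time = slice_time_ref * t_r                   # 5.0
end_time = (n_scans - 1 + slice_time_ref) * t_r     # 95.0
frame_times = np.linspace(start_time, end_time, n_scans)
print(frame_times)  # [ 5. 15. 25. ... 95.], one timestamp per volume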
Example #3
def _make_dummy_contrasts_dmtx():
    frame_times = np.linspace(0, 127 * 1., 128)
    dmtx = make_first_level_design_matrix(frame_times,
                                          drift_model='polynomial',
                                          drift_order=3,
                                          )
    contrast = {'test': np.ones(4)}
    return contrast, dmtx
Example #4
def test_high_pass():
    """ test that high-pass values lead to reasonable design matrices"""
    n_frames = 128
    tr = 2.0
    frame_times = np.arange(0, tr * n_frames, tr)
    X = make_first_level_design_matrix(frame_times,
                                       drift_model='Cosine',
                                       high_pass=1.)
    assert X.shape[1] == n_frames
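With a cosine drift model, the number of drift regressors grows with the high_pass cutoff until it saturates at one column per frame, which is what the assertion above exploits (high_pass=1. is far beyond any realistic fMRI cutoff). A sketch comparing column counts across cutoffs, using only the public API shown above:

import numpy as np
from nilearn.glm.first_level import make_first_level_design_matrix

frame_times = np.arange(0, 2.0 * 128, 2.0)  # 128 frames, TR = 2 s
for high_pass in (0.008, 0.01, 0.1, 1.0):
    X = make_first_level_design_matrix(frame_times,
                                       drift_model='cosine',
                                       high_pass=high_pass)
    # Stricter cutoffs need more cosine basis functions, capped so the
    # design never has more columns than frames.
    print(high_pass, X.shape[1])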
Example #5
def test_design_matrix0():
    # Test design matrix creation when no experimental paradigm is provided
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    _, X, names = check_design_matrix(
        make_first_level_design_matrix(frame_times,
                                       drift_model='polynomial',
                                       drift_order=3))
    assert len(names) == 4
    x = np.linspace(-0.5, .5, 128)
    assert_almost_equal(X[:, 0], x)
Example #6
def test_design_matrix0d():
    # test design matrix creation when regressors are provided manually
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    ax = np.random.randn(128, 4)
    _, X, names = check_design_matrix(
        make_first_level_design_matrix(frame_times,
                                       drift_model='polynomial',
                                       drift_order=3,
                                       add_regs=ax))
    assert len(names) == 8
    assert X.shape[1] == 8
Example #7
def test_design_matrix0c():
    # test design matrix creation when regressors are provided manually
    rng = np.random.RandomState(42)
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    ax = rng.standard_normal(size=(128, 4))
    _, X, names = check_design_matrix(
        make_first_level_design_matrix(frame_times,
                                       drift_model='polynomial',
                                       drift_order=3,
                                       add_regs=ax))
    assert_almost_equal(X[:, 0], ax[:, 0])
    ax = rng.standard_normal(size=(127, 4))
    with pytest.raises(
            AssertionError,
            match="Incorrect specification of additional regressors:."):
        make_first_level_design_matrix(frame_times, add_regs=ax)
    ax = rng.standard_normal(size=(128, 4))
    with pytest.raises(
            ValueError,
            match="Incorrect number of additional regressor names."):
        make_first_level_design_matrix(frame_times,
                                       add_regs=ax,
                                       add_reg_names='')
    # with pandas Dataframe
    axdf = pd.DataFrame(ax)
    _, X1, names = check_design_matrix(
        make_first_level_design_matrix(frame_times,
                                       drift_model='polynomial',
                                       drift_order=3,
                                       add_regs=axdf))
    assert_almost_equal(X1[:, 0], ax[:, 0])
    assert_array_equal(names[:4], np.arange(4))
Example #8
def test_show_design_matrix():
    # test that the show code indeed (formally) runs
    frame_times = np.linspace(0, 127 * 1., 128)
    dmtx = make_first_level_design_matrix(
        frame_times, drift_model='polynomial', drift_order=3)
    ax = plot_design_matrix(dmtx)
    assert (ax is not None)
    with InTemporaryDirectory():
        ax = plot_design_matrix(dmtx, output_file='dmtx.png')
        assert os.path.exists('dmtx.png')
        assert (ax is None)
        plot_design_matrix(dmtx, output_file='dmtx.pdf')
        assert os.path.exists('dmtx.pdf')
Example #9
def test_design_matrix0c():
    # test design matrix creation when regressors are provided manually
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    ax = np.random.randn(128, 4)
    _, X, names = check_design_matrix(
        make_first_level_design_matrix(frame_times,
                                       drift_model='polynomial',
                                       drift_order=3,
                                       add_regs=ax))
    assert_almost_equal(X[:, 0], ax[:, 0])
    ax = np.random.randn(127, 4)
    with pytest.raises(
            AssertionError,
            match="Incorrect specification of additional regressors:."):
        make_first_level_design_matrix(frame_times, add_regs=ax)
    ax = np.random.randn(128, 4)
    with pytest.raises(
            ValueError,
            match="Incorrect number of additional regressor names."):
        make_first_level_design_matrix(frame_times,
                                       add_regs=ax,
                                       add_reg_names='')
Example #10
def test_spm_2():
    # Check that the nistats design matrix is close enough to the SPM one
    # (it cannot be identical, because the hrf shape is different)
    frame_times = np.linspace(0, 99, 100)
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
    durations = 10 * np.ones(9)
    events = pd.DataFrame({
        'trial_type': conditions,
        'onset': onsets,
        'duration': durations
    })
    X1 = make_first_level_design_matrix(frame_times, events, drift_model=None)
    spm_design_matrix = DESIGN_MATRIX['arr_1']
    _, matrix, _ = check_design_matrix(X1)
    assert (((spm_design_matrix - matrix)**2).sum() /
            (spm_design_matrix**2).sum() < .1)
Example #11
def report_flm_adhd_dmn():  # pragma: no cover
    t_r = 2.
    slice_time_ref = 0.
    n_scans = 176
    pcc_coords = (0, -53, 26)
    adhd_dataset = nilearn.datasets.fetch_adhd(n_subjects=1)
    seed_masker = NiftiSpheresMasker([pcc_coords],
                                     radius=10,
                                     detrend=True,
                                     standardize=True,
                                     low_pass=0.1,
                                     high_pass=0.01,
                                     t_r=2.,
                                     memory='nilearn_cache',
                                     memory_level=1,
                                     verbose=0)
    seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
    frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
    design_matrix = make_first_level_design_matrix(frametimes,
                                                   hrf_model='spm',
                                                   add_regs=seed_time_series,
                                                   add_reg_names=["pcc_seed"])
    dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
    contrasts = {'seed_based_glm': dmn_contrast}

    first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
    first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                              design_matrices=design_matrix)

    report = make_glm_report(
        first_level_model,
        contrasts=contrasts,
        title='ADHD DMN Report',
        cluster_threshold=15,
        height_control='bonferroni',
        min_distance=8.,
        plot_type='glass',
        report_dims=(1200, 'a'),
    )
    output_filename = 'generated_report_flm_adhd_dmn.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
Example #12
def test_csv_io():
    # test the csv io on design matrices
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    events = modulated_event_paradigm()
    DM = make_first_level_design_matrix(
        frame_times,
        events,
        hrf_model='glover',
        drift_model='polynomial',
        drift_order=3,
    )
    path = 'design_matrix.csv'
    with InTemporaryDirectory():
        DM.to_csv(path)
        DM2 = pd.read_csv(path, index_col=0)

    _, matrix, names = check_design_matrix(DM)
    _, matrix_, names_ = check_design_matrix(DM2)
    assert_almost_equal(matrix, matrix_)
    assert names == names_
Example #13
def design_matrix_light(frame_times,
                        events=None,
                        hrf_model='glover',
                        drift_model='cosine',
                        high_pass=.01,
                        drift_order=1,
                        fir_delays=None,
                        add_regs=None,
                        add_reg_names=None,
                        min_onset=-24,
                        path=None):
    """ Same as make_first_level_design_matrix,
    but only returns the computed matrix and associated name.
    """
    fir_delays = fir_delays if fir_delays else [0]
    dmtx = make_first_level_design_matrix(frame_times, events, hrf_model,
                                          drift_model, high_pass, drift_order,
                                          fir_delays, add_regs, add_reg_names,
                                          min_onset)
    _, matrix, names = check_design_matrix(dmtx)
    return matrix, names
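A possible call site for this helper; the two-condition events table is illustrative:

import numpy as np
import pandas as pd

frame_times = np.linspace(0, 127, 128)
events = pd.DataFrame({'trial_type': ['c0', 'c1'],
                       'onset': [10., 60.],
                       'duration': [1., 1.]})
matrix, names = design_matrix_light(frame_times, events,
                                    hrf_model='glover',
                                    drift_model='polynomial',
                                    drift_order=3)
print(matrix.shape, names)  # one column per condition, drift term, constant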
Example #14
    def _run_interface(self, runtime):
        import nibabel as nb
        from nilearn.glm.first_level import design_matrix as dm
        info = self.inputs.session_info
        img = nb.load(self.inputs.bold_file)
        if isinstance(img, nb.Cifti2Image):
            vols = img.shape[0]
        elif isinstance(img, nb.Nifti1Image):
            vols = img.shape[3]
        elif isinstance(img, nb.GiftiImage):
            vols = len(img.darrays)
        else:
            raise ValueError(
                f"Unknown image type ({img.__class__.__name__}) <{self.inputs.bold_file}>"
            )

        drop_missing = bool(self.inputs.drop_missing)
        drift_model = self.inputs.drift_model

        if info['sparse'] not in (None, 'None'):
            sparse = pd.read_hdf(info['sparse'],
                                 key='sparse').rename(columns={
                                     'condition': 'trial_type',
                                     'amplitude': 'modulation'
                                 })
            if 'modulation' in sparse.columns:
                sparse = sparse.dropna(subset=['modulation'])  # Drop NAs
        else:
            sparse = None

        if info['dense'] not in (None, 'None'):
            dense = pd.read_hdf(info['dense'], key='dense')

            missing_columns = dense.isna().all()
            if drop_missing:
                # Remove columns with NaNs
                dense = dense[dense.columns[~missing_columns]]
            elif missing_columns.any():
                missing_names = ', '.join(
                    dense.columns[missing_columns].tolist())
                raise RuntimeError(
                    f'The following columns are empty: {missing_names}. '
                    'Use --drop-missing to drop before model fitting.')

            column_names = dense.columns.tolist()
            drift_model = (None if ('cosine00' in column_names
                                    or 'cosine_00' in column_names)
                           else drift_model)

            if dense.empty:
                dense = None
                column_names = None
        else:
            dense = None
            column_names = None

        mat = dm.make_first_level_design_matrix(
            frame_times=np.arange(vols) * info['repetition_time'],
            events=sparse,
            add_regs=dense,
            hrf_model=None,  # XXX: Consider making an input spec parameter
            add_reg_names=column_names,
            drift_model=drift_model)

        mat.to_csv('design.tsv', sep='\t')
        self._results['design_matrix'] = os.path.join(runtime.cwd,
                                                      'design.tsv')
        return runtime
Example #15
def create_design_matrix(tr,
                         frame_times,
                         events,
                         hrf_model='kay',
                         hrf_idx=None):
    """ Creates a design matrix based on a HRF from Kendrick Kay's set
    or a default one from Nilearn. """

    # This is to keep oversampling consistent across hrf_models
    hrf_oversampling = 10
    design_oversampling = tr / (0.1 / hrf_oversampling)

    if hrf_model != 'kay':  # just use Nilearn!
        return make_first_level_design_matrix(frame_times,
                                              events,
                                              drift_model=None,
                                              min_onset=0,
                                              oversampling=design_oversampling,
                                              hrf_model=hrf_model)

    if hrf_model == 'kay':
        if hrf_idx is None:  # 20 different DMs (based on different HRFs)
            to_iter = range(HRFS_HR.shape[1])
        else:  # use the supplied HRF idx (e.g., 5)
            to_iter = [hrf_idx]

        dms = []  # will store all design matrices
        for hrf_idx in to_iter:  # iterate across all HRFs
            hrf = HRFS_HR[:, hrf_idx]
            # scale HRF to have the same max as the glover HRF
            # makes comparison easier
            hrf /= (hrf.max() / 0.249007)

            # Get info
            trial_type, onset, duration, modulation = check_events(events)

            # Pre-allocate design matrix; note: columns are alphabetically sorted
            X = np.zeros((frame_times.size, np.unique(trial_type).size))
            uniq_trial_types = np.unique(trial_type)  # this is sorted

            # Create separate regressor for each unique trial type
            # Code copied from Nilearn glm module
            for i, condition in enumerate(uniq_trial_types):
                condition_mask = (trial_type == condition)
                exp_condition = (onset[condition_mask],
                                 duration[condition_mask],
                                 modulation[condition_mask])
                # Create high resolution regressor/frame times
                hr_regressor, hr_frame_times = _sample_condition(
                    exp_condition, frame_times, design_oversampling, 0)

                # Convolve with HRF and downsample
                conv_reg = np.convolve(hr_regressor, hrf)[:hr_regressor.size]
                # linear interpolation for now ...
                f = interp1d(hr_frame_times, conv_reg)
                X[:, i] = f(frame_times).T

            # Note to self: do not scale such that max(X, axis=0) is 1, because you'll lose info
            # about predictor variance!
            dm = pd.DataFrame(X, columns=uniq_trial_types, index=frame_times)
            dm['constant'] = 1  # and intercept/constant
            dms.append(dm)

        if len(dms) == 1:
            # Just return single design matrix
            dms = dms[0]

        return dms
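The convolve-then-resample pattern above is the general recipe for plugging in any custom HRF: build the stimulus boxcar on a fine time grid, convolve it with the HRF sampled on that same grid, then interpolate back to the acquisition times. A minimal sketch of that recipe with nilearn's public glover_hrf standing in for the Kay HRF set (HRFS_HR is specific to the original project; check_events and _sample_condition are nilearn internals):

import numpy as np
from scipy.interpolate import interp1d
from nilearn.glm.first_level import glover_hrf

tr, n_scans, dt = 2.0, 100, 0.01              # dt: fine-grid step (s)
frame_times = np.arange(n_scans) * tr
hr_times = np.arange(0, n_scans * tr, dt)     # high-resolution grid

# Boxcar for one condition: onset at 10 s, duration 5 s
regressor = ((hr_times >= 10.) & (hr_times < 15.)).astype(float)

# HRF sampled with the same fine-grid spacing (oversampling = tr / dt)
hrf = glover_hrf(tr, oversampling=tr / dt)

conv = np.convolve(regressor, hrf)[:regressor.size]
column = interp1d(hr_times, conv)(frame_times)  # back to scan times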
Example #16
fd = open(sd.func[0].split(".")[0] + "_onset.txt", "w")
for c, o, d in zip(conditions, onset, duration):
    fd.write("%s %s %s\r\n" % (c, o, d))
fd.close()

# preprocess the data
subject_data = do_subjects_preproc(jobfile, dataset_dir=dataset_dir)[0]

# construct design matrix
nscans = len(subject_data.func[0])
frametimes = np.linspace(0, (nscans - 1) * tr, nscans)
drift_model = 'Cosine'
hrf_model = 'spm + derivative'
design_matrix = make_first_level_design_matrix(frametimes,
                                               paradigm,
                                               hrf_model=hrf_model,
                                               drift_model=drift_model,
                                               high_pass=hfcut)

# plot and save design matrix
ax = plot_design_matrix(design_matrix)
ax.set_position([.05, .25, .9, .65])
ax.set_title('Design matrix')
dmat_outfile = os.path.join(subject_data.output_dir, 'design_matrix.png')
plt.savefig(dmat_outfile, bbox_inches="tight", dpi=200)

# specify contrasts
contrasts = {}
_, matrix, names = check_design_matrix(design_matrix)
contrast_matrix = np.eye(len(names))
for i in range(len(names)):
    contrasts[names[i]] = contrast_matrix[i]
Example #17
def first_level(subject_dic,
                additional_regressors=None,
                compcorr=False,
                smooth=None,
                mesh=False,
                mask_img=None):
    """ Run the first-level analysis (GLM fitting + statistical maps)
    in a given subject

    Parameters
    ----------
    subject_dic: dict,
                 exhaustive description of an individual acquisition
    additional_regressors: dict or None,
                 additional regressors provided as an already sampled
                 design_matrix
                 dictionary keys are session_ids
    compcorr: Bool, optional,
              whether confound estimation and removal should be done or not
    smooth: float or None, optional,
            how much the data should be spatially smoothed during masking
    """
    start_time = time.ctime()
    # experimental paradigm meta-params
    motion_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    hrf_model = subject_dic['hrf_model']
    high_pass = subject_dic['high_pass']
    drift_model = subject_dic['drift_model']
    tr = subject_dic['TR']
    slice_time_ref = 1.

    if not mesh and (mask_img is None):
        mask_img = masking(subject_dic['func'], subject_dic['output_dir'])

    if additional_regressors is None:
        additional_regressors = dict([
            (session_id, None) for session_id in subject_dic['session_id']
        ])

    for session_id, fmri_path, onset, motion_path in zip(
            subject_dic['session_id'], subject_dic['func'],
            subject_dic['onset'], subject_dic['realignment_parameters']):

        task_id = _session_id_to_task_id([session_id])[0]

        if mesh is not False:
            from nibabel.gifti import read
            n_scans = np.array(
                [darrays.data for darrays in read(fmri_path).darrays]).shape[0]
        else:
            n_scans = nib.load(fmri_path).shape[3]

        # motion parameters
        motion = np.loadtxt(motion_path)
        # define the time stamps for different images
        frametimes = np.linspace(slice_time_ref * tr,
                                 (n_scans - 1 + slice_time_ref) * tr, n_scans)
        if task_id == 'audio':
            mask = np.array([1, 0, 1, 1, 0, 1, 1, 0, 1, 1])
            n_cycles = 28
            cycle_duration = 20
            t_r = 2
            cycle = np.arange(0, cycle_duration, t_r)[mask > 0]
            frametimes = np.tile(cycle, n_cycles) +\
                np.repeat(np.arange(n_cycles) * cycle_duration, mask.sum())
            frametimes = frametimes[:-2]  # for some reason...

        if mesh is not False:
            compcorr = False  # XXX Fixme

        if compcorr:
            confounds = high_variance_confounds(fmri_path, mask_img=mask_img)
            confounds = np.hstack((confounds, motion))
            confound_names = ['conf_%d' % i for i in range(5)] + motion_names
        else:
            confounds = motion
            confound_names = motion_names

        if onset is None:
            warnings.warn('Onset file not provided. Trying to guess it')
            task = os.path.basename(fmri_path).split('task')[-1][4:]
            onset = os.path.join(
                os.path.split(os.path.dirname(fmri_path))[0], 'model001',
                'onsets', 'task' + task + '_run001', 'task%s.csv' % task)

        if not os.path.exists(onset):
            warnings.warn('non-existent onset file, proceeding without it')
            paradigm = None
        else:
            paradigm = make_paradigm(onset, task_id)

        # handle manually supplied regressors
        add_reg_names = []
        if additional_regressors[session_id] is None:
            add_regs = confounds
        else:
            df = read_csv(additional_regressors[session_id])
            add_regs = []
            for regressor in df:
                add_reg_names.append(regressor)
                add_regs.append(df[regressor])
            add_regs = np.array(add_regs).T
            add_regs = np.hstack((add_regs, confounds))

        add_reg_names += confound_names

        # create the design matrix
        design_matrix = make_first_level_design_matrix(
            frametimes,
            paradigm,
            hrf_model=hrf_model,
            drift_model=drift_model,
            high_pass=high_pass,
            add_regs=add_regs,
            add_reg_names=add_reg_names)
        _, dmtx, names = check_design_matrix(design_matrix)

        # create the relevant contrasts
        contrasts = make_contrasts(task_id, names)

        if mesh == 'fsaverage5':
            # this is low-resolution data
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_fsaverage5_%s' % session_id)
        elif mesh == 'fsaverage7':
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_fsaverage7_%s' % session_id)
        elif mesh == 'individual':
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_individual_%s' % session_id)
        else:
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_stats_%s' % session_id)

        if not os.path.exists(subject_session_output_dir):
            os.makedirs(subject_session_output_dir)
        np.savez(os.path.join(subject_session_output_dir, 'design_matrix.npz'),
                 design_matrix=design_matrix)

        if mesh is not False:
            run_surface_glm(design_matrix, contrasts, fmri_path,
                            subject_session_output_dir)
        else:
            z_maps, fmri_glm = run_glm(design_matrix,
                                       contrasts,
                                       fmri_path,
                                       mask_img,
                                       subject_dic,
                                       subject_session_output_dir,
                                       tr=tr,
                                       slice_time_ref=slice_time_ref,
                                       smoothing_fwhm=smooth)

            # do stats report
            anat_img = nib.load(subject_dic['anat'])
            stats_report_filename = os.path.join(subject_session_output_dir,
                                                 'report_stats.html')

            report = make_glm_report(
                fmri_glm,
                contrasts,
                threshold=3.0,
                bg_img=anat_img,
                cluster_threshold=15,
                title="GLM for subject %s" % session_id,
            )
            report.save_as_html(stats_report_filename)
Example #18
    def _run_interface(self, runtime):
        import nibabel as nb
        from nilearn.glm.first_level import design_matrix as dm

        info = self.inputs.design_info
        img = nb.load(self.inputs.bold_file)
        if isinstance(img, nb.Cifti2Image):
            vols = img.shape[0]
        elif isinstance(img, nb.Nifti1Image):
            vols = img.shape[3]
        elif isinstance(img, nb.GiftiImage):
            vols = len(img.darrays)
        else:
            raise ValueError(
                f"Unknown image type ({img.__class__.__name__}) <{self.inputs.bold_file}>"
            )

        drop_missing = bool(self.inputs.drop_missing)
        drift_model = self.inputs.drift_model

        if info['dense'] not in (None, 'None'):
            dense = pd.read_hdf(info['dense'], key='dense')

            missing_columns = dense.isna().all()
            if drop_missing:
                # Remove columns with NaNs
                dense = dense[dense.columns[~missing_columns]]
            elif missing_columns.any():
                missing_names = ', '.join(
                    dense.columns[missing_columns].tolist())
                raise RuntimeError(
                    f'The following columns are empty: {missing_names}. '
                    'Use --drop-missing to drop before model fitting.')

            column_names = dense.columns.tolist()
            drift_model = (None if
                           (('cosine00' in column_names) |
                            ('cosine_00' in column_names)) else drift_model)

            if dense.empty:
                dense = None
                column_names = None
        else:
            dense = None
            column_names = None

        # XXX hack for pybids giving us a bad matrix
        # Case seems to be when (TR * nvols) rounds up instead of down
        if (dense is not None and len(dense) == vols + 1
                and np.isnan(dense[-1:].values).all()):
            dense = dense[:-1]

        mat = dm.make_first_level_design_matrix(
            frame_times=np.arange(vols) * info['repetition_time'],
            add_regs=dense,
            hrf_model=None,  # XXX: Consider making an input spec parameter
            add_reg_names=column_names,
            drift_model=drift_model,
        )

        mat.to_csv('design.tsv', sep='\t')
        self._results['design_matrix'] = os.path.join(runtime.cwd,
                                                      'design.tsv')
        return runtime
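The drift_model suppression in both interface examples matters: if the dense confounds already carry an fMRIPrep-style cosine basis (cosine00 columns), letting nilearn add its own cosine drift would duplicate regressors and leave the design rank-deficient. The guard in isolation:

drift_model = 'cosine'
column_names = ['trans_x', 'trans_y', 'cosine00', 'cosine01']

# Disable nilearn's drift model when a cosine basis is already supplied
if 'cosine00' in column_names or 'cosine_00' in column_names:
    drift_model = None
print(drift_model)  # None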
Example #19
def denoise(img_file,
            tsv_file,
            out_path,
            col_names=False,
            hp_filter=False,
            lp_filter=False,
            out_figure_path=False):
    nii_ext = '.nii.gz'
    FD_thr = [.5]
    sc_range = np.arange(-1, 3)
    constant = 'constant'

    # read in files
    img = load_niimg(img_file)
    # get file info
    img_name = os.path.basename(img.get_filename())
    file_base = img_name[0:img_name.find('.')]
    save_img_file = pjoin(out_path, file_base + '_NR' + nii_ext)
    data = img.get_fdata()
    df_orig = pandas.read_csv(tsv_file, sep='\t', na_values='n/a')
    df = copy.deepcopy(df_orig)
    Ntrs = df.values.shape[0]
    print('# of TRs: ' + str(Ntrs))
    assert Ntrs == data.shape[-1]

    # select columns to use as nuisance regressors
    if col_names:
        df = df[col_names]
        str_append = '  [SELECTED regressors in CSV]'
    else:
        col_names = df.columns.tolist()
        str_append = '  [ALL regressors in CSV]'

    # fill in missing nuisance values with mean for that variable
    for col in df.columns:
        if sum(df[col].isnull()) > 0:
            print('Filling in ' + str(sum(df[col].isnull())) +
                  ' NaN value for ' + col)
            df[col] = df[col].fillna(np.mean(df[col]))
    print('# of Confound Regressors: ' + str(len(df.columns)) + str_append)

    # implement HP filter in regression
    TR = img.header.get_zooms()[-1]
    frame_times = np.arange(Ntrs) * TR
    if hp_filter:
        hp_filter = float(hp_filter)
        assert (hp_filter > 0)
        df = make_first_level_design_matrix(frame_times,
                                            high_pass=hp_filter,
                                            add_regs=df.values,
                                            add_reg_names=df.columns.tolist())
        # fn adds intercept into dm

        hp_cols = [col for col in df.columns if 'drift' in col]
        print('# of High-pass Filter Regressors: ' + str(len(hp_cols)))
    else:
        # add in intercept column into data frame
        df[constant] = 1
        print('No High-pass Filter Applied')

    dm = df.values

    # prep data
    data = np.reshape(data, (-1, Ntrs))
    data_mean = np.mean(data, axis=1)
    Nvox = len(data_mean)

    # setup and run regression
    model = regression.OLSModel(dm)
    results = model.fit(data.T)
    if not hp_filter:
        results_orig_resid = copy.deepcopy(
            results.residuals)  # save for rsquared computation

    # apply low-pass filter
    if lp_filter:
        # input to butterworth fn is time x voxels
        low_pass = float(lp_filter)
        Fs = 1. / TR
        if low_pass >= Fs / 2:
            raise ValueError(
                'Low-pass filter cutoff is too close to the Nyquist frequency (%s)'
                % (Fs / 2))

        temp_img_file = pjoin(out_path, file_base + '_temp' + nii_ext)
        temp_img = nb.Nifti1Image(np.reshape(
            results.residuals.T + np.reshape(data_mean, (Nvox, 1)),
            img.shape).astype('float32'),
                                  img.affine,
                                  header=img.header)
        temp_img.to_filename(temp_img_file)
        results.residuals = butterworth(results.residuals,
                                        sampling_rate=Fs,
                                        low_pass=low_pass,
                                        high_pass=None)
        print('Low-pass Filter Applied: < ' + str(low_pass) + ' Hz')

    # add mean back into data
    clean_data = results.residuals.T + np.reshape(
        data_mean, (Nvox, 1))  # add mean back into residuals

    # save out new data file
    print('Saving output file...')
    clean_data = np.reshape(clean_data, img.shape).astype('float32')
    new_img = nb.Nifti1Image(clean_data, img.affine, header=img.header)
    new_img.to_filename(save_img_file)

    ######### generate Rsquared map for confounds only
    if hp_filter:
        # first remove low-frequency information from data
        hp_cols.append(constant)
        model_first = regression.OLSModel(df[hp_cols].values)
        results_first = model_first.fit(data.T)
        results_first_resid = copy.deepcopy(results_first.residuals)
        del results_first, model_first

        # compute sst - borrowed from matlab
        sst = np.square(
            np.linalg.norm(results_first_resid -
                           np.mean(results_first_resid, axis=0),
                           axis=0))

        # now regress out 'true' confounds to estimate their Rsquared
        nr_cols = [col for col in df.columns if 'drift' not in col]
        model_second = regression.OLSModel(df[nr_cols].values)
        results_second = model_second.fit(results_first_resid)

        # compute sse - borrowed from matlab
        sse = np.square(np.linalg.norm(results_second.residuals, axis=0))

        del results_second, model_second, results_first_resid

    elif not hp_filter:
        # compute sst - borrowed from matlab
        sst = np.square(
            np.linalg.norm(data.T - np.mean(data.T, axis=0), axis=0))

        # compute sse - borrowed from matlab
        sse = np.square(np.linalg.norm(results_orig_resid, axis=0))

        del results_orig_resid

    # compute rsquared of nuisance regressors
    zero_idx = np.logical_and(sst == 0, sse == 0)
    sse[zero_idx] = 1
    sst[zero_idx] = 1  # would be NaNs - become rsquared = 0
    rsquare = 1 - np.true_divide(sse, sst)
    rsquare[np.isnan(rsquare)] = 0

    ######### Visualizing DM & outputs
    fontsize = 12
    fontsize_title = 14
    def_img_size = 8

    if not out_figure_path:
        out_figure_path = save_img_file[0:save_img_file.find('.')] + '_figures'

    if not os.path.isdir(out_figure_path):
        os.mkdir(out_figure_path)
    png_append = '_' + img_name[0:img_name.find('.')] + '.png'
    print('Output directory: ' + out_figure_path)

    # DM corr matrix
    cm = df[df.columns[0:-1]].corr()
    curr_sz = copy.deepcopy(def_img_size)
    if cm.shape[0] > def_img_size:
        curr_sz = curr_sz + ((cm.shape[0] - curr_sz) * .3)
    mtx_scale = curr_sz * 100

    mask = np.zeros_like(cm, dtype=bool)
    mask[np.triu_indices_from(mask)] = True

    fig, ax = plt.subplots(figsize=(curr_sz, curr_sz))
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(cm,
                mask=mask,
                cmap=cmap,
                center=0,
                vmax=cm[cm < 1].max().max(),
                vmin=cm[cm < 1].min().min(),
                square=True,
                linewidths=.5,
                cbar_kws={"shrink": .6})
    ax.set_xticklabels(ax.get_xticklabels(),
                       rotation=60,
                       ha='right',
                       fontsize=fontsize)
    ax.set_yticklabels(cm.columns.tolist(),
                       rotation=-30,
                       va='bottom',
                       fontsize=fontsize)
    ax.set_title('Nuisance Corr. Matrix', fontsize=fontsize_title)
    plt.tight_layout()
    file_corr_matrix = 'Corr_matrix_regressors' + png_append
    fig.savefig(pjoin(out_figure_path, file_corr_matrix))
    plt.close(fig)
    del fig, ax

    # DM of Nuisance Regressors (all)
    tr_label = 'TR (Volume #)'
    fig, ax = plt.subplots(figsize=(curr_sz - 4.1, def_img_size))
    x_scale_html = ((curr_sz - 4.1) / def_img_size) * 890
    plotting.plot_design_matrix(df, ax=ax)
    ax.set_title('Nuisance Design Matrix', fontsize=fontsize_title)
    ax.set_xticklabels(ax.get_xticklabels(),
                       rotation=60,
                       ha='right',
                       fontsize=fontsize)
    ax.set_yticklabels(ax.get_yticklabels(), fontsize=fontsize)
    ax.set_ylabel(tr_label, fontsize=fontsize)
    plt.tight_layout()
    file_design_matrix = 'Design_matrix' + png_append
    fig.savefig(pjoin(out_figure_path, file_design_matrix))
    plt.close(fig)
    del fig, ax

    # FD timeseries plot
    FD = 'FD'
    poss_names = [
        'FramewiseDisplacement', FD, 'framewisedisplacement', 'fd',
        'framewise_displacement'
    ]
    fd_idx = [df_orig.columns.__contains__(i) for i in poss_names]
    if np.sum(fd_idx) > 0:
        FD_name = np.array(poss_names)[fd_idx][0]  #poss_names[fd_idx == True]
        if sum(df_orig[FD_name].isnull()) > 0:
            df_orig[FD_name] = df_orig[FD_name].fillna(
                np.mean(df_orig[FD_name]))
        y = df_orig[FD_name].values
        Nremove = []
        sc_idx = []
        for thr_idx, thr in enumerate(FD_thr):
            idx = y >= thr
            sc_idx.append(copy.deepcopy(idx))
            for iidx in np.where(idx)[0]:
                for buffer in sc_range:
                    curr_idx = iidx + buffer
                    if 0 <= curr_idx < len(idx):
                        sc_idx[thr_idx][curr_idx] = True
            Nremove.append(np.sum(sc_idx[thr_idx]))

        Nplots = len(FD_thr)
        sns.set(font_scale=1.5)
        sns.set_style('ticks')
        fig, axes = plt.subplots(Nplots,
                                 1,
                                 figsize=(def_img_size * 1.5,
                                          def_img_size / 2),
                                 squeeze=False)
        sns.despine()
        bound = .4
        fd_mean = np.mean(y)
        for curr in np.arange(0, Nplots):
            axes[curr, 0].plot(y)
            axes[curr, 0].plot((-bound, Ntrs + bound),
                               FD_thr[curr] * np.ones((1, 2))[0],
                               '--',
                               color='black')
            axes[curr, 0].scatter(np.arange(0, Ntrs), y, s=20)

            if Nremove[curr] > 0:
                info = scipy.ndimage.label(sc_idx[curr])
                for cluster in np.arange(1, info[1] + 1):
                    temp = np.where(info[0] == cluster)[0]
                    axes[curr, 0].axvspan(temp.min() - bound,
                                          temp.max() + bound,
                                          alpha=.5,
                                          color='red')

            axes[curr, 0].set_ylabel('Framewise Disp. (' + FD + ')')
            axes[curr, 0].set_title(FD + ': ' +
                                    str(100 * Nremove[curr] / Ntrs)[0:4] +
                                    '% of scan (' + str(Nremove[curr]) +
                                    ' volumes) would be scrubbed (FD thr.= ' +
                                    str(FD_thr[curr]) + ')')
            plt.text(Ntrs + 1,
                     FD_thr[curr] - .01,
                     FD + ' = ' + str(FD_thr[curr]),
                     fontsize=fontsize)
            plt.text(Ntrs,
                     fd_mean - .01,
                     'avg = ' + str(fd_mean),
                     fontsize=fontsize)
            axes[curr, 0].set_xlim((-bound, Ntrs + 8))

        plt.tight_layout()
        axes[curr, 0].set_xlabel(tr_label)
        file_fd_plot = FD + '_timeseries' + png_append
        fig.savefig(pjoin(out_figure_path, file_fd_plot))
        plt.close(fig)
        del fig, axes
        print(FD + ' timeseries plot saved')

    else:
        print(FD + ' not found: ' + FD + ' timeseries not plotted')
        file_fd_plot = None

    # Carpet and DVARS plots - before & after nuisance regression

    # need to create mask file to input to DVARS function
    mask_file = pjoin(out_figure_path, 'mask_temp.nii.gz')
    nifti_masker = NiftiMasker(mask_strategy='epi', standardize=False)
    nifti_masker.fit(img)
    nifti_masker.mask_img_.to_filename(mask_file)

    # create 2 or 3 carpet plots, depending on if LP filter is also applied
    Ncarpet = 2
    total_sz = int(16)
    carpet_scale = 840
    y_labels = ['Input (voxels)', 'Output \'cleaned\'']
    imgs = [img, new_img]
    img_files = [img_file, save_img_file]
    color = ['red', 'salmon']
    labels = ['input', 'cleaned']
    if lp_filter:
        Ncarpet = 3
        total_sz = int(20)
        carpet_scale = carpet_scale * (9 / 8)
        y_labels = ['Input', 'Clean Pre-LP', 'Clean LP']
        imgs.insert(1, temp_img)
        img_files.insert(1, temp_img_file)
        color.insert(1, 'firebrick')
        labels.insert(1, 'clean pre-LP')
        labels[-1] = 'clean LP'

    dvars = []
    print('Computing dvars...')
    for in_file in img_files:
        temp = nac.compute_dvars(in_file=in_file, in_mask=mask_file)[1]
        dvars.append(np.hstack((temp.mean(), temp)))
        del temp

    small_sz = 2
    fig = plt.figure(figsize=(def_img_size * 1.5,
                              def_img_size + ((Ncarpet - 2) * 1)))
    row_used = 0
    if np.sum(fd_idx) > 0:  # if FD data is available
        row_used = row_used + small_sz
        ax0 = plt.subplot2grid((total_sz, 1), (0, 0), rowspan=small_sz)
        ax0.plot(y)
        ax0.scatter(np.arange(0, Ntrs), y, s=10)
        curr = 0
        if Nremove[curr] > 0:
            info = scipy.ndimage.label(sc_idx[curr])
            for cluster in np.arange(1, info[1] + 1):
                temp = np.where(info[0] == cluster)[0]
                ax0.axvspan(temp.min() - bound,
                            temp.max() + bound,
                            alpha=.5,
                            color='red')
        ax0.set_ylabel(FD)

        for side in ["top", "right", "bottom"]:
            ax0.spines[side].set_color('none')
            ax0.spines[side].set_visible(False)

        ax0.set_xticks([])
        ax0.set_xlim((-.5, Ntrs - .5))
        ax0.spines["left"].set_position(('outward', 10))

    ax_d = plt.subplot2grid((total_sz, 1), (row_used, 0), rowspan=small_sz)
    for iplot in np.arange(len(dvars)):
        ax_d.plot(dvars[iplot], color=color[iplot], label=labels[iplot])
    ax_d.set_ylabel('DVARS')
    for side in ["top", "right", "bottom"]:
        ax_d.spines[side].set_color('none')
        ax_d.spines[side].set_visible(False)
    ax_d.set_xticks([])
    ax_d.set_xlim((-.5, Ntrs - .5))
    ax_d.spines["left"].set_position(('outward', 10))
    ax_d.legend(fontsize=fontsize - 2)
    row_used = row_used + small_sz

    st = 0
    carpet_each = int((total_sz - row_used) / Ncarpet)
    for idx, img_curr in enumerate(imgs):
        ax_curr = plt.subplot2grid((total_sz, 1), (row_used + st, 0),
                                   rowspan=carpet_each)
        fig = plotting.plot_carpet(img_curr, figure=fig, axes=ax_curr)
        ax_curr.set_ylabel(y_labels[idx])
        for side in ["bottom", "left"]:
            ax_curr.spines[side].set_position(('outward', 10))

        if idx < len(imgs) - 1:
            ax_curr.spines["bottom"].set_visible(False)
            ax_curr.set_xticklabels('')
            ax_curr.set_xlabel('')
            st = st + carpet_each

    file_carpet_plot = 'Carpet_plots' + png_append
    fig.savefig(pjoin(out_figure_path, file_carpet_plot))
    plt.close()
    del fig, ax0, ax_curr, ax_d, dvars
    os.remove(mask_file)
    print('Carpet/DVARS plots saved')
    if lp_filter:
        os.remove(temp_img_file)
        del temp_img

    # Display T-stat maps for nuisance regressors
    # create mean img
    img_size = (img.shape[0], img.shape[1], img.shape[2])
    mean_img = nb.Nifti1Image(np.reshape(data_mean, img_size), img.affine)
    mx = []
    for idx, col in enumerate(df.columns):
        if 'drift' not in col and constant not in col:
            con_vector = np.zeros((1, df.shape[1]))
            con_vector[0, idx] = 1
            con = results.Tcontrast(con_vector)
            mx.append(np.max(np.absolute([con.t.min(), con.t.max()])))
    mx = .8 * np.max(mx)
    t_png = 'Tstat_'
    file_tstat = []
    for idx, col in enumerate(df.columns):
        if 'drift' not in col and constant not in col:
            con_vector = np.zeros((1, df.shape[1]))
            con_vector[0, idx] = 1
            con = results.Tcontrast(con_vector)
            m_img = nb.Nifti1Image(np.reshape(con.t, img_size),
                                   img.affine)  # t values, not the object

            title_str = col + ' Tstat'
            fig = plotting.plot_stat_map(m_img,
                                         mean_img,
                                         threshold=3,
                                         colorbar=True,
                                         display_mode='z',
                                         vmax=mx,
                                         title=title_str,
                                         cut_coords=7)
            file_temp = t_png + col + png_append
            fig.savefig(pjoin(out_figure_path, file_temp))
            file_tstat.append({'name': col, 'file': file_temp})
            plt.close()
            del fig, file_temp
            print(title_str + ' map saved')

    # Display R-sq map for nuisance regressors
    m_img = nb.Nifti1Image(np.reshape(rsquare, img_size), img.affine)
    title_str = 'Nuisance Rsq'
    mx = .95 * rsquare.max()
    fig = plotting.plot_stat_map(m_img,
                                 mean_img,
                                 threshold=.2,
                                 colorbar=True,
                                 display_mode='z',
                                 vmax=mx,
                                 title=title_str,
                                 cut_coords=7)
    file_rsq_map = 'Rsquared' + png_append
    fig.savefig(pjoin(out_figure_path, file_rsq_map))
    plt.close()
    del fig
    print(title_str + ' map saved')

    ######### html report
    templateLoader = jinja2.FileSystemLoader(searchpath="/")
    templateEnv = jinja2.Environment(loader=templateLoader)

    templateVars = {
        "img_file": img_file,
        "save_img_file": save_img_file,
        "Ntrs": Ntrs,
        "tsv_file": tsv_file,
        "col_names": col_names,
        "hp_filter": hp_filter,
        "lp_filter": lp_filter,
        "file_design_matrix": file_design_matrix,
        "file_corr_matrix": file_corr_matrix,
        "file_fd_plot": file_fd_plot,
        "file_rsq_map": file_rsq_map,
        "file_tstat": file_tstat,
        "x_scale": x_scale_html,
        "mtx_scale": mtx_scale,
        "file_carpet_plot": file_carpet_plot,
        "carpet_scale": carpet_scale
    }

    TEMPLATE_FILE = pjoin(os.getcwd(), "report_template.html")
    template = templateEnv.get_template(TEMPLATE_FILE)

    outputText = template.render(templateVars)

    html_file = pjoin(out_figure_path,
                      img_name[0:img_name.find('.')] + '.html')
    with open(html_file, "w") as f:
        f.write(outputText)

    print('')
    print('HTML report: ' + html_file)
    return new_img
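Stripped of the plotting and reporting, the denoising core above is a single OLS pass: regress each voxel's time series on the nuisance design, keep the residuals, and add the voxel means back. A minimal sketch on synthetic data, assuming OLSModel is importable from nilearn.glm (the snippet above reaches it through its regression module):

import numpy as np
from nilearn.glm import OLSModel

n_trs, n_vox = 200, 500
data = np.random.randn(n_vox, n_trs)               # voxels x time
dm = np.column_stack([np.random.randn(n_trs, 6),   # e.g. motion regressors
                      np.ones(n_trs)])             # intercept

model = OLSModel(dm)
results = model.fit(data.T)                        # OLS expects time x voxels
data_mean = data.mean(axis=1)
clean = results.residuals.T + data_mean[:, None]   # residuals + mean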
Example #20
    def fit(self, run_imgs, events=None, confounds=None,
            design_matrices=None):
        """ Fit the GLM

        For each run:
        1. create design matrix X
        2. do a masker job: fMRI_data -> Y
        3. fit regression to (Y, X)

        Parameters
        ----------
        run_imgs: Niimg-like object or list of Niimg-like objects,
            See http://nilearn.github.io/manipulating_images/input_output.html#inputing-data-file-names-or-image-objects  # noqa:E501
            Data on which the GLM will be fitted. If this is a list,
            the affine is considered the same for all.

        events: pandas DataFrame or string or list of pandas DataFrames or
                strings

            fMRI events used to build design matrices. One events object
            expected per run_img. Ignored if design_matrices is not None.
            If string, then a path to a csv file is expected.

        confounds: pandas DataFrame, numpy array or string or
                   list of pandas DataFrames, numpy arrays or strings

            Each column in a DataFrame corresponds to a confound variable
            to be included in the regression model of the respective run_img.
            The number of rows must match the number of volumes in the
            respective run_img. Ignored if design_matrices is not None.
            If string, then a path to a csv file is expected.

        design_matrices: pandas DataFrame or list of pandas DataFrames,
            Design matrices that will be used to fit the GLM. If given it
            takes precedence over events and confounds.

        """
        # Local import to prevent circular imports
        from nilearn.input_data import NiftiMasker  # noqa

        # Check arguments
        # Check imgs type
        if events is not None:
            _check_events_file_uses_tab_separators(events_files=events)
        if not isinstance(run_imgs, (list, tuple)):
            run_imgs = [run_imgs]
        if design_matrices is None:
            if events is None:
                raise ValueError('events or design matrices must be provided')
            if self.t_r is None:
                raise ValueError('t_r not given to FirstLevelModel object'
                                 ' to compute design from events')
        else:
            design_matrices = _check_run_tables(run_imgs, design_matrices,
                                                'design_matrices')
        # Check that number of events and confound files match number of runs
        # Also check that events and confound files can be loaded as DataFrame
        if events is not None:
            events = _check_run_tables(run_imgs, events, 'events')
        if confounds is not None:
            confounds = _check_run_tables(run_imgs, confounds, 'confounds')

        # Learn the mask
        if self.mask_img is False:
            # We create a dummy mask to preserve functionality of api
            ref_img = check_niimg(run_imgs[0])
            self.mask_img = Nifti1Image(np.ones(ref_img.shape[:3]),
                                        ref_img.affine)
        if not isinstance(self.mask_img, NiftiMasker):
            self.masker_ = NiftiMasker(mask_img=self.mask_img,
                                       smoothing_fwhm=self.smoothing_fwhm,
                                       target_affine=self.target_affine,
                                       standardize=self.standardize,
                                       mask_strategy='epi',
                                       t_r=self.t_r,
                                       memory=self.memory,
                                       verbose=max(0, self.verbose - 2),
                                       target_shape=self.target_shape,
                                       memory_level=self.memory_level
                                       )
            self.masker_.fit(run_imgs[0])
        else:
            if self.mask_img.mask_img_ is None and self.masker_ is None:
                self.masker_ = clone(self.mask_img)
                for param_name in ['target_affine', 'target_shape',
                                   'smoothing_fwhm', 't_r', 'memory',
                                   'memory_level']:
                    our_param = getattr(self, param_name)
                    if our_param is None:
                        continue
                    if getattr(self.masker_, param_name) is not None:
                        warn('Parameter %s of the masker'
                             ' overridden' % param_name)
                    setattr(self.masker_, param_name, our_param)
                self.masker_.fit(run_imgs[0])
            else:
                self.masker_ = self.mask_img

        # For each run fit the model and keep only the regression results.
        self.labels_, self.results_, self.design_matrices_ = [], [], []
        n_runs = len(run_imgs)
        t0 = time.time()
        for run_idx, run_img in enumerate(run_imgs):
            # Report progress
            if self.verbose > 0:
                percent = float(run_idx) / n_runs
                percent = round(percent * 100, 2)
                dt = time.time() - t0
                # We use a max to avoid a division by zero
                if run_idx == 0:
                    remaining = 'go take a coffee, a big one'
                else:
                    remaining = (100. - percent) / max(0.01, percent) * dt
                    remaining = '%i seconds remaining' % remaining

                sys.stderr.write(
                    "Computing run %d out of %d runs (%s)\n"
                    % (run_idx + 1, n_runs, remaining))

            # Build the experimental design for the glm
            run_img = check_niimg(run_img, ensure_ndim=4)
            if design_matrices is None:
                n_scans = get_data(run_img).shape[3]
                if confounds is not None:
                    confounds_matrix = confounds[run_idx].values
                    if confounds_matrix.shape[0] != n_scans:
                        raise ValueError('Rows in confounds does not match '
                                         'n_scans in run_img at index %d'
                                         % (run_idx,))
                    confounds_names = confounds[run_idx].columns.tolist()
                else:
                    confounds_matrix = None
                    confounds_names = None
                start_time = self.slice_time_ref * self.t_r
                end_time = (n_scans - 1 + self.slice_time_ref) * self.t_r
                frame_times = np.linspace(start_time, end_time, n_scans)
                design = make_first_level_design_matrix(frame_times,
                                                        events[run_idx],
                                                        self.hrf_model,
                                                        self.drift_model,
                                                        self.high_pass,
                                                        self.drift_order,
                                                        self.fir_delays,
                                                        confounds_matrix,
                                                        confounds_names,
                                                        self.min_onset
                                                        )
            else:
                design = design_matrices[run_idx]
            self.design_matrices_.append(design)

            # Mask and prepare data for GLM
            if self.verbose > 1:
                t_masking = time.time()
                sys.stderr.write('Starting masker computation \r')

            Y = self.masker_.transform(run_img)
            del run_img  # Delete unmasked image to save memory

            if self.verbose > 1:
                t_masking = time.time() - t_masking
                sys.stderr.write('Masker took %d seconds       \n'
                                 % t_masking)

            if self.signal_scaling:
                Y, _ = mean_scaling(Y, self.scaling_axis)
            if self.memory:
                mem_glm = self.memory.cache(run_glm, ignore=['n_jobs'])
            else:
                mem_glm = run_glm

            # compute GLM
            if self.verbose > 1:
                t_glm = time.time()
                sys.stderr.write('Performing GLM computation\r')
            labels, results = mem_glm(Y, design.values,
                                      noise_model=self.noise_model,
                                      bins=100, n_jobs=self.n_jobs)
            if self.verbose > 1:
                t_glm = time.time() - t_glm
                sys.stderr.write('GLM took %d seconds         \n' % t_glm)

            self.labels_.append(labels)
            # We save memory if inspecting model details is not necessary
            if self.minimize_memory:
                for key in results:
                    results[key] = SimpleRegressionResults(results[key])
            self.results_.append(results)
            del Y

        # Report progress
        if self.verbose > 0:
            sys.stderr.write("\nComputation of %d runs done in %i seconds\n\n"
                             % (n_runs, time.time() - t0))

        return self
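A hedged usage sketch for this fit method, passing a precomputed design matrix so the events/confounds branch is bypassed; the synthetic image and events table are illustrative:

import numpy as np
import pandas as pd
from nibabel import Nifti1Image
from nilearn.glm.first_level import (FirstLevelModel,
                                     make_first_level_design_matrix)

rng = np.random.RandomState(0)
img = Nifti1Image(rng.standard_normal((4, 4, 4, 20)), np.eye(4))  # tiny 4D image

t_r = 2.0
frame_times = np.arange(20) * t_r
events = pd.DataFrame({'trial_type': ['a', 'a'],
                       'onset': [4., 20.],
                       'duration': [2., 2.]})
design = make_first_level_design_matrix(frame_times, events,
                                        drift_model='polynomial',
                                        drift_order=1)

# mask_img=False requests the dummy whole-image mask created in fit()
model = FirstLevelModel(t_r=t_r, mask_img=False)
model = model.fit(img, design_matrices=design)  # events not needed here
z_map = model.compute_contrast('a')             # contrast by column name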