"""specify contrasts"""
_, matrix, names = check_design_matrix(design_matrix)
contrasts = {}
n_columns = len(names)
I = np.eye(len(names))
for i in xrange(2):
    contrasts['%s' % names[2 * i]] = I[2 * i]

"""more interesting contrasts"""
contrasts['EV1>EV2'] = contrasts['EV1'] - contrasts['EV2']
contrasts['EV2>EV1'] = contrasts['EV2'] - contrasts['EV1']
contrasts['effects_of_interest'] = contrasts['EV1'] + contrasts['EV2']

"""fit GLM"""
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FMRILinearModel(fmri_files, matrix, mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

"""save computed mask"""
mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")
print "Saving mask image %s" % mask_path
nibabel.save(fmri_glm.mask, mask_path)

# compute background onto which activations will be projected
mean_fmri_files = compute_mean_3D_image(fmri_files)
print "Computing contrasts .."
z_maps = {}
for contrast_id, contrast_val in contrasts.iteritems():
    print "\tcontrast id: %s" % contrast_id
    z_map, t_map, eff_map, var_map = fmri_glm.contrast(
        contrasts[contrast_id],
Example 2
def do_subject_glm(subject_id):
    subject_output_dir = os.path.join(output_dir, subject_id)

    # make design matrices
    design_matrices = []
    func = []
    anat = os.path.join(subject_output_dir, "anatomy", "whighres001_brain.nii")
    for run_path in sorted(
            glob.glob(
                os.path.join(data_dir, subject_id,
                             "model/model001/onsets/task*"))):
        run_id = os.path.basename(run_path)
        run_func = glob.glob(
            os.path.join(subject_output_dir, "BOLD", run_id, "wrbold*.nii"))
        assert len(run_func) == 1
        run_func = run_func[0]
        run_onset_paths = sorted(
            glob.glob(
                os.path.join(data_dir, subject_id,
                             "model/model001/onsets/%s/*" % run_id)))
        onsets = list(map(np.loadtxt, run_onset_paths))
        conditions = np.hstack([[condition_keys["cond%03i" % (c + 1)]] *
                                len(onsets[c])
                                for c in range(len(run_onset_paths))])
        onsets = np.vstack((onsets))
        onsets *= tr
        run_func = nibabel.load(run_func)
        func.append(run_func)
        n_scans = run_func.shape[-1]
        onset, duration, modulation = onsets.T

        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        paradigm = pd.DataFrame(
            dict(name=conditions,
                 onset=onset,
                 duration=duration,
                 modulation=modulation))
        design_matrix = make_design_matrix(frametimes,
                                           paradigm,
                                           hrf_model=hrf_model,
                                           drift_model=drift_model,
                                           period_cut=hfcut)
        design_matrices.append(design_matrix)
    n_runs = len(func)

    # specify contrasts
    _, _, names = check_design_matrix(design_matrix)
    n_columns = len(names)
    contrast_matrix = np.eye(n_columns)
    contrasts = {}
    for c in range(len(condition_keys)):
        contrasts[names[2 * c]] = contrast_matrix[2 * c]
    contrasts["avg"] = np.mean(contrasts.values(), axis=0)

    # more interesting contrasts
    contrasts_ = {}
    for contrast, val in contrasts.items():
        if not contrast == "avg":
            contrasts_["%s_minus_avg" % contrast] = val - contrasts["avg"]
    contrasts = contrasts_

    # fit GLM
    from nilearn.image import smooth_img
    func = smooth_img(func, fwhm=8.)
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(func, [
        check_design_matrix(design_matrix)[1]
        for design_matrix in design_matrices
    ],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(subject_output_dir, "mask.nii")
    print("Saving mask image to %s ..." % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrast maps
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * n_runs,
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(out_map, map_path)
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # # generate stats report
    # stats_report_filename = os.path.join(subject_output_dir, "reports",
    #                                      "report_stats.html")
    # generate_subject_stats_report(
    #     stats_report_filename, contrasts, z_maps, fmri_glm.mask, anat=anat,
    #     threshold=2.3, cluster_th=15, design_matrices=design_matrices, TR=tr,
    #     subject_id="sub001", n_scans=n_scans, hfcut=hfcut,
    #     paradigm=paradigm, frametimes=frametimes,
    #     drift_model=drift_model, hrf_model=hrf_model)
    # ProgressReport().finish_dir(subject_output_dir)

    return dict(subject_id=subject_id,
                mask=mask_path,
                effects_maps=effects_maps,
                z_maps=z_maps,
                contrasts=contrasts)
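
For context, do_subject_glm returns everything the group analysis further down
needs for one subject: the mask path, the per-contrast effects and z maps, and
the contrast vectors. A minimal usage sketch follows, assuming joblib is
installed; the subject ids and the n_jobs value are hypothetical.

from joblib import Parallel, delayed

# hypothetical subject ids; adapt to the dataset layout assumed above
subject_ids = ["sub001", "sub002", "sub003"]

# run the first-level GLM for each subject (n_jobs=3 is an arbitrary choice)
first_level_results = Parallel(n_jobs=3)(
    delayed(do_subject_glm)(subject_id) for subject_id in subject_ids)

# gather what the group-level analysis expects
masks = [res["mask"] for res in first_level_results]
effects_maps = [res["effects_maps"] for res in first_level_results]
contrasts = first_level_results[0]["contrasts"]  # shared across subjects
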
Example 3
def group_one_sample_t_test(masks, effects_maps, contrasts, output_dir,
                            start_time=base_reporter.pretty_time(),
                            **kwargs):
    """
    Runs a one-sample t-test procedure for group analysis. Here, for each
    experimental condition, we are only interested in refuting the null
    hypothesis H0: "the average effect across subjects is zero".

    Parameters
    ----------
    masks: list of strings or nibabel image objects
        subject masks, one per subject

    effects_maps: list of dicts of lists
        effects maps from subject-level GLM; each entry is a dictionary;
        each entry (indexed by condition id) of this dictionary is the
        filename (or corresponding nibabel image object) of the effects
        map for that condition (aka contrast), for that subject

    contrasts: dictionary of array_likes
        contrast vectors, indexed by condition id

    kwargs: dict_like
        kwargs for plot_stats_map API
    """

    # make output directory
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    assert len(masks) == len(effects_maps), (len(masks), len(effects_maps))

    # compute group mask
    group_mask = intersect_masks(masks)

    # construct design matrix (only one covariate, namely the "mean effect")
    design_matrix = np.ones(len(effects_maps))[:, np.newaxis]  # only the intercept

    group_level_z_maps = {}
    group_level_t_maps = {}
    for contrast_id in contrasts:
        print "\tcontrast id: %s" % contrast_id

        # effects maps will be the input to the second level GLM
        first_level_image = nibabel.concat_images(
            [x[contrast_id] for x in effects_maps])

        # fit 2nd level GLM for given contrast
        group_model = FMRILinearModel(first_level_image,
                                      design_matrix, group_mask)
        group_model.fit(do_scaling=False, model='ols')

        # specify and estimate the contrast
        contrast_val = np.array([[1.]])  # the only possible contrast!
        z_map, t_map = group_model.contrast(
            contrast_val, con_id='one_sample %s' % contrast_id, output_z=True,
            output_stat=True)

        # save map
        for map_type, map_img in zip(["z", "t"], [z_map, t_map]):
            map_dir = os.path.join(output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, 'group_level_%s.nii.gz' % (
                    contrast_id))
            print "\t\tWriting %s ..." % map_path
            nibabel.save(map_img, map_path)
            if map_type == "z":
                group_level_z_maps[contrast_id] = map_path
            elif map_type == "t":
                group_level_t_maps[contrast_id] = map_path

    # do stats report
    stats_report_filename = os.path.join(output_dir, "report_stats.html")
    generate_subject_stats_report(stats_report_filename, contrasts,
                                  group_level_z_maps, group_mask,
                                  start_time=start_time,
                                  **kwargs)

    print "\r\nStatistic report written to %s\r\n" % (
        stats_report_filename)

    return group_level_z_maps
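
Assuming per-subject masks, effects maps, and contrasts have been collected as
in the sketch after Example 2, a group-level call might look as follows. The
output directory and the threshold keyword (forwarded through **kwargs to the
reporting code) are illustrative choices, not values required by the function.

group_output_dir = os.path.join(output_dir, "group_level")  # hypothetical path
group_z_maps = group_one_sample_t_test(
    masks,          # one mask image (or path) per subject
    effects_maps,   # one {contrast_id: effects map path} dict per subject
    contrasts,      # contrast vectors, shared across subjects
    group_output_dir,
    threshold=2.3)  # assumed to be accepted by the downstream reporting API
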
Example 4
def execute_glm(doc, out_dir, contrast_definitions=None,
                outputs=None, glm_model='ar1',
                ):
    """Function to execute GLM for one subject --and perhaps multiple
    sessions thereof

    """

    stats_start_time = time.ctime()

    # study_dir = os.path.join(out_dir, doc['study'])

    if outputs is None:
        outputs = {'maps': False,
                   'data': False,
                   'mask': True,
                   'model': True,
                   }
    else:
        outputs['maps'] = False

    subject_id = doc['subject']
    subject_output_dir = os.path.join(
        out_dir, subject_id)

    _export(doc, subject_output_dir, outputs=outputs)

    params = load_glm_params(doc)

    # instantiate GLM
    fmri_glm = FMRILinearModel(params['data'],
                               params['design_matrices'],
                               mask=doc['mask'])

    # fit GLM
    fmri_glm.fit(do_scaling=True, model=glm_model)

    # save beta-maps to disk
    beta_map_dir = os.path.join(subject_output_dir, 'beta_maps')
    if not os.path.exists(beta_map_dir):
        os.makedirs(beta_map_dir)
    for j, glm in enumerate(fmri_glm.glms):
        # XXX save array in some compressed format
        np.savetxt(os.path.join(beta_map_dir, "beta_map_%i.txt" % j),
                   glm.get_beta(),  # array has shape (n_conditions, n_voxels)
                   )

    # define contrasts
    if contrast_definitions is not None:
        params['contrasts'] = make_contrasts(params, contrast_definitions)
    contrasts = sorted(params['contrasts'][0].keys())

    _contrasts = {}
    z_maps = {}

    # compute stats maps
    for index, contrast_id in enumerate(contrasts):
        print ' study[%s] subject[%s] contrast [%s]: %i/%i' % (
            doc['study'], doc['subject'],
            contrast_id, index + 1, len(contrasts)
            )

        contrast = [c[contrast_id] for c in params['contrasts']]
        contrast_name = contrast_id.replace(' ', '_')

        z_map, t_map, c_map, var_map = fmri_glm.contrast(
            contrast,
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True,)

        for dtype, out_map in zip(['z', 't', 'c', 'variance'],
                                  [z_map, t_map, c_map, var_map]):
            map_dir = os.path.join(subject_output_dir, '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_name)
            nb.save(out_map, map_path)

            # collect z map
            if dtype == 'z':
                _contrasts[contrast_name] = contrast
                z_maps[contrast_name] = map_path

    # invoke a single API to handle plotting and html business for you
    subject_stats_report_filename = os.path.join(
        subject_output_dir, "report_stats.html")
    glm_reporter.generate_subject_stats_report(
        subject_stats_report_filename,
        _contrasts,
        z_maps,
        doc['mask'],
        design_matrices=list(params['design_matrices']),
        subject_id=doc['subject'],
        cluster_th=15,  # 15 voxels
        start_time=stats_start_time,
        TR=doc['TR'],
        n_scans=doc['n_scans'],
        n_sessions=doc['n_sessions'],
        model=glm_model,
        )

    print "Report for subject %s written to %s" % (
        doc['subject'],
        subject_stats_report_filename)
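
execute_glm is driven by a doc dictionary. Only the keys read directly in the
body above are visible here (study, subject, mask, TR, n_scans, n_sessions);
load_glm_params and _export presumably consume further fields. A hedged sketch
of a call, with purely illustrative values, might look like this:

doc = {
    "study": "ds001",                # hypothetical study id
    "subject": "sub001",             # hypothetical subject id
    "mask": "/path/to/mask.nii.gz",  # illustrative mask path
    "TR": 2.0,
    "n_scans": 180,
    "n_sessions": 1,
    # ... plus whatever fields load_glm_params / _export expect
}
execute_glm(doc, "/tmp/glm_results", contrast_definitions=None, glm_model="ar1")
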
Example 5
def execute_glm(
    doc,
    out_dir,
    contrast_definitions=None,
    outputs=None,
    glm_model='ar1',
):
    """Function to execute GLM for one subject --and perhaps multiple
    sessions thereof

    """

    stats_start_time = time.ctime()

    # study_dir = os.path.join(out_dir, doc['study'])

    if outputs is None:
        outputs = {
            'maps': False,
            'data': False,
            'mask': True,
            'model': True,
        }
    else:
        outputs['maps'] = False

    subject_id = doc['subject']
    subject_output_dir = os.path.join(out_dir, subject_id)

    _export(doc, subject_output_dir, outputs=outputs)

    params = load_glm_params(doc)

    # instantiate GLM
    fmri_glm = FMRILinearModel(params['data'],
                               params['design_matrices'],
                               mask=doc['mask'])

    # fit GLM
    fmri_glm.fit(do_scaling=True, model=glm_model)

    # save beta-maps to disk
    beta_map_dir = os.path.join(subject_output_dir, 'beta_maps')
    if not os.path.exists(beta_map_dir):
        os.makedirs(beta_map_dir)
    for j, glm in enumerate(fmri_glm.glms):
        # XXX save array in some compressed format
        np.savetxt(
            os.path.join(beta_map_dir, "beta_map_%i.txt" % j),
            glm.get_beta(),  # array has shape (n_conditions, n_voxels)
        )

    # define contrasts
    if contrast_definitions is not None:
        params['contrasts'] = make_contrasts(params, contrast_definitions)
    contrasts = sorted(params['contrasts'][0].keys())

    _contrasts = {}
    z_maps = {}

    # compute stats maps
    for index, contrast_id in enumerate(contrasts):
        print ' study[%s] subject[%s] contrast [%s]: %i/%i' % (
            doc['study'], doc['subject'], contrast_id, index + 1,
            len(contrasts))

        contrast = [c[contrast_id] for c in params['contrasts']]
        contrast_name = contrast_id.replace(' ', '_')

        z_map, t_map, c_map, var_map = fmri_glm.contrast(
            contrast,
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True,
        )

        for dtype, out_map in zip(['z', 't', 'c', 'variance'],
                                  [z_map, t_map, c_map, var_map]):
            map_dir = os.path.join(subject_output_dir, '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_name)
            nb.save(out_map, map_path)

            # collect z map
            if dtype == 'z':
                _contrasts[contrast_name] = contrast
                z_maps[contrast_name] = map_path

    # invoke a single API to handle plotting and html business for you
    subject_stats_report_filename = os.path.join(subject_output_dir,
                                                 "report_stats.html")
    glm_reporter.generate_subject_stats_report(
        subject_stats_report_filename,
        _contrasts,
        z_maps,
        doc['mask'],
        design_matrices=list(params['design_matrices']),
        subject_id=doc['subject'],
        cluster_th=15,  # 15 voxels
        start_time=stats_start_time,
        TR=doc['TR'],
        n_scans=doc['n_scans'],
        n_sessions=doc['n_sessions'],
        model=glm_model,
    )

    print "Report for subject %s written to %s" % (
        doc['subject'], subject_stats_report_filename)
Example 6
_, matrix, names = check_design_matrix(design_matrix)
contrasts = {}
n_columns = len(names)
contrast_matrix = np.eye(n_columns)
for i in xrange(2):
    contrasts[names[2 * i]] = contrast_matrix[2 * i]

# more interesting contrasts
contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled']
contrasts['scrambled-faces'] = -contrasts['faces-scrambled']
contrasts['effects_of_interest'] = contrasts['faces'] + contrasts['scrambled']

# fit GLM
print 'Fitting a GLM (this takes time)...'
fmri_glm = FMRILinearModel(
    [nibabel.concat_images(x) for x in subject_data.func],
    [check_design_matrix(design_matrix)[1]
     for design_matrix in design_matrices], mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

# save computed mask
mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")
print "Saving mask image %s" % mask_path
nibabel.save(fmri_glm.mask, mask_path)
mask_images.append(mask_path)

# compute contrast maps
z_maps = {}
effects_maps = {}
for contrast_id, contrast_val in contrasts.items():
    print "\tcontrast id: %s" % contrast_id
    z_map, t_map, effects_map, var_map = fmri_glm.contrast(
Example 7
plt.savefig(dmat_outfile, bbox_inches="tight", dpi=200)

# specify contrasts
contrasts = {}
_, matrix, names = check_design_matrix(design_matrix)
contrast_matrix = np.eye(len(names))
for i in range(len(names)):
    contrasts[names[i]] = contrast_matrix[i]

# more interesting contrasts
contrasts = {'active-rest': contrasts['active'] - contrasts['rest']}

# fit GLM
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FMRILinearModel(nibabel.concat_images(subject_data.func[0]),
                           matrix,
                           mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

# save computed mask
mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")
print "Saving mask image %s" % mask_path
nibabel.save(fmri_glm.mask, mask_path)

# compute background onto which activations will be projected
anat_img = nibabel.load(subject_data.anat)

print "Computing contrasts .."
z_maps = {}
effects_maps = {}
for contrast_id, contrast_val in contrasts.iteritems():
Example 8
fmri_files = results[0]['func']
anat_file = results[0]['anat']
"""specify contrasts"""
_, matrix, names = check_design_matrix(design_matrix)
contrasts = {}
n_columns = len(names)
I = np.eye(len(names))
for i in xrange(2):
    contrasts['%s' % names[2 * i]] = I[2 * i]
"""more interesting contrasts"""
contrasts['EV1>EV2'] = contrasts['EV1'] - contrasts['EV2']
contrasts['EV2>EV1'] = contrasts['EV2'] - contrasts['EV1']
contrasts['effects_of_interest'] = contrasts['EV1'] + contrasts['EV2']
"""fit GLM"""
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FMRILinearModel(fmri_files, matrix, mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')
"""save computed mask"""
mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")
print "Saving mask image %s" % mask_path
nibabel.save(fmri_glm.mask, mask_path)

# compute background onto which activations will be projected
mean_fmri_files = compute_mean_3D_image(fmri_files)
print "Computing contrasts .."
z_maps = {}
for contrast_id, contrast_val in contrasts.iteritems():
    print "\tcontrast id: %s" % contrast_id
    z_map, t_map, eff_map, var_map = fmri_glm.contrast(
        contrasts[contrast_id],
        con_id=contrast_id,
Example 9
def group_one_sample_t_test(masks,
                            effects_maps,
                            contrasts,
                            output_dir,
                            start_time=base_reporter.pretty_time(),
                            **kwargs):
    """
    Runs a one-sample t-test procedure for group analysis. Here, for each
    experimental condition, we are only interested in refuting the null
    hypothesis H0: "the average effect across subjects is zero".

    Parameters
    ----------
    masks: list of strings or nibabel image objects
        subject masks, one per subject

    effects_maps: list of dicts of lists
        effects maps from subject-level GLM; each entry is a dictionary;
        each entry (indexed by condition id) of this dictionary is the
        filename (or corresponding nibabel image object) of the effects
        map for that condition (aka contrast), for that subject

    contrasts: dictionary of array_likes
        contrast vectors, indexed by condition id

    kwargs: dict_like
        kwargs for plot_stats_map API
    """

    # make output directory
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    assert len(masks) == len(effects_maps), (len(masks), len(effects_maps))

    # compute group mask
    group_mask = intersect_masks(masks)

    # construct design matrix (only one covariate, namely the "mean effect")
    design_matrix = np.ones(
        len(effects_maps))[:, np.newaxis]  # only the intercept

    group_level_z_maps = {}
    group_level_t_maps = {}
    for contrast_id in contrasts:
        print "\tcontrast id: %s" % contrast_id

        # effects maps will be the input to the second level GLM
        first_level_image = nibabel.concat_images(
            [x[contrast_id] for x in effects_maps])

        # fit 2nd level GLM for given contrast
        group_model = FMRILinearModel(first_level_image, design_matrix,
                                      group_mask)
        group_model.fit(do_scaling=False, model='ols')

        # specify and estimate the contrast
        contrast_val = np.array([[1.]])  # the only possible contrast!
        z_map, t_map = group_model.contrast(contrast_val,
                                            con_id='one_sample %s' %
                                            contrast_id,
                                            output_z=True,
                                            output_stat=True)

        # save map
        for map_type, map_img in zip(["z", "t"], [z_map, t_map]):
            map_dir = os.path.join(output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir,
                                    'group_level_%s.nii.gz' % (contrast_id))
            print "\t\tWriting %s ..." % map_path
            nibabel.save(map_img, map_path)
            if map_type == "z":
                group_level_z_maps[contrast_id] = map_path
            elif map_type == "t":
                group_level_t_maps[contrast_id] = map_path

    # do stats report
    stats_report_filename = os.path.join(output_dir, "report_stats.html")
    generate_subject_stats_report(stats_report_filename,
                                  contrasts,
                                  group_level_z_maps,
                                  group_mask,
                                  start_time=start_time,
                                  **kwargs)

    print "\r\nStatistic report written to %s\r\n" % (stats_report_filename)

    return group_level_z_maps
Example 10
def do_subject_glm(subject_id):
    subject_output_dir = os.path.join(output_dir, subject_id)

    # make design matrices
    design_matrices = []
    func = []
    anat = os.path.join(subject_output_dir, "anatomy", "whighres001_brain.nii")
    for run_path in sorted(glob.glob(os.path.join(
            data_dir, subject_id, "model/model001/onsets/task*"))):
        run_id = os.path.basename(run_path)
        run_func = glob.glob(os.path.join(subject_output_dir, "BOLD", run_id,
                                          "wrbold*.nii"))
        assert len(run_func) == 1
        run_func = run_func[0]
        run_onset_paths = sorted(glob.glob(os.path.join(
            data_dir, subject_id, "model/model001/onsets/%s/*" % run_id)))
        onsets = map(np.loadtxt, run_onset_paths)
        conditions = np.hstack(
            [[condition_keys["cond%03i" % (c + 1)]] * len(onsets[c])
             for c in range(len(run_onset_paths))])
        onsets = np.vstack((onsets))
        onsets *= tr
        run_func = nibabel.load(run_func)
        func.append(run_func)
        n_scans = run_func.shape[-1]
        onset, duration, modulation = onsets.T

        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        paradigm = pd.DataFrame(dict(name=conditions, onset=onset,
                                     duration=duration, modulation=modulation))
        design_matrix = make_design_matrix(frametimes, paradigm,
                                           hrf_model=hrf_model,
                                           drift_model=drift_model,
                                           period_cut=hfcut)
        design_matrices.append(design_matrix)
    n_runs = len(func)

    # specify contrasts
    _, _, names = check_design_matrix(design_matrix)
    n_columns = len(names)
    contrast_matrix = np.eye(n_columns)
    contrasts = {}
    for c in range(len(condition_keys)):
        contrasts[names[2 * c]] = contrast_matrix[2 * c]
    contrasts["avg"] = np.mean(contrasts.values(), axis=0)

    # more interesting contrasts
    contrasts_ = {}
    for contrast, val in contrasts.items():
        if not contrast == "avg":
            contrasts_["%s_minus_avg" % contrast] = val - contrasts["avg"]
    contrasts = contrasts_

    # fit GLM
    from nilearn.image import smooth_img
    func = smooth_img(func, fwhm=8.)
    print 'Fitting a GLM (this takes time)...'
    fmri_glm = FMRILinearModel(func, [check_design_matrix(design_matrix)[1]
                                      for design_matrix in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(subject_output_dir, "mask.nii")
    print "Saving mask image to %s ..." % mask_path
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrast maps
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * n_runs, con_id=contrast_id, output_z=True,
            output_stat=True, output_effects=True, output_variance=True)
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir, '%s.nii.gz' % contrast_id)
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # # generate stats report
    # stats_report_filename = os.path.join(subject_output_dir, "reports",
    #                                      "report_stats.html")
    # generate_subject_stats_report(
    #     stats_report_filename, contrasts, z_maps, fmri_glm.mask, anat=anat,
    #     threshold=2.3, cluster_th=15, design_matrices=design_matrices, TR=tr,
    #     subject_id="sub001", n_scans=n_scans, hfcut=hfcut,
    #     paradigm=paradigm, frametimes=frametimes,
    #     drift_model=drift_model, hrf_model=hrf_model)
    # ProgressReport().finish_dir(subject_output_dir)

    return dict(subject_id=subject_id, mask=mask_path,
                effects_maps=effects_maps, z_maps=z_maps, contrasts=contrasts)