Example 1
def fixed_effects_analysis(subject_dic, mask_img=None, mesh=False):
    """ Combine the AP and PA images """
    import os
    import numpy as np
    from nibabel import load, save
    from nilearn.plotting import plot_stat_map
    # pipeline helpers (_session_id_to_task_id, make_contrasts, _load_summary_stats,
    # fixed_effects_img, fixed_effects_surf) are assumed to be defined at module level

    session_ids = subject_dic['session_id']
    task_ids = _session_id_to_task_id(session_ids)
    paradigms = np.unique(task_ids)
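    # fall back to the subject-level mask presumably written earlier in the pipeline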
    if mask_img is None:
        mask_img = os.path.join(subject_dic['output_dir'], "mask.nii.gz")

    # loop over the paradigms inferred from the session ids
    for paradigm in paradigms:
        # select the sessions relevant for the paradigm
        session_paradigm = [
            session_id for (session_id, task_id) in zip(session_ids, task_ids)
            if task_id == paradigm
        ]
        # define the relevant contrasts
        contrasts = make_contrasts(paradigm).keys()
        # create write_dir
        if mesh is not False:
            if mesh == 'fsaverage5':
                write_dir = os.path.join(subject_dic['output_dir'],
                                         'res_fsaverage5_%s_ffx' % paradigm)
            elif mesh == 'individual':
                write_dir = os.path.join(subject_dic['output_dir'],
                                         'res_individual_%s_ffx' % paradigm)
            else:
                write_dir = os.path.join(subject_dic['output_dir'],
                                         'res_fsaverage7_%s_ffx' % paradigm)
            dirs = [
                os.path.join(write_dir, stat)
                for stat in ['effect_surf', 'variance_surf', 'stat_surf']
            ]
        else:
            write_dir = os.path.join(subject_dic['output_dir'],
                                     'res_stats_%s_ffx' % paradigm)
            dirs = [
                os.path.join(write_dir, stat) for stat in
                ['effect_size_maps', 'effect_variance_maps', 'stat_maps']
            ]
        for dir_ in dirs:
            if not os.path.exists(dir_):
                os.makedirs(dir_)
        print(write_dir)

        # iterate across contrasts
        for contrast in contrasts:
            print('fixed effects for contrast %s. ' % contrast)
            if mesh is not False:
                # nibabel.gifti.write is deprecated in recent nibabel;
                # the `save` imported above also handles GIFTI images
                for side in ['lh', 'rh']:
                    effect_size_maps, effect_variance_maps, data_available =\
                        _load_summary_stats(
                            subject_dic['output_dir'],
                            np.unique(session_paradigm),
                            contrast,
                            data_available=True, side=side, mesh=mesh)
                    if not data_available:
                        raise ValueError('Missing texture stats files for '
                                         'fixed effects computations')
                    ffx_effects, ffx_variance, ffx_stat = fixed_effects_surf(
                        effect_size_maps, effect_variance_maps)
                    save(
                        ffx_effects,
                        os.path.join(
                            write_dir,
                            'effect_surf/%s_%s.gii' % (contrast, side)))
                    save(
                        ffx_variance,
                        os.path.join(
                            write_dir,
                            'variance_surf/%s_%s.gii' % (contrast, side)))
                    save(
                        ffx_stat,
                        os.path.join(write_dir,
                                     'stat_surf/%s_%s.gii' % (contrast, side)))
            else:
                effect_size_maps, effect_variance_maps, data_available =\
                    _load_summary_stats(
                        subject_dic['output_dir'], session_paradigm, contrast,
                        data_available=True)
                shape = load(effect_size_maps[0]).shape
                if len(shape) > 3:
                    if shape[3] > 1:  # F contrast, skipping
                        continue
                ffx_effects, ffx_variance, ffx_stat = fixed_effects_img(
                    effect_size_maps, effect_variance_maps, mask_img)
                save(
                    ffx_effects,
                    os.path.join(write_dir,
                                 'effect_size_maps/%s.nii.gz' % contrast))
                save(
                    ffx_variance,
                    os.path.join(write_dir,
                                 'effect_variance_maps/%s.nii.gz' % contrast))
                save(ffx_stat,
                     os.path.join(write_dir, 'stat_maps/%s.nii.gz' % contrast))
                plot_stat_map(ffx_stat,
                              bg_img=subject_dic['anat'],
                              display_mode='z',
                              dim=0,
                              cut_coords=7,
                              title=contrast,
                              threshold=3.0,
                              output_file=os.path.join(
                                  write_dir, 'stat_maps/%s.png' % contrast))
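
A minimal usage sketch (not part of the original code): it assumes the pipeline helpers used above are importable from the same module, that per-session summary statistics already exist under output_dir, and it uses made-up session names and paths.

# Hypothetical call; session names, paths and the presence of mask.nii.gz in
# output_dir are assumptions, not values taken from the original pipeline.
subject_dic = {
    'session_id': ['archi_standard_ap', 'archi_standard_pa'],  # assumed names
    'output_dir': '/tmp/sub-01/stats',
    'anat': '/tmp/sub-01/anat/T1w.nii.gz',
}
# volume-based fixed effects, reusing output_dir/mask.nii.gz as the mask
fixed_effects_analysis(subject_dic, mask_img=None, mesh=False)
# surface-based variant on fsaverage5 meshes
# fixed_effects_analysis(subject_dic, mesh='fsaverage5')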
Example 2
def first_level(subject_dic,
                additional_regressors=None,
                compcorr=False,
                smooth=None,
                mesh=False,
                mask_img=None):
    """ Run the first-level analysis (GLM fitting + statistical maps)
    in a given subject

    Parameters
    ----------
    subject_dic: dict,
                 exhaustive description of an individual acquisition
    additional_regressors: dict or None,
                 additional regressors, given as a dictionary mapping
                 session ids to CSV files of already-sampled regressors
    compcorr: bool, optional,
              whether confound estimation and removal should be done or not
    smooth: float or None, optional,
            full-width at half-maximum (in mm) of the spatial smoothing
            applied during masking, if any
    mesh: False, 'fsaverage5', 'fsaverage7' or 'individual', optional,
          whether to run the analysis on surface data and, if so, on which mesh
    mask_img: Niimg-like or None, optional,
              brain mask; if None (and mesh is False) it is computed from
              the functional images
    """
    start_time = time.ctime()
    # experimental paradigm meta-params
    motion_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    hrf_model = subject_dic['hrf_model']
    high_pass = subject_dic['high_pass']
    drift_model = subject_dic['drift_model']
    tr = subject_dic['TR']
    slice_time_ref = 1.

    if not mesh and (mask_img is None):
        mask_img = masking(subject_dic['func'], subject_dic['output_dir'])

    if additional_regressors is None:
        additional_regressors = dict([
            (session_id, None) for session_id in subject_dic['session_id']
        ])

    for session_id, fmri_path, onset, motion_path in zip(
            subject_dic['session_id'], subject_dic['func'],
            subject_dic['onset'], subject_dic['realignment_parameters']):

        task_id = _session_id_to_task_id([session_id])[0]

        if mesh is not False:
            # surface data: one GIFTI data array per scan
            # (nib.load reads .gii files; nibabel.gifti.read is deprecated)
            n_scans = len(nib.load(fmri_path).darrays)
        else:
            n_scans = nib.load(fmri_path).shape[3]

        # motion parameters
        motion = np.loadtxt(motion_path)
        # define the time stamps for different images
        frametimes = (np.arange(n_scans) + slice_time_ref) * tr
        if task_id == 'audio':
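            # the audio task seemingly uses a sparse acquisition: within each 20 s
            # cycle only the volumes flagged in `mask` are acquired, so frame times
            # are rebuilt cycle by cycle instead of being uniformly spaced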
            mask = np.array([1, 0, 1, 1, 0, 1, 1, 0, 1, 1])
            n_cycles = 28
            cycle_duration = 20
            t_r = 2
            cycle = np.arange(0, cycle_duration, t_r)[mask > 0]
            frametimes = np.tile(cycle, n_cycles) +\
                np.repeat(np.arange(n_cycles) * cycle_duration, mask.sum())
            frametimes = frametimes[:-2]  # for some reason...

        if mesh is not False:
            compcorr = False  # XXX Fixme

        if compcorr:
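            # high_variance_confounds returns 5 components by default,
            # hence the 'conf_0' .. 'conf_4' names below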
            confounds = high_variance_confounds(fmri_path, mask_img=mask_img)
            confounds = np.hstack((confounds, motion))
            confound_names = ['conf_%d' % i for i in range(5)] + motion_names
        else:
            confounds = motion
            confound_names = motion_names

        if onset is None:
            warnings.warn('Onset file not provided. Trying to guess it')
            task = os.path.basename(fmri_path).split('task')[-1][4:]
            onset = os.path.join(
                os.path.split(os.path.dirname(fmri_path))[0], 'model001',
                'onsets', 'task' + task + '_run001', 'task%s.csv' % task)

        if not os.path.exists(onset):
            warnings.warn('Non-existent onset file; proceeding without it')
            paradigm = None
        else:
            paradigm = make_paradigm(onset, task_id)

        # handle manually supplied regressors
        add_reg_names = []
        if additional_regressors[session_id] is None:
            add_regs = confounds
        else:
            df = read_csv(additional_regressors[session_id])
            add_regs = []
            for regressor in df:
                add_reg_names.append(regressor)
                add_regs.append(df[regressor])
            add_regs = np.array(add_regs).T
            add_regs = np.hstack((add_regs, confounds))

        add_reg_names += confound_names

        # create the design matrix
        design_matrix = make_first_level_design_matrix(
            frametimes,
            paradigm,
            hrf_model=hrf_model,
            drift_model=drift_model,
            high_pass=high_pass,
            add_regs=add_regs,
            add_reg_names=add_reg_names)
        _, dmtx, names = check_design_matrix(design_matrix)

        # create the relevant contrasts
        contrasts = make_contrasts(task_id, names)

        if mesh == 'fsaverage5':
            # this is low-resolution data
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_fsaverage5_%s' % session_id)
        elif mesh == 'fsaverage7':
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_fsaverage7_%s' % session_id)
        elif mesh == 'individual':
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_individual_%s' % session_id)
        else:
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_stats_%s' % session_id)

        if not os.path.exists(subject_session_output_dir):
            os.makedirs(subject_session_output_dir)
        np.savez(os.path.join(subject_session_output_dir, 'design_matrix.npz'),
                 design_matrix=design_matrix)
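        # note: np.savez stores only the numerical values of the design matrix;
        # the column names are not preserved in the .npz file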

        if mesh is not False:
            run_surface_glm(design_matrix, contrasts, fmri_path,
                            subject_session_output_dir)
        else:
            z_maps, fmri_glm = run_glm(design_matrix,
                                       contrasts,
                                       fmri_path,
                                       mask_img,
                                       subject_dic,
                                       subject_session_output_dir,
                                       tr=tr,
                                       slice_time_ref=slice_time_ref,
                                       smoothing_fwhm=smooth)

            # do stats report
            anat_img = nib.load(subject_dic['anat'])
            stats_report_filename = os.path.join(subject_session_output_dir,
                                                 'report_stats.html')

            report = make_glm_report(
                fmri_glm,
                contrasts,
                threshold=3.0,
                bg_img=anat_img,
                cluster_threshold=15,
                title="GLM for subject %s" % session_id,
            )
            report.save_as_html(stats_report_filename)
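
A hypothetical call sketch for first_level (not from the source): the subject_dic keys are exactly those the function reads, but all paths, names and parameter values below are illustrative assumptions.

# Hypothetical usage; every path and value here is an assumption.
subject_dic = {
    'session_id': ['archi_standard_ap'],
    'func': ['/tmp/sub-01/func/wrarchi_standard_ap.nii.gz'],
    'onset': ['/tmp/sub-01/onsets/archi_standard_ap.csv'],
    'realignment_parameters': ['/tmp/sub-01/func/rp_archi_standard_ap.txt'],
    'output_dir': '/tmp/sub-01/stats',
    'anat': '/tmp/sub-01/anat/T1w.nii.gz',
    'hrf_model': 'spm',
    'drift_model': 'cosine',
    'high_pass': 1. / 128,   # in Hz, as expected by nilearn design matrices
    'TR': 2.0,
}
# volume-based analysis with CompCor confounds and 5 mm smoothing
first_level(subject_dic, compcorr=True, smooth=5.)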