Example #1
0
def make_dmtx_from_timing_files(timing_files,
                                condition_ids=None,
                                frametimes=None,
                                n_scans=None,
                                tr=None,
                                add_regs_file=None,
                                add_reg_names=None,
                                **make_dmtx_kwargs):
    """Build a design matrix (and its paradigm) from timing files.

    Parameters
    ----------
    timing_files : list of str
        Timing files, forwarded to ``make_paradigm_from_timing_files``.
    condition_ids : list of str, optional
        Condition names, forwarded to ``make_paradigm_from_timing_files``.
    frametimes : 1D array-like, optional
        Acquisition time of each scan.  If omitted, it is computed from
        ``n_scans`` and ``tr`` (which must then both be given); if given,
        ``n_scans`` and ``tr`` must be omitted.
    n_scans : int, optional
        Number of scans; required iff ``frametimes`` is None.
    tr : float, optional
        Repetition time; required iff ``frametimes`` is None.
    add_regs_file : str or np.ndarray, optional
        Path to a text file loadable with ``np.loadtxt`` -- or directly a
        2D array -- of additional regressors, one column per regressor.
    add_reg_names : list of str, optional
        Names for the additional regressors; auto-generated as
        "R1", "R2", ... when omitted.
    **make_dmtx_kwargs
        Extra keyword arguments forwarded to ``make_design_matrix``.

    Returns
    -------
    (design_matrix, paradigm, frametimes) tuple.
    """
    # make paradigm
    paradigm = make_paradigm_from_timing_files(timing_files,
                                               condition_ids=condition_ids)

    # make frametimes: either supplied directly, or derived from n_scans/tr
    if frametimes is None:
        assert n_scans is not None, ("frametimes not specified, expecting a "
                                     "value for n_scans")
        assert tr is not None, ("frametimes not specified, expecting a "
                                "value for tr")
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
    else:
        assert n_scans is None, ("frametimes specified, not expecting a "
                                 "value for n_scans")
        assert tr is None, ("frametimes specified, not expecting a "
                            "value for tr")

    # load additional regressors from file (or accept an array directly)
    if add_regs_file is not None:
        if isinstance(add_regs_file, np.ndarray):
            add_regs = add_regs_file
        else:
            # BUG FIX: the original message lacked the '%' interpolation,
            # so the offending filename was never shown
            assert os.path.isfile(add_regs_file), (
                "add_regs_file %s doesn't exist" % add_regs_file)
            add_regs = np.loadtxt(add_regs_file)
        assert add_regs.ndim == 2, (
            "Bad add_regs_file: %s (must contain a 2D array, each column "
            "representing the values of a single regressor)" % add_regs_file)
        if add_reg_names is None:
            add_reg_names = [
                "R%i" % (col + 1) for col in range(add_regs.shape[-1])
            ]
        else:
            assert len(add_reg_names) == add_regs.shape[1], (
                "Expecting %i regressor names, got %i" %
                (add_regs.shape[1], len(add_reg_names)))

        make_dmtx_kwargs["add_reg_names"] = add_reg_names
        make_dmtx_kwargs["add_regs"] = add_regs

    # make design matrix
    design_matrix = make_design_matrix(frame_times=frametimes,
                                       paradigm=paradigm,
                                       **make_dmtx_kwargs)

    # return output
    return design_matrix, paradigm, frametimes
def make_dmtx_from_timing_files(timing_files, condition_ids=None,
                                frametimes=None, n_scans=None, tr=None,
                                add_regs_file=None,
                                add_reg_names=None,
                                **make_dmtx_kwargs):
    """Build a design matrix (and its paradigm) from timing files.

    Either ``frametimes`` is given directly, or both ``n_scans`` and ``tr``
    are given so frametimes can be computed -- never both.  Additional
    regressors may be supplied through ``add_regs_file`` (a ``np.loadtxt``
    compatible path or a 2D array, one column per regressor) with optional
    ``add_reg_names``.  Remaining keyword arguments are forwarded to
    ``make_design_matrix``.

    Returns
    -------
    (design_matrix, paradigm, frametimes) tuple.
    """
    # make paradigm
    paradigm = make_paradigm_from_timing_files(timing_files,
                                               condition_ids=condition_ids)

    # make frametimes: derive from n_scans/tr when not supplied directly
    if frametimes is None:
        assert n_scans is not None, ("frametimes not specified, expecting a "
                                     "value for n_scans")
        assert tr is not None, ("frametimes not specified, expecting a "
                                "value for tr")
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
    else:
        assert n_scans is None, ("frametimes specified, not expecting a "
                                 "value for n_scans")
        assert tr is None, ("frametimes specified, not expecting a "
                            "value for tr")

    # load additional regressors from file (or accept an array directly)
    if add_regs_file is not None:
        if isinstance(add_regs_file, np.ndarray):
            add_regs = add_regs_file
        else:
            # BUG FIX: the original message lacked the '%' interpolation,
            # so the offending filename was never shown
            assert os.path.isfile(add_regs_file), (
                "add_regs_file %s doesn't exist" % add_regs_file)
            add_regs = np.loadtxt(add_regs_file)
        assert add_regs.ndim == 2, (
            "Bad add_regs_file: %s (must contain a 2D array, each column "
            "representing the values of a single regressor)" % add_regs_file)
        if add_reg_names is None:
            add_reg_names = ["R%i" % (col + 1) for col in range(
                    add_regs.shape[-1])]
        else:
            assert len(add_reg_names) == add_regs.shape[1], (
                "Expecting %i regressor names, got %i" % (
                    add_regs.shape[1], len(add_reg_names)))

        make_dmtx_kwargs["add_reg_names"] = add_reg_names
        make_dmtx_kwargs["add_regs"] = add_regs

    # make design matrix
    design_matrix = make_design_matrix(frame_times=frametimes,
                                       paradigm=paradigm,
                                       **make_dmtx_kwargs)

    # return output
    return design_matrix, paradigm, frametimes
    # NOTE(review): fragment -- this code sits inside a per-session loop
    # (index `x`) whose header is not visible here; `subject_data`, `tr`,
    # `hrf_model`, `drift_model`, `hfcut` and `design_matrices` come from
    # the enclosing scope.
    n_scans = len(subject_data.func[x])
    # load the trial timings for session x+1 (1-based attribute naming)
    timing = scipy.io.loadmat(getattr(subject_data, "trials_ses%i" % (x + 1)),
                              squeeze_me=True, struct_as_record=False)

    # first cell holds 'faces' onsets, second holds 'scrambled' onsets
    # (assumed from the condition labels built below -- TODO confirm)
    faces_onsets = timing['onsets'][0].ravel()
    scrambled_onsets = timing['onsets'][1].ravel()
    onsets = np.hstack((faces_onsets, scrambled_onsets))
    onsets *= tr  # because onsets were reported in 'scans' units
    conditions = ['faces'] * len(faces_onsets) + ['scrambled'] * len(
        scrambled_onsets)

    # build design matrix for this session and collect it
    frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
    paradigm = pd.DataFrame({'name': conditions, 'onset': onsets})
    design_matrix = make_design_matrix(frametimes, paradigm,
                                       hrf_model=hrf_model,
                                       drift_model=drift_model,
                                       period_cut=hfcut)
    design_matrices.append(design_matrix)

# specify contrasts: one unit contrast per condition column, then differences
_, matrix, names = check_design_matrix(design_matrix)
contrasts = {}
n_columns = len(names)
contrast_matrix = np.eye(n_columns)
# even-indexed columns (0 and 2) are assumed to be the 'faces' and
# 'scrambled' regressors -- TODO confirm column layout of the design matrix.
# Fixed: use `range` (the rest of the file uses `range`; `xrange` is
# Python-2-only and inconsistent with the surrounding code)
for i in range(2):
    contrasts[names[2 * i]] = contrast_matrix[2 * i]

# more interesting contrasts
contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled']
contrasts['scrambled-faces'] = -contrasts['faces-scrambled']
contrasts['effects_of_interest'] = contrasts['faces'] + contrasts['scrambled']
Example #4
0
# write the (condition, onset, duration) triplets to an onset file next to
# the functional image; a context manager guarantees the file is closed even
# if a write fails (the original open/close pair had no try/finally)
with open(sd.func[0].split(".")[0] + "_onset.txt", "w") as fd:
    for c, o, d in zip(conditions, onset, duration):
        fd.write("%s %s %s\r\n" % (c, o, d))

# preprocess the data
subject_data = do_subjects_preproc(jobfile, dataset_dir=dataset_dir)[0]

# construct design matrix from the preprocessed functional images
nscans = len(subject_data.func[0])
frametimes = np.linspace(0, (nscans - 1) * tr, nscans)
drift_model = 'Cosine'
hrf_model = 'spm + derivative'
design_matrix = make_design_matrix(frametimes,
                                   paradigm,
                                   hrf_model=hrf_model,
                                   drift_model=drift_model,
                                   period_cut=hfcut)

# plot and save design matrix
ax = plot_design_matrix(design_matrix)
ax.set_position([.05, .25, .9, .65])
ax.set_title('Design matrix')
dmat_outfile = os.path.join(subject_data.output_dir, 'design_matrix.png')
plt.savefig(dmat_outfile, bbox_inches="tight", dpi=200)

# specify contrasts: start from the identity (one unit contrast per column)
contrasts = {}
_, matrix, names = check_design_matrix(design_matrix)
contrast_matrix = np.eye(len(names))
for i in range(len(names)):
Example #5
0
def first_level(subject_id):
    '''
    Run the first-level (single-subject) GLM analysis for one subject.

    ROOTDIR is an environment variable under which the data are stored;
    it must be set by the user before running.
    See: https://github.com/neurospin/pypreprocess/
    Usage: python first_level.py

    Keyword arguments:
    subject_id -- Name of the subject

    Returns:
    z_maps -- dict mapping contrast id to the path of its saved z-map
    '''

    # Configure paths (raises KeyError if ROOTDIR is not set)
    data_dir = os.path.join(os.environ["ROOTDIR"], "dataset", "bids_dataset",
                            subject_id)
    output_dir = os.path.join(os.environ["ROOTDIR"], "processed_data",
                              subject_id)
    subject_session_output_dir = os.path.join(output_dir, 'res_stats')
    if not os.path.exists(subject_session_output_dir):
        os.makedirs(subject_session_output_dir)

    # Experimental paradigm meta-params
    stats_start_time = time.ctime()
    tr = 2.4  # repetition time, in seconds
    drift_model = 'blank'
    #hrf_model = 'canonical'  # hemodynamic response function
    hrf_model = 'spm'  # hemodynamic response function
    hfcut = 128.  # high-pass filter cutoff (period_cut)
    n_scans = 128

    # Preparation of paradigm: pick the localizer-task events file
    # (IndexError if no file matches the glob)
    events_file = glob.glob(
        os.path.join(data_dir, 'func/*_task-standartloc_events.tsv'))[0]
    print events_file
    paradigm = paradigm_contrasts.localizer_paradigm(events_file)

    # Build design matrix
    frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
    design_matrix = make_design_matrix(
        frametimes,
        paradigm,
        hrf_model=hrf_model,
        drift_model=drift_model,
        period_cut=hfcut,
    )

    # Specify contrasts
    contrasts = paradigm_contrasts.localizer_contrasts(design_matrix)

    # Fit GLM on the preprocessed BOLD image ('wra' prefix -- presumably
    # SPM-style warped+realigned output; verify against the preproc step)
    fmri_file = glob.glob(
        os.path.join(output_dir, 'func/wra*_task-standartloc_bold.nii.gz'))[0]
    print 'Fitting a GLM (this takes time)...'
    #    fmri_glm = FirstLevelGLM(noise_model='ar1', standardize=False).fit(fmri_files[0],
    #                           [design_matrix for design_matrix in design_matrices]
    #                           )
    #    fmri_glm = FirstLevelGLM(noise_model='ar1', standardize=False).fit(fmri_file,
    #                               [design_matrix for design_matrix in design_matrices]
    #                               )
    fmri_glm = FirstLevelGLM(noise_model='ar1',
                             standardize=False).fit(fmri_file, design_matrix)

    # Save the mask computed during the fit
    mask_images = []
    mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz")
    nibabel.save(fmri_glm.masker_.mask_img_, mask_path)
    mask_images.append(mask_path)

    # Compute contrasts: z/t/effects/variance maps for each contrast
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, effects_map, var_map = fmri_glm.transform(
            [contrast_val] * 1,  # single session, hence a one-element list
            contrast_name=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)

        # Store stat maps to disk, one subdirectory per map type
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_session_output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir,
                                    '%s%s.nii.gz' % (subject_id, contrast_id))
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # Do stats report: an HTML summary written next to the stat maps
    anat_file = glob.glob(os.path.join(output_dir, 'anat/w*_T1w.nii.gz'))[0]
    anat_img = nibabel.load(anat_file)
    stats_report_filename = os.path.join(subject_session_output_dir,
                                         "report_stats.html")
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.masker_.mask_img_,
        threshold=2.3,  # NOTE(review): presumably a z threshold -- confirm
        cluster_th=15,  # cluster extent threshold
        anat=anat_img,
        anat_affine=anat_img.get_affine(),
        design_matrices=[design_matrix],
        paradigm=paradigm,
        subject_id=subject_id,
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_id,
        # additional ``kwargs`` for more informative report
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
    )

    ProgressReport().finish_dir(subject_session_output_dir)
    print "Statistic report written to %s\r\n" % stats_report_filename
    return z_maps