def tortoise(*args):
    """Fit a fixed-effects GLM merging the LR and RL phase-encoding runs
    of a single subject, then compute and save z/effects maps.

    NOTE(review): this function reads ``subject_data``, ``design_matrices``
    and ``contrasts`` from the enclosing scope rather than from ``args``
    (``args`` is only printed) -- confirm those names exist at call time.

    Returns
    -------
    tuple or None
        ``(effects_maps, z_maps, mask_path, map_dirs)`` on success, or
        ``None`` if computing any contrast fails.
    """
    print(args)
    print(
        'Fitting a "Fixed Effect" GLM for merging LR and RL '
        'phase-encoding directions for subject %s ...' %
        subject_data.subject_id)
    fmri_glm = FMRILinearModel(subject_data.func,
                               [design_matrix.matrix
                                for design_matrix in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')
    print("... done.\r\n")

    # save computed mask
    mask_path = os.path.join(subject_data.output_dir, "mask.nii")
    print("Saving mask image to %s ..." % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)
    print("... done.\r\n")

    z_maps = {}
    effects_maps = {}
    map_dirs = {}
    try:
        for contrast_id, contrast_val in contrasts.items():
            print("\tcontrast id: %s" % contrast_id)
            z_map, eff_map = fmri_glm.contrast(
                contrast_val,
                con_id=contrast_id,
                output_z=True,
                output_effects=True)

            # store stat maps to disk
            for map_type, out_map in zip(['z', 'effects'],
                                         [z_map, eff_map]):
                map_dir = os.path.join(
                    subject_data.output_dir, '%s_maps' % map_type)
                map_dirs[map_type] = map_dir
                if not os.path.exists(map_dir):
                    os.makedirs(map_dir)
                map_path = os.path.join(
                    map_dir, '%s_%s.nii' % (map_type, contrast_id))
                print("\t\tWriting %s ..." % map_path)
                nibabel.save(out_map, map_path)

                # collect maps for contrasts we're interested in
                if map_type == 'z':
                    z_maps[contrast_id] = map_path
                if map_type == 'effects':
                    effects_maps[contrast_id] = map_path

        return effects_maps, z_maps, mask_path, map_dirs
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt; narrowed to Exception, keeping the
        # original None-on-failure behavior
        return None
# Esempio n. 2
    def preprocess_files(self, func_files, anat_files=None, verbose=1):
        """Run a per-condition GLM on each functional run and write one
        beta image per condition next to the input file.

        Parameters
        ----------
        func_files : list of str
            Paths to ``*_bold.nii.gz`` images; a matching ``*_events.tsv``
            file must exist next to each of them.
        anat_files : list of str, optional
            Unused here; kept for interface compatibility.
        verbose : int, optional
            Progress is printed whenever ``verbose >= 0``.
            NOTE(review): the default is 1 but the check is ``>= 0`` --
            confirm whether ``verbose=0`` is meant to be silent.

        Returns
        -------
        list of str
            Paths of all beta images (one per condition per run).
        """
        def get_beta_filepath(func_file, cond):
            # beta image lives next to the bold file, tagged by condition
            return func_file.replace('_bold.nii.gz', '_beta-%s.nii.gz' % cond)

        beta_files = []
        for fi, func_file in enumerate(func_files):
            cond_file = func_file.replace('_bold.nii.gz', '_events.tsv')
            cond_data = pd.read_csv(cond_file, sep='\t')

            # Get condition info, to search if betas have been done.
            conditions = cond_data['trial_type'].tolist()
            all_conds = np.unique(conditions)
            all_beta_files = [
                get_beta_filepath(func_file, cond) for cond in all_conds
            ]
            # Don't re-do preprocessing: all betas already on disk.
            if np.all([os.path.exists(f) for f in all_beta_files]):
                beta_files += all_beta_files
                continue

            if verbose >= 0:
                print('Preprocessing file %d of %d' %
                      (fi + 1, len(func_files)))

            # Need to do regression.
            # NOTE(review): TR is estimated as the mean event duration --
            # confirm this matches the acquisition TR.
            # BUG FIX: ``.as_matrix()`` was removed from pandas; ``.values``
            # is equivalent and works on both old and new versions.
            tr = cond_data['duration'].values.mean()
            onsets = cond_data['onset'].tolist()

            img = nibabel.load(func_file)
            n_scans = img.shape[3]
            frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)

            # Create the design matrix
            paradigm = EventRelatedParadigm(conditions, onsets)
            design_mat = dm.make_dmtx(frametimes,
                                      paradigm,
                                      drift_model='cosine',
                                      hfcut=n_scans,
                                      hrf_model='canonical')

            # Do the GLM
            mask_img = compute_epi_mask(img)
            fmri_glm = FMRILinearModel(img, design_mat.matrix, mask=mask_img)
            fmri_glm.fit(do_scaling=True, model='ar1')

            # Pull out the least-squares estimates of the betas
            beta_hat = fmri_glm.glms[0].get_beta()
            mask = fmri_glm.mask.get_data() > 0

            # output beta images (one volume per design-matrix column);
            # ``np.float`` was removed in numpy 1.24 -- use builtin float
            dim = design_mat.matrix.shape[1]
            beta_map = np.tile(mask.astype(float)[..., np.newaxis], dim)
            beta_map[mask] = beta_hat.T
            beta_image = nibabel.Nifti1Image(beta_map, fmri_glm.affine)
            beta_image.get_header()['descrip'] = (
                'Parameter estimates of the localizer dataset')

            # Save per-condition beta images.
            # NOTE(review): indexing by ``ci`` assumes condition regressors
            # occupy the first columns of the design matrix -- confirm.
            for ci, cond in enumerate(np.unique(conditions)):
                beta_cond_img = index_img(beta_image, ci)
                beta_filepath = get_beta_filepath(func_file, cond)
                nibabel.save(beta_cond_img, beta_filepath)
                beta_files.append(beta_filepath)

        return beta_files
# plot and save the design matrix
dmat_outfile = os.path.join(subject_data.output_dir, 'design_matrix.png')
pl.savefig(dmat_outfile, bbox_inches="tight", dpi=200)

# specify contrasts: one dummy contrast per condition.
# NOTE(review): indexing 2*i assumes conditions sit at even columns of the
# design matrix -- confirm for your design.
contrasts = {}
n_columns = len(design_matrix.names)
for i in range(paradigm.n_conditions):
    contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

# more interesting contrasts
contrasts['active-rest'] = contrasts['active'] - contrasts['rest']

# fit GLM
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FMRILinearModel(nibabel.concat_images(subject_data.func[0]),
                           design_matrix.matrix,
                           mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

# save computed mask
mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")
print("Saving mask image %s" % mask_path)
nibabel.save(fmri_glm.mask, mask_path)

# compute bg unto which activation will be projected
anat_img = nibabel.load(subject_data.anat)
anat = anat_img.get_data()
anat_affine = anat_img.get_affine()

print("Computing contrasts ..")
z_maps = {}
# Esempio n. 4
def do_subject_glm(subject_data):
    """FE (fixed-effects) analysis for a single subject.

    Parameters
    ----------
    subject_data : dict
        Must contain 'subject_id', 'output_dir', 'func' (one item per
        session), 'anat' and 'onset' (one onset file per session).

    Returns
    -------
    tuple
        (subject_id, anat, effects_maps, z_maps, contrasts, mask image).
    """
    subject_id = subject_data['subject_id']
    output_dir = subject_data["output_dir"]
    func_files = subject_data['func']
    anat = subject_data['anat']
    onset_files = subject_data['onset']
    mem = Memory(os.path.join(output_dir, "cache"))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    ### Preprocess data #######################################################
    if 0:  # disabled preprocessing branch, kept for reference
        subject_data = mem.cache(do_subject_preproc)(
            dict(func=func_files, anat=anat, output_dir=output_dir))
        func_files = subject_data['func']
        anat = subject_data['anat']

        # reslice func images
        func_files = [mem.cache(reslice_vols)(
            sess_func,
            target_affine=nibabel.load(sess_func[0]).get_affine())
                      for sess_func in func_files]

    ### GLM: loop on (session_bold, onset_file) pairs over the various sessions
    design_matrices = []
    for func_file, onset_file in zip(func_files, onset_files):
        if isinstance(func_file, str):
            bold = nibabel.load(func_file)
            assert len(bold.shape) == 4
            # BUG FIX: n_scans was never set in this branch (NameError for
            # a session given as a single 4D filename)
            n_scans = bold.shape[-1]
            del bold
        else:
            if len(func_file) == 1:
                func_file = func_file[0]
                bold = nibabel.load(func_file)
                assert len(bold.shape) == 4
                n_scans = bold.shape[-1]
                del bold
            else:
                n_scans = len(func_file)
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        conditions, onsets, durations, amplitudes = parse_onset_file(
            onset_file)
        # onsets/durations are in scan units -- convert to seconds
        onsets *= tr
        durations *= tr
        paradigm = BlockParadigm(con_id=conditions, onset=onsets,
                                 duration=durations, amplitude=amplitudes)
        design_matrices.append(make_dmtx(frametimes,
                                         paradigm, hrf_model=hrf_model,
                                         drift_model=drift_model,
                                         hfcut=hfcut))

    # specify contrasts.
    # NOTE(review): indexing 2*i assumes conditions sit at even columns of
    # the design matrix -- confirm for your design.
    n_columns = len(design_matrices[0].names)
    contrasts = {}
    for i in range(paradigm.n_conditions):
        contrasts['%s' % design_matrices[0].names[2 * i]
                  ] = np.eye(n_columns)[2 * i]

    # more interesting contrasts
    contrasts['faces-scrambled'] = contrasts['faces'
                                             ] - contrasts['scrambled']
    contrasts['scrambled-faces'] = -contrasts['faces-scrambled']
    contrasts['effects_of_interest'] = contrasts['faces'
                                                 ] + contrasts['scrambled']

    # effects of interest F-test
    diff_contrasts = []
    for i in range(paradigm.n_conditions - 1):
        a = contrasts[design_matrices[0].names[2 * i]]
        b = contrasts[design_matrices[0].names[2 * (i + 1)]]
        diff_contrasts.append(a - b)
    contrasts["diff"] = diff_contrasts

    # fit GLM
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel([nibabel.concat_images(sess_func,
                                                      check_affines=False)
                                for sess_func in func_files],
                               [design_matrix.matrix
                                for design_matrix in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(output_dir, "mask.nii.gz")
    print("Saving mask image %s" % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrasts
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        # BUG FIX: the t/F decision was inverted -- a multi-row contrast
        # (ndim > 1, e.g. the "diff" F-contrast) is an F-test, a single
        # row is a t-test
        if np.ndim(contrast_val) > 1:
            contrast_type = "F"
        else:
            contrast_type = "t"
        # one contrast replica per session (was hard-coded to 2 sessions)
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * len(design_matrices),
            con_id=contrast_id,
            contrast_type=contrast_type,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)

        # store stat maps to disk
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(
                output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(out_map, map_path)

            # collect maps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    return subject_id, anat, effects_maps, z_maps, contrasts, fmri_glm.mask
#########################################
# Specify the contrasts
#########################################

# simplest ones: a single 'audio' contrast vector
contrasts = {
    'audio': np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
}
n_columns = len(design_matrix.names)

########################################
# Perform a GLM analysis
########################################

print('Fitting a GLM (this takes time)...')
fmri_glm = FMRILinearModel(data_file, design_matrix.matrix, mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

#########################################
# Estimate the contrasts
#########################################

print('Computing contrasts...')
n_contrasts = len(contrasts)
for index, (contrast_id, contrast_val) in enumerate(contrasts.items(), 1):
    print('  Contrast % 2i out of %i: %s' %
          (index, n_contrasts, contrast_id))
    # compute the z-map for this contrast and write it to disk
    z_map, = fmri_glm.contrast(contrast_val, con_id=contrast_id,
                               output_z=True)
    image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id)
    save(z_map, image_path)
# Esempio n. 6
def first_level(subject_dic):
    """Run a first-level (within-subject) localizer GLM and write an HTML
    stats report.

    Parameters
    ----------
    subject_dic : dict
        Must contain 'func' (session fMRI files), 'anat', 'output_dir',
        'subject_id' and 'session_id'.

    Returns
    -------
    dict
        Mapping contrast id -> path of the corresponding z-map.
    """
    # experimental paradigm meta-params
    stats_start_time = time.ctime()
    tr = 2.4
    drift_model = 'blank'
    hrf_model = 'canonical'  # hemodynamic response function
    hfcut = 128.
    n_scans = 128

    # make design matrices (one per session, same localizer paradigm)
    mask_images = []
    design_matrices = []
    fmri_files = subject_dic['func']

    for _ in range(len(fmri_files)):
        paradigm = paradigm_contrasts.localizer_paradigm()

        # build design matrix
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        design_matrix = make_dmtx(
            frametimes,
            paradigm,
            hrf_model=hrf_model,
            drift_model=drift_model,
            hfcut=hfcut,
        )
        design_matrices.append(design_matrix)

    # Specify contrasts
    contrasts = paradigm_contrasts.localizer_contrasts(design_matrix)

    # create output directory
    subject_session_output_dir = os.path.join(subject_dic['output_dir'],
                                              'res_stats')
    if not os.path.exists(subject_session_output_dir):
        os.makedirs(subject_session_output_dir)

    # Fit GLM
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(
        fmri_files,
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz")
    print("Saving mask image %s" % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)
    mask_images.append(mask_path)

    # compute contrasts
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        # one contrast replica per session (was hard-coded to 1)
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * len(design_matrices),
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)

        # store stat maps to disk
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_session_output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir,
                '%s%s.nii.gz' % (subject_dic['subject_id'], contrast_id))
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(out_map, map_path)

            # collect maps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # do stats report
    anat_img = nibabel.load(subject_dic['anat'])
    stats_report_filename = os.path.join(subject_session_output_dir,
                                         "report_stats.html")
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.mask,
        threshold=2.3,
        cluster_th=15,
        anat=anat_img.get_data(),
        anat_affine=anat_img.get_affine(),
        # BUG FIX: pass the list of design matrices that was collected
        # above (a bare single matrix was passed before)
        design_matrices=design_matrices,
        # BUG FIX: report the actual subject (was hard-coded "sub001")
        subject_id=subject_dic['subject_id'],
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_dic['session_id'],

        # additional ``kwargs`` for more informative report
        paradigm=paradigm.__dict__,
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
    )

    ProgressReport().finish_dir(subject_session_output_dir)
    print("Statistic report written to %s\r\n" % stats_report_filename)
    return z_maps
# Esempio n. 7
# Instantiate the CLI parser
parser = argparse.ArgumentParser(
    description='Please provide a patient directory path')
# Required patient argument
parser.add_argument('path', help='Patient Path Directory')
args = parser.parse_args()

# NOTE(review): dead check -- 'path' is a required positional argument, so
# parse_args() already exits with an error when it is missing and args.path
# can never be None here.
if args.path is None:
    parser.error("Directory Path should be added as argument")

# every regular file directly under the patient directory is passed to the
# multi-session model as an fMRI session file
fmri_files = [f for f in listdir(args.path) if isfile(join(args.path, f))]
mask_file = mask_generator.make_mask(args.path, "./Group_Mask")
design_files = design_matrix_generator.make_design(args.path)

multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file)

# GLM fitting
multi_session_model.fit(do_scaling=True, model='ar1')

# Compute the required contrast, zero-padding ``cvect`` to each session's
# regressor count (design files are .npz archives; matrix under 'arr_0').
# NOTE(review): ``cvect`` must be defined upstream -- not visible here.
print('Computing test contrast image...')
n_regressors = [np.load(f)['arr_0'].shape[1] for f in design_files]
con = [np.hstack((cvect, np.zeros(nr - len(cvect)))) for nr in n_regressors]
z_map, = multi_session_model.contrast(con)

# Show Z-map image
mean_map = multi_session_model.means[0]

print(mean_map)
plot_map(z_map.get_data(),
# Esempio n. 8
# plot and save the design matrix
dmat_outfile = os.path.join(subject_data.output_dir, 'design_matrix.png')
pl.savefig(dmat_outfile, bbox_inches="tight", dpi=200)

# specify contrasts: one dummy contrast per condition.
# NOTE(review): indexing 2*i assumes conditions sit at even columns of the
# design matrix -- confirm for your design.
contrasts = {}
n_columns = len(design_matrix.names)
for i in range(paradigm.n_conditions):
    contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

# more interesting contrasts
contrasts['active-rest'] = contrasts['active'] - contrasts['rest']

# fit GLM
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FMRILinearModel(fmri_4D_filename,
                           design_matrix.matrix,
                           mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

# save computed mask
mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")
print("Saving mask image %s" % mask_path)
nibabel.save(fmri_glm.mask, mask_path)

# compute bg unto which activation will be projected
anat_img = nibabel.load(anat_file)
anat = anat_img.get_data()
anat_affine = anat_img.get_affine()

print("Computing contrasts ..")
z_maps = {}
# Esempio n. 9
def do_subject_glm(subject_data):
    """FE (fixed-effects) analysis for a single subject.

    Parameters
    ----------
    subject_data : dict
        Must contain 'subject_id', 'output_dir', 'func' (one item per
        session), 'anat', 'onset' (one onset file per session), 'TR',
        'time_units' (one of "seconds", "tr", "milliseconds"),
        'drift_model', 'hrf_model' and 'hfcut'.

    Returns
    -------
    tuple
        (subject_id, anat, effects_maps, z_maps, contrasts, mask image).
    """
    subject_id = subject_data['subject_id']
    output_dir = subject_data["output_dir"]
    func_files = subject_data['func']
    anat = subject_data['anat']
    onset_files = subject_data['onset']
    tr = subject_data['TR']
    time_units = subject_data['time_units'].lower()
    assert time_units in ["seconds", "tr", "milliseconds"]
    drift_model = subject_data['drift_model']
    hrf_model = subject_data["hrf_model"]
    hfcut = subject_data["hfcut"]
    mem = Memory(os.path.join(output_dir, "cache"))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if 0:  # disabled preprocessing branch, kept for reference
        subject_data = mem.cache(do_subject_preproc)(dict(
            func=func_files, anat=anat, output_dir=output_dir))
        func_files = subject_data['func']
        anat = subject_data['anat']

        # reslice func images
        func_files = [
            mem.cache(reslice_vols)(sess_func,
                                    target_affine=nibabel.load(
                                        sess_func[0]).get_affine())
            for sess_func in func_files
        ]

    ### GLM: loop on (session_bold, onset_file) pairs over the various sessions
    design_matrices = []
    for func_file, onset_file in zip(func_files, onset_files):
        if isinstance(func_file, str):
            bold = nibabel.load(func_file)
            assert len(bold.shape) == 4
            # BUG FIX: n_scans was never set in this branch (NameError for
            # a session given as a single 4D filename)
            n_scans = bold.shape[-1]
            del bold
        else:
            if len(func_file) == 1:
                func_file = func_file[0]
                bold = nibabel.load(func_file)
                assert len(bold.shape) == 4
                n_scans = bold.shape[-1]
                del bold
            else:
                n_scans = len(func_file)
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        conditions, onsets, durations, amplitudes = parse_onset_file(
            onset_file)
        # normalize onsets/durations to seconds
        if time_units == "tr":
            onsets *= tr
            durations *= tr
        elif time_units in ["milliseconds"]:
            onsets *= 1e-3
            durations *= 1e-3
        paradigm = BlockParadigm(con_id=conditions,
                                 onset=onsets,
                                 duration=durations,
                                 amplitude=amplitudes)
        design_matrices.append(
            make_dmtx(frametimes,
                      paradigm,
                      hrf_model=hrf_model,
                      drift_model=drift_model,
                      hfcut=hfcut))

    # specify contrasts.
    # NOTE(review): indexing 2*i assumes conditions sit at even columns of
    # the design matrix -- confirm for your design.
    n_columns = len(design_matrices[0].names)
    contrasts = {}
    for i in range(paradigm.n_conditions):
        contrasts['%s' %
                  design_matrices[0].names[2 * i]] = np.eye(n_columns)[2 * i]

    # effects of interest F-test
    diff_contrasts = []
    for i in range(paradigm.n_conditions - 1):
        a = contrasts[design_matrices[0].names[2 * i]]
        b = contrasts[design_matrices[0].names[2 * (i + 1)]]
        diff_contrasts.append(a - b)
    contrasts["diff"] = diff_contrasts

    # fit GLM
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel([
        nibabel.concat_images(sess_func, check_affines=False)
        for sess_func in func_files
    ], [design_matrix.matrix for design_matrix in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(output_dir, "mask.nii.gz")
    print("Saving mask image %s" % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrasts
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        # BUG FIX: the t/F decision was inverted -- a multi-row contrast
        # (ndim > 1, e.g. the "diff" F-contrast) is an F-test, a single
        # row is a t-test
        if np.ndim(contrast_val) > 1:
            contrast_type = "F"
        else:
            contrast_type = "t"
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * len(func_files),
            con_id=contrast_id,
            contrast_type=contrast_type,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)

        # store stat maps to disk
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(out_map, map_path)

            # collect maps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    return subject_id, anat, effects_maps, z_maps, contrasts, fmri_glm.mask
# Esempio n. 10
# results are written under ./results in the current working directory
write_dir = path.join(getcwd(), 'results')
if not path.exists(write_dir):
    mkdir(write_dir)

# Compute a population-level mask as the intersection of individual masks
grp_mask = Nifti1Image(
    intersect_masks(mask_images).astype(np.int8),
    load(mask_images[0]).get_affine())

# concatenate the individual beta images into one 4D group image
first_level_image = concat_images(betas)

# set the model: intercept-only design, i.e. a one-sample test
design_matrix = np.ones(len(betas))[:, np.newaxis]  # only the intercept
grp_model = FMRILinearModel(first_level_image, design_matrix, grp_mask)

# GLM fitting using ordinary least_squares (no AR(1) at the group level)
grp_model.fit(do_scaling=False, model='ols')

# specify and estimate the contrast
contrast_val = np.array(([[1]]))  # the only possible contrast !
z_map, = grp_model.contrast(contrast_val, con_id='one_sample', output_z=True)

# write the results
save(z_map, path.join(write_dir, 'one_sample_z_map.nii'))

# look at the result: symmetric color range centered on zero
vmax = max(-z_map.get_data().min(), z_map.get_data().max())
vmin = -vmax
plot_map(z_map.get_data(),
def execute_spm_auditory_glm(data, reg_motion=False):
    """Run the SPM auditory single-subject GLM and generate a stats report.

    Parameters
    ----------
    data : dict
        Preprocessing output; must contain 'func', 'anat', 'output_dir',
        'reports_output_dir' and 'subject_id'; optionally
        'realignment_parameters' (required when ``reg_motion``).
    reg_motion : bool, optional
        If True (and realignment parameters are available), add the six
        motion parameters as nuisance regressors.
    """
    # only regress motion out if realignment parameters were produced
    reg_motion = reg_motion and 'realignment_parameters' in data

    # experimental paradigm: 8 alternating rest/active blocks
    tr = 7.
    n_scans = 96
    _duration = 6
    epoch_duration = _duration * tr
    conditions = ['rest', 'active'] * 8
    duration = epoch_duration * np.ones(len(conditions))
    onset = np.linspace(0, (len(conditions) - 1) * epoch_duration,
                        len(conditions))
    paradigm = BlockParadigm(con_id=conditions, onset=onset, duration=duration)
    hfcut = 2 * 2 * epoch_duration

    # construct design matrix
    frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
    drift_model = 'Cosine'
    hrf_model = 'Canonical With Derivative'

    add_reg_names = None
    add_regs = None
    if reg_motion:
        add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
        add_regs = data['realignment_parameters'][0]
        if isinstance(add_regs, str):  # was Python-2 ``basestring``
            add_regs = np.loadtxt(add_regs)

    design_matrix = make_dmtx(frametimes,
                              paradigm, hrf_model=hrf_model,
                              drift_model=drift_model, hfcut=hfcut,
                              add_reg_names=add_reg_names,
                              add_regs=add_regs)

    # plot and save design matrix
    ax = design_matrix.show()
    ax.set_position([.05, .25, .9, .65])
    ax.set_title('Design matrix')
    dmat_outfile = os.path.join(data['output_dir'],
                                'design_matrix.png')
    pl.savefig(dmat_outfile, bbox_inches="tight", dpi=200)
    pl.close()

    # specify contrasts.
    # NOTE(review): indexing 2*i assumes conditions sit at even columns of
    # the design matrix -- confirm for your design.
    contrasts = {}
    n_columns = len(design_matrix.names)
    for i in range(paradigm.n_conditions):
        contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

    # more interesting contrasts
    contrasts['active-rest'] = contrasts['active'] - contrasts['rest']

    # fit GLM
    print('\r\nFitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(load_4D_img(data['func'][0]),
                               design_matrix.matrix,
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(data['output_dir'], "mask.nii.gz")
    print("Saving mask image %s..." % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)

    # compute bg unto which activation will be projected
    anat_img = load_vol(data['anat'])
    anat = anat_img.get_data()
    if anat.ndim == 4:
        anat = anat[..., 0]
    anat_affine = anat_img.get_affine()

    print("Computing contrasts...")
    z_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        z_map, t_map, eff_map, var_map = fmri_glm.contrast(
            contrasts[contrast_id],
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)

        # store stat maps to disk
        for dtype, out_map in zip(['z', 't', 'effects', 'variance'],
                                  [z_map, t_map, eff_map, var_map]):
            map_dir = os.path.join(
                data['output_dir'], '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir, '%s.nii.gz' % contrast_id)
            nibabel.save(out_map, map_path)

            # collect the z-map for the contrast we're interested in
            if contrast_id == 'active-rest' and dtype == "z":
                z_maps[contrast_id] = map_path

            print("\t\t%s map: %s" % (dtype, map_path))

        print()

    # do stats report, restricted to the contrasts we kept z-maps for
    stats_report_filename = os.path.join(data['reports_output_dir'],
                                         "report_stats.html")
    contrasts = dict((contrast_id, contrasts[contrast_id])
                     for contrast_id in z_maps.keys())
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.mask,
        design_matrices=[design_matrix],
        subject_id=data['subject_id'],
        anat=anat,
        anat_affine=anat_affine,
        cluster_th=50,  # we're only interested in 'large' clusters

        # additional ``kwargs`` for more informative report
        paradigm=paradigm.__dict__,
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
        )

    ProgressReport().finish_dir(data['output_dir'])
    print("\r\nStatistic report written to %s\r\n" % stats_report_filename)
def execute_spm_multimodal_fmri_glm(data, reg_motion=False):
    reg_motion = reg_motion and 'realignment_parameters' in data

    # experimental paradigm meta-params
    stats_start_time = time.ctime()
    tr = 2.
    drift_model = 'Cosine'
    hrf_model = 'Canonical With Derivative'
    hfcut = 128.

    # make design matrices
    design_matrices = []
    for x in xrange(2):
        n_scans = data['func'][x].shape[-1]

        timing = scipy.io.loadmat(data['trials_ses%i' % (x + 1)],
                                  squeeze_me=True, struct_as_record=False)

        faces_onsets = timing['onsets'][0].ravel()
        scrambled_onsets = timing['onsets'][1].ravel()
        onsets = np.hstack((faces_onsets, scrambled_onsets))
        onsets *= tr  # because onsets were reporting in 'scans' units
        conditions = ['faces'] * len(faces_onsets) + ['scrambled'] * len(
            scrambled_onsets)
        paradigm = EventRelatedParadigm(conditions, onsets)
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)

        add_reg_names = None
        add_regs = None
        if reg_motion:
            add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
            add_regs = np.loadtxt(data['realignment_parameters'][x])
            if isinstance(add_regs):
                add_regs = np.loadtxt(add_regs)
        design_matrix = make_dmtx(
            frametimes,
            paradigm, hrf_model=hrf_model,
            drift_model=drift_model, hfcut=hfcut,
            add_reg_names=add_reg_names,
            add_regs=add_regs
            )

        design_matrices.append(design_matrix)

    # specify contrasts
    contrasts = {}
    n_columns = len(design_matrix.names)
    for i in xrange(paradigm.n_conditions):
        contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

    # more interesting contrasts
    contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled']
    contrasts['scrambled-faces'] = contrasts['scrambled'] - contrasts['faces']
    contrasts['effects_of_interest'] = contrasts[
        'faces'] + contrasts['scrambled']

    # we've thesame contrasts over sessions, so let's replicate
    contrasts = dict((contrast_id, [contrast_val] * 2)
                     for contrast_id, contrast_val in contrasts.iteritems())

    # fit GLM
    print('\r\nFitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel([load_4D_img(sess_func)
                                for sess_func in data['func']],
                               [dmat.matrix for dmat in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(data['output_dir'], "mask.nii.gz")
    print "Saving mask image %s" % mask_path
    nibabel.save(fmri_glm.mask, mask_path)

    # compute bg unto which activation will be projected
    anat_img = load_vol(data['anat'])

    anat = anat_img.get_data()

    if anat.ndim == 4:
        anat = anat[..., 0]

    anat_affine = anat_img.get_affine()

    print "Computing contrasts .."
    z_maps = {}
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, eff_map, var_map = fmri_glm.contrast(
            contrast_val,
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True,
            )

        # store stat maps to disk
        for dtype, out_map in zip(['z', 't', 'effects', 'variance'],
                                  [z_map, t_map, eff_map, var_map]):
            map_dir = os.path.join(
                data['output_dir'], '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir, '%s.nii.gz' % contrast_id)
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if dtype == 'z':
                z_maps[contrast_id] = map_path

            print "\t\t%s map: %s" % (dtype, map_path)

    # do stats report
    data['stats_report_filename'] = os.path.join(data['reports_output_dir'],
                                                 "report_stats.html")
    contrasts = dict((contrast_id, contrasts[contrast_id])
                     for contrast_id in z_maps.keys())
    generate_subject_stats_report(
        data['stats_report_filename'],
        contrasts,
        z_maps,
        fmri_glm.mask,
        anat=anat,
        anat_affine=anat_affine,
        design_matrices=design_matrices,
        subject_id=data['subject_id'],
        cluster_th=15,  # we're only interested in this 'large' clusters
        start_time=stats_start_time,

        # additional ``kwargs`` for more informative report
        paradigm=paradigm.__dict__,
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
        )

    ProgressReport().finish_dir(data['reports_output_dir'])

    print "\r\nStatistic report written to %s\r\n" % data[
        'stats_report_filename']

    return data
Esempio n. 13
0
def execute_glm(
    doc,
    out_dir,
    contrast_definitions=None,
    outputs=None,
    glm_model='ar1',
):
    """Function to execute GLM for one subject --and perhaps multiple
    sessions thereof.

    Parameters
    ----------
    doc: dict-like
        subject document; keys read here include 'subject', 'study',
        'mask', 'TR', 'n_scans' and 'n_sessions'
    out_dir: string
        root output directory; per-subject results are written under
        ``out_dir/<subject_id>``
    contrast_definitions: optional
        if given, contrasts are (re)built from these definitions via
        ``make_contrasts``; otherwise the contrasts already present in
        the loaded GLM params are used
    outputs: dict, optional
        flags controlling what ``_export`` writes to disk; note that
        'maps' is always forced to False here
    glm_model: string, optional (default 'ar1')
        noise model passed to ``FMRILinearModel.fit``

    """

    stats_start_time = time.ctime()

    # study_dir = os.path.join(out_dir, doc['study'])

    # default export configuration: keep mask and model, skip raw data;
    # 'maps' export is disabled unconditionally in both branches
    if outputs is None:
        outputs = {
            'maps': False,
            'data': False,
            'mask': True,
            'model': True,
        }
    else:
        outputs['maps'] = False

    subject_id = doc['subject']
    subject_output_dir = os.path.join(out_dir, subject_id)

    # dump the requested pieces of the subject document to disk
    _export(doc, subject_output_dir, outputs=outputs)

    params = load_glm_params(doc)

    # instantiate GLM
    fmri_glm = FMRILinearModel(params['data'], params['design_matrices'],
                               doc['mask'])

    # fit GLM
    fmri_glm.fit(do_scaling=True, model=glm_model)

    # save beta-maps to disk, one plain-text file per session GLM
    beta_map_dir = os.path.join(subject_output_dir, 'beta_maps')
    if not os.path.exists(beta_map_dir):
        os.makedirs(beta_map_dir)
    for j, glm in zip(xrange(len(fmri_glm.glms)), fmri_glm.glms):
        # XXX save array in some compressed format
        np.savetxt(
            os.path.join(beta_map_dir, "beta_map_%i.txt" % j),
            glm.get_beta(),  # array has shape (n_conditions, n_voxels)
        )

    # define contrasts
    if contrast_definitions is not None:
        params['contrasts'] = make_contrasts(params, contrast_definitions)
    # contrast ids, sorted for a deterministic processing order
    contrasts = sorted(params['contrasts'][0].keys())

    _contrasts = {}
    z_maps = {}

    # compute stats maps
    for index, contrast_id in enumerate(contrasts):
        print ' study[%s] subject[%s] contrast [%s]: %i/%i' % (
            doc['study'], doc['subject'], contrast_id, index + 1,
            len(contrasts))

        # one contrast vector per session
        contrast = [c[contrast_id] for c in params['contrasts']]
        # filesystem-safe name for the output files
        contrast_name = contrast_id.replace(' ', '_')

        z_map, t_map, c_map, var_map = fmri_glm.contrast(
            contrast,
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True,
        )

        # store stat maps to disk, one subdirectory per map type
        for dtype, out_map in zip(['z', 't', 'c', 'variance'],
                                  [z_map, t_map, c_map, var_map]):
            map_dir = os.path.join(subject_output_dir, '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_name)
            nb.save(out_map, map_path)

            # collect z map
            if dtype == 'z':
                _contrasts[contrast_name] = contrast
                z_maps[contrast_name] = map_path

    # invoke a single API to handle plotting and html business for you
    subject_stats_report_filename = os.path.join(subject_output_dir,
                                                 "report_stats.html")
    glm_reporter.generate_subject_stats_report(
        subject_stats_report_filename,
        _contrasts,
        z_maps,
        doc['mask'],
        design_matrices=list(params['design_matrices']),
        subject_id=doc['subject'],
        cluster_th=15,  # 15 voxels
        start_time=stats_start_time,
        TR=doc['TR'],
        n_scans=doc['n_scans'],
        n_sessions=doc['n_sessions'],
        model=glm_model,
    )

    print "Report for subject %s written to %s" % (
        doc['subject'], subject_stats_report_filename)
Esempio n. 14
0
def _preprocess_and_analysis_subject(subject_data,
                                     slicer='z',
                                     cut_coords=6,
                                     threshold=3.,
                                     cluster_th=15,
                                     **preproc_params):
    """
    Preprocesses the subject and then fits (mass-univariate) GLM thereup.

    """

    # sanitize run_ids:
    # Sub14/BOLD/Run_02/fMR09029-0004-00010-000010-01.nii is garbage,

    # for example
    run_ids = range(9)
    if subject_data['subject_id'] == "Sub14":
        run_ids = [0] + range(2, 9)
        subject_data['func'] = [subject_data['func'][0]
                                ] + subject_data['func'][2:]
        subject_data['session_id'] = [subject_data['session_id'][0]
                                      ] + subject_data['session_id'][2:]

    # sanitize subject output dir
    if not 'output_dir' in subject_data:
        subject_data['output_dir'] = os.path.join(output_dir,
                                                  subject_data['subject_id'])

    # preprocess the data
    subject_data = do_subject_preproc(subject_data, **preproc_params)
    # chronometry
    stats_start_time = pretty_time()

    # to-be merged lists, one item per run
    paradigms = []
    frametimes_list = []
    design_matrices = []  # one
    list_of_contrast_dicts = []  # one dict per run
    n_scans = []
    for run_id in run_ids:
        _n_scans = len(subject_data.func[run_id])
        n_scans.append(_n_scans)

        # make paradigm
        paradigm = make_paradigm(getattr(subject_data, 'timing')[run_id])

        # make design matrix
        tr = 2.
        drift_model = 'Cosine'
        hrf_model = 'Canonical With Derivative'
        hfcut = 128.
        frametimes = np.linspace(0, (_n_scans - 1) * tr, _n_scans)
        design_matrix = make_dmtx(
            frametimes,
            paradigm,
            hrf_model=hrf_model,
            drift_model=drift_model,
            hfcut=hfcut,
            add_regs=np.loadtxt(
                getattr(subject_data, 'realignment_parameters')[run_id]),
            add_reg_names=[
                'Translation along x axis', 'Translation along yaxis',
                'Translation along z axis', 'Rotation along x axis',
                'Rotation along y axis', 'Rotation along z axis'
            ])

        # import matplotlib.pyplot as plt
        # design_matrix.show()
        # plt.show()

        paradigms.append(paradigm)
        design_matrices.append(design_matrix)
        frametimes_list.append(frametimes)
        n_scans.append(_n_scans)

        # specify contrasts
        contrasts = {}
        n_columns = len(design_matrix.names)
        for i in xrange(paradigm.n_conditions):
            contrasts['%s' %
                      design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

        # more interesting contrasts"""
        contrasts['Famous-Unfamiliar'] = contrasts['Famous'] - contrasts[
            'Unfamiliar']
        contrasts['Unfamiliar-Famous'] = -contrasts['Famous-Unfamiliar']
        contrasts[
            'Famous-Scrambled'] = contrasts['Famous'] - contrasts['Scrambled']
        contrasts['Scrambled-Famous'] = -contrasts['Famous-Scrambled']
        contrasts['Unfamiliar-Scrambled'] = contrasts[
            'Unfamiliar'] - contrasts['Scrambled']
        contrasts['Scrambled-Unfamiliar'] = -contrasts['Unfamiliar-Scrambled']

        list_of_contrast_dicts.append(contrasts)

    # importat maps
    z_maps = {}
    effects_maps = {}

    # fit GLM
    print('\r\nFitting a GLM (this takes time) ..')
    fmri_glm = FMRILinearModel(
        [nibabel.concat_images(sess_func) for sess_func in subject_data.func],
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    print "... done.\r\n"

    # save computed mask
    mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")

    print "Saving mask image to %s ..." % mask_path
    nibabel.save(fmri_glm.mask, mask_path)
    print "... done.\r\n"

    # replicate contrasts across runs
    contrasts = dict(
        (cid, [contrasts[cid] for contrasts in list_of_contrast_dicts])
        for cid, cval in contrasts.iteritems())

    # compute effects
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, eff_map = fmri_glm.contrast(contrast_val,
                                           con_id=contrast_id,
                                           output_z=True,
                                           output_stat=False,
                                           output_effects=True,
                                           output_variance=False)

        # store stat maps to disk
        for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]):
            map_dir = os.path.join(subject_data.output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path

            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # remove repeated contrasts
    contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems())

    # do stats report
    stats_report_filename = os.path.join(
        getattr(subject_data, 'reports_output_dir', subject_data.output_dir),
        "report_stats.html")
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.mask,
        threshold=threshold,
        cluster_th=cluster_th,
        slicer=slicer,
        cut_coords=cut_coords,
        design_matrices=design_matrices,
        subject_id=subject_data.subject_id,
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_data.subject_id,

        # additional ``kwargs`` for more informative report
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        drift_model=drift_model,
        hrf_model=hrf_model,
        paradigm=dict(("Run_%02i" % (run_id + 1), paradigms[run_id])
                      for run_id in run_ids),
        frametimes=dict(("Run_%02i" % (run_id + 1), frametimes_list[run_id])
                        for run_id in run_ids),
        # fwhm=fwhm
    )

    ProgressReport().finish_dir(subject_data.output_dir)
    print "\r\nStatistic report written to %s\r\n" % stats_report_filename

    return contrasts, effects_maps, z_maps, mask_path
Esempio n. 15
0
def group_one_sample_t_test(masks,
                            effects_maps,
                            contrasts,
                            output_dir,
                            start_time=base_reporter.pretty_time(),
                            **kwargs):
    """
    Runs a one-sample t-test procedure for group analysis. Here, we are
    for each experimental condition, only interested refuting the null
    hypothesis H0: "The average effect accross the subjects is zero!"

    Parameters
    ----------
    masks: list of strings or nibabel image objects
        subject masks, one per subject

    effects_maps: list of dicts of lists
        effects maps from subject-level GLM; each entry is a dictionary;
        each entry (indexed by condition id) of this dictionary is the
        filename (or correspinding nibabel image object) for the effects
        maps for that condition (aka contrast),for that subject

    contrasts: dictionary of array_likes
        contrasts vectors, indexed by condition id

    kwargs: dict_like
        parameters can be regular `nipy.labs.viz.plot_map` parameters
        (e.g slicer="y") or any parameter we want be reported (e.g
        fwhm=[5, 5, 5])

    """

    # make output directory
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    assert len(masks) == len(effects_maps), (len(masks), len(effects_maps))

    # compute group mask
    group_mask = nibabel.Nifti1Image(
        intersect_masks(masks).astype(np.int8),
        (nibabel.load(masks[0])
         if isinstance(masks[0], basestring) else masks[0]).get_affine())

    # construct design matrix (only one covariate, namely the "mean effect")
    design_matrix = np.ones(
        len(effects_maps))[:, np.newaxis]  # only the intercept

    group_level_z_maps = {}
    group_level_t_maps = {}
    for contrast_id in contrasts:
        print "\tcontrast id: %s" % contrast_id

        # effects maps will be the input to the second level GLM
        first_level_image = nibabel.concat_images(
            [x[contrast_id] for x in effects_maps])

        # fit 2nd level GLM for given contrast
        group_model = FMRILinearModel(first_level_image, design_matrix,
                                      group_mask)
        group_model.fit(do_scaling=False, model='ols')

        # specify and estimate the contrast
        contrast_val = np.array(([[1.]]))  # the only possible contrast !
        z_map, t_map = group_model.contrast(contrast_val,
                                            con_id='one_sample %s' %
                                            contrast_id,
                                            output_z=True,
                                            output_stat=True)

        # save map
        for map_type, map_img in zip(["z", "t"], [z_map, t_map]):
            map_dir = os.path.join(output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir,
                                    'group_level_%s.nii.gz' % (contrast_id))
            print "\t\tWriting %s ..." % map_path
            nibabel.save(map_img, map_path)
            if map_type == "z":
                group_level_z_maps[contrast_id] = map_path
            elif map_type == "t":
                group_level_z_maps[contrast_id] = map_path

    # do stats report
    stats_report_filename = os.path.join(output_dir, "report_stats.html")
    generate_subject_stats_report(stats_report_filename,
                                  contrasts,
                                  group_level_z_maps,
                                  group_mask,
                                  start_time=start_time,
                                  **kwargs)

    print "\r\nStatistic report written to %s\r\n" % (stats_report_filename)

    return group_level_z_maps
Esempio n. 16
0
from nipy.labs.utils.simul_multisubject_fmri_dataset import \
     surrogate_4d_dataset
from nipy.modalities.fmri.glm import FMRILinearModel
# Minimal GLM demo on synthetic 4D data.
shape = (10, 10, 10)  # simulate a cubic image
n_scans = 100  # equal to the number of frametimes
fmri_data = surrogate_4d_dataset(shape=shape, n_scans=n_scans)
# run the GLM
# NOTE(review): `X` (the design matrix) is not defined in this snippet --
# presumably built upstream (e.g. with make_dmtx); confirm before running.
my_glm = FMRILinearModel(fmri_data, X.matrix)
# GLM fitting
my_glm.fit(do_scaling=True, model='ar1')
# NOTE(review): `array` is also undefined here -- presumably numpy.array;
# verify the snippet's missing imports.
listen_vs_read = array([1, -1, 0, 0, 0, 0, 0])
z_map, = my_glm.contrast(listen_vs_read)
Esempio n. 17
0
# specify contrasts
contrasts = {}
n_columns = len(design_matrix.names)
for i in xrange(paradigm.n_conditions):
    contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

# more interesting contrasts
contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled']
contrasts['scrambled-faces'] = -contrasts['faces-scrambled']
contrasts['effects_of_interest'] = contrasts['faces'] + contrasts['scrambled']

# fit GLM
print 'Fitting a GLM (this takes time)...'
fmri_glm = FMRILinearModel(
    [nibabel.concat_images(x) for x in subject_data.func],
    [design_matrix.matrix for design_matrix in design_matrices],
    mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

# save computed mask
mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")
print "Saving mask image %s" % mask_path
nibabel.save(fmri_glm.mask, mask_path)
mask_images.append(mask_path)

# compute contrasts
z_maps = {}
effects_maps = {}
for contrast_id, contrast_val in contrasts.iteritems():
    print "\tcontrast id: %s" % contrast_id
    z_map, t_map, effects_map, var_map = fmri_glm.contrast(
#########################################

# simplest ones
contrasts = {}
n_columns = len(design_matrix.names)
for i in range(paradigm.n_conditions):
    contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

# Our contrast of interest
reading_vs_visual = contrasts["phrasevideo"] - contrasts["damier_H"]

########################################
# Perform a GLM analysis on H1
########################################

fmri_glm = FMRILinearModel(fmri_data, design_matrix.matrix, mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

# Estimate the contrast
z_map, = fmri_glm.contrast(reading_vs_visual, output_z=True)

# Plot the contrast
vmax = max(-z_map.get_data().min(), z_map.get_data().max())
plot_map(z_map.get_data(),
         z_map.get_affine(),
         cmap=cm.cold_hot,
         vmin=-vmax,
         vmax=vmax,
         slicer='z',
         black_bg=True,
         threshold=2.5,