Example #1
def test_first_level_model_design_creation():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 1.0
        slice_time_ref = 0.
        paradigm = basic_paradigm()
        model = FirstLevelModel(t_r, slice_time_ref, mask=mask,
                                drift_model='polynomial', drift_order=3)
        model = model.fit(func_img, paradigm)
        frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])
        # check design computation is identical
        n_scans = func_img.get_data().shape[3]
        start_time = slice_time_ref * t_r
        end_time = (n_scans - 1 + slice_time_ref) * t_r
        frame_times = np.linspace(start_time, end_time, n_scans)
        design = make_design_matrix(frame_times, paradigm,
                                    drift_model='polynomial', drift_order=3)
        frame2, X2, names2 = check_design_matrix(design)
        assert_array_equal(frame1, frame2)
        assert_array_equal(X1, X2)
        assert_array_equal(names1, names2)
Example #2
def test_show_design_matrix():
    # test that the show code indeed (formally) runs
    frame_times = np.linspace(0, 127 * 1., 128)
    DM = make_design_matrix(
        frame_times, drift_model='polynomial', drift_order=3)
    ax = plot_design_matrix(DM)
    assert (ax is not None)
Example #3
File: test_dmtx.py Project: mrahim/nistats
def design_matrix_light(
    frame_times,
    paradigm=None,
    hrf_model="glover",
    drift_model="cosine",
    period_cut=128,
    drift_order=1,
    fir_delays=[0],
    add_regs=None,
    add_reg_names=None,
    min_onset=-24,
    path=None,
):
    """ Idem make_design_matrix, but only returns the computed matrix
    and associated names """
    dmtx = make_design_matrix(
        frame_times,
        paradigm,
        hrf_model,
        drift_model,
        period_cut,
        drift_order,
        fir_delays,
        add_regs,
        add_reg_names,
        min_onset,
    )
    _, matrix, names = check_design_matrix(dmtx)
    return matrix, names
Example #4
File: test_dmtx.py Project: mrahim/nistats
def test_design_matrix0c():
    # test design matrix creation when regressors are provided manually
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    ax = np.random.randn(128, 4)
    _, X, names = check_design_matrix(
        make_design_matrix(frame_times, drift_model="polynomial", drift_order=3, add_regs=ax)
    )
    assert_almost_equal(X[:, 0], ax[:, 0])
    ax = np.random.randn(127, 4)
    assert_raises_regex(
        AssertionError,
        "Incorrect specification of additional regressors:.",
        make_design_matrix,
        frame_times,
        add_regs=ax,
    )
    ax = np.random.randn(128, 4)
    assert_raises_regex(
        ValueError,
        "Incorrect number of additional regressor names.",
        make_design_matrix,
        frame_times,
        add_regs=ax,
        add_reg_names="",
    )
Example #5
File: test_dmtx.py Project: mrahim/nistats
def test_design_matrix0():
    # Test design matrix creation when no paradigm is provided
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    _, X, names = check_design_matrix(make_design_matrix(frame_times, drift_model="polynomial", drift_order=3))
    assert_equal(len(names), 4)
    x = np.linspace(-0.5, 0.5, 128)
    assert_almost_equal(X[:, 0], x)
Example #6
def design_matrix(n_scans,
                  tr,
                  onsets,
                  conditions,
                  durations=None,
                  hrf_model='spm',
                  drift_model='cosine'):
    """
    Fits a Ridge regression on the data, using cross validation to choose the
    value of alpha.

    Parameters
    ----------

    n_scans: int
        number of scans in the session

    tr: float
        repetition time for the BOLD data

    onsets: array of shape [n_stimuli]
        onset times for stimuli in the session

    conditions: array of shape [n_stimuli]
        labels for stimuli in the session

    durations: array of shape [n_stimuli], optional
        durations for stimuli in the session

    hrf_model: {'spm', 'spm + derivative', 'spm + derivative + dispersion',
                'glover', 'glover + derivative',
                'glover + derivative + dispersion', 'fir'}
        HRF model to be used for creating the design matrix

    drift_model: {'polynomial', 'cosine', 'blank'}
        drift model to be used for creating the design matrix

    Returns
    -------

    design: numpy array of size [n_scans, n_regressors]
        design matrix for the given stimuli

    """
    frame_times = np.arange(n_scans) * tr
    paradigm = {}
    paradigm['onset'] = onsets
    paradigm['name'] = conditions
    if durations is not None:
        paradigm['duration'] = durations
    paradigm = pd.DataFrame(paradigm)

    design = make_design_matrix(frame_times,
                                paradigm,
                                hrf_model=hrf_model,
                                drift_model=drift_model)

    return design
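A minimal usage sketch for the function above (all values are hypothetical); it assumes numpy as np, pandas as pd, and make_design_matrix are importable as in the snippet:

onsets = np.array([10., 40., 70.])
conditions = np.array(['audio', 'visual', 'audio'])
X = design_matrix(n_scans=100, tr=2.0, onsets=onsets, conditions=conditions,
                  durations=np.ones(3))
print(X.shape)  # (n_scans, n_regressors): condition columns plus drift terms and a constant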
Example #7
def test_design_matrix0d():
    # test design matrix creation when regressors are provided manually
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    ax = np.random.randn(128, 4)
    _, X, names = check_design_matrix(make_design_matrix(
            frame_times, drift_model='polynomial', drift_order=3, add_regs=ax))
    assert_equal(len(names), 8)
    assert_equal(X.shape[1], 8)
Example #8
File: test_dmtx.py Project: mrahim/nistats
def test_spm_1():
    # Check that the nistats design matrix is close enough to the SPM one
    # (it cannot be identical, because the hrf shape is different)
    frame_times = np.linspace(0, 99, 100)
    conditions = ["c0", "c0", "c0", "c1", "c1", "c1", "c2", "c2", "c2"]
    onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
    paradigm = pd.DataFrame({"name": conditions, "onset": onsets})
    X1 = make_design_matrix(frame_times, paradigm, drift_model="blank")
    _, matrix, _ = check_design_matrix(X1)
    spm_design_matrix = DESIGN_MATRIX["arr_0"]
    assert_true(((spm_design_matrix - matrix) ** 2).sum() / (spm_design_matrix ** 2).sum() < 0.1)
Example #9
def test_design_matrix0():
    # Test design matrix creation when no paradigm is provided
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)

    _, X, names = check_design_matrix(
        make_design_matrix(frame_times,
                           drift_model='polynomial',
                           drift_order=3))
    assert_equal(len(names), 4)
    assert_almost_equal(X[:, 0], np.linspace(-0.5, .5, 128))
Example #10
def test_design_matrix0c():
    # test design matrix creation when regressors are provided manually
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    ax = np.random.randn(128, 4)
    _, X, names = check_design_matrix(
        make_design_matrix(frame_times,
                           drift_model='polynomial',
                           drift_order=3,
                           add_regs=ax))
    assert_almost_equal(X[:, 0], ax[:, 0])
Example #11
def test_spm_1():
    # Check that the nistats design matrix is close enough to the SPM one
    # (it cannot be identical, because the hrf shape is different)
    frame_times = np.linspace(0, 99, 100)
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
    paradigm = pd.DataFrame({'name': conditions, 'onset': onsets})
    X1 = make_design_matrix(frame_times, paradigm, drift_model='blank')
    _, matrix, _ = check_design_matrix(X1)
    spm_design_matrix = DESIGN_MATRIX['arr_0']
    assert_true(((spm_design_matrix - matrix)**2).sum() /
                (spm_design_matrix**2).sum() < .1)
Example #12
def design_matrix(n_scans, tr, onsets, conditions, durations=None,
                  hrf_model='spm', drift_model='cosine'):
    """ """
    frame_times = np.arange(n_scans) * tr
    paradigm = {}
    paradigm['onset'] = onsets
    paradigm['name'] = conditions
    if durations is not None:
        paradigm['duration'] = durations
    paradigm = pd.DataFrame(paradigm)

    X = make_design_matrix(frame_times, paradigm, hrf_model=hrf_model,
                           drift_model=drift_model)
    return X
Example #13
def preprocess_varpar(num, subj, subj_dir, **kwargs):
    from nistats.design_matrix import make_design_matrix
    from nistats.first_level_model import run_glm
    bold_path = 'BOLD/task001_run00%i/bold_dico_bold7Tp1_to_subjbold7Tp1.nii.gz' % (num+1)
    bold_path = os.path.join(DATA_DIR, subj, bold_path)
    mask = os.path.join(DATA_DIR, subj, 'templates', 'bold7Tp1', 'brain_mask.nii.gz')
    bold = load(bold_path)
    masker = NiftiMasker(mask)
    data = masker.fit_transform(bold)
    dmat = make_design_matrix(np.arange(data.shape[0])*TR, hrf_model='fir', drift_order=5,
                              **kwargs)
    labels, results = run_glm(data, dmat, noise_model='ols', verbose=1)
    img = masker.inverse_transform(StandardScaler().fit_transform(results[0.0].resid))
#    return StandardScaler().fit_transform(results[0.0].resid)
    save(img, os.path.join(subj_dir, 'run00%i.nii.gz' % num))
Example #14
File: test_dmtx.py Project: mrahim/nistats
def test_csv_io():
    # test the csv io on design matrices
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    paradigm = modulated_event_paradigm()
    DM = make_design_matrix(frame_times, paradigm, hrf_model="glover", drift_model="polynomial", drift_order=3)
    path = "design_matrix.csv"
    with InTemporaryDirectory():
        DM.to_csv(path)
        DM2 = pd.read_csv(path, index_col=0)

    _, matrix, names = check_design_matrix(DM)
    _, matrix_, names_ = check_design_matrix(DM2)
    assert_almost_equal(matrix, matrix_)
    assert_equal(names, names_)
Example #15
def design_matrix(design, tr):
    """Construct a design matrix using given parameter and nistat package.

    :param design: Design (dictionnary)
    :param tr: Repetition time (in seconds)
    :return: The design matrix given by nistats
    """
    # The total duration is equal to the last onset + the last stimulus duration + the last ITI
    total_duration = design['onset'][-1] + design['duration'][-1] + design[
        'ITI'][-1]
    n_scans = int(np.ceil(total_duration / tr))
    frame_times = np.arange(n_scans) * tr

    x = make_design_matrix(frame_times,
                           pd.DataFrame(design),
                           drift_model='blank')

    return x
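A minimal usage sketch (the design values are hypothetical); the 'name' column follows the paradigm convention used by the other snippets on this page:

design = {'onset': [0., 12., 24.], 'duration': [2., 2., 2.],
          'ITI': [10., 10., 10.], 'name': ['a', 'b', 'a']}
X = design_matrix(design, tr=2.0)  # frame_times span the last onset + duration + ITI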
Example #16
def design_matrix_light(frame_times,
                        paradigm=None,
                        hrf_model='canonical',
                        drift_model='cosine',
                        period_cut=128,
                        drift_order=1,
                        fir_delays=[0],
                        add_regs=None,
                        add_reg_names=None,
                        min_onset=-24,
                        path=None):
    """ Idem make_design_matrix, but only returns the computed matrix
    and associated names """
    dmtx = make_design_matrix(frame_times, paradigm, hrf_model, drift_model,
                              period_cut, drift_order, fir_delays, add_regs,
                              add_reg_names, min_onset)
    _, matrix, names = check_design_matrix(dmtx)
    return matrix, names
Example #17
def test_csv_io():
    # test the csv io on design matrices
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    paradigm = modulated_event_paradigm()
    DM = make_design_matrix(frame_times,
                            paradigm,
                            hrf_model='Canonical',
                            drift_model='polynomial',
                            drift_order=3)
    path = 'design_matrix.csv'
    with InTemporaryDirectory():
        DM.to_csv(path)
        DM2 = pd.read_csv(path, index_col=0)

    _, matrix, names = check_design_matrix(DM)
    _, matrix_, names_ = check_design_matrix(DM2)
    assert_almost_equal(matrix, matrix_)
    assert_equal(names, names_)
Example #18
def design_matrix(n_scans,
                  tr,
                  onsets,
                  conditions,
                  durations=None,
                  hrf_model='spm',
                  drift_model='cosine'):
    """ """
    frame_times = np.arange(n_scans) * tr
    paradigm = {}
    paradigm['onset'] = onsets
    paradigm['name'] = conditions
    if durations is not None:
        paradigm['duration'] = durations
    paradigm = pd.DataFrame(paradigm)

    X = make_design_matrix(frame_times,
                           paradigm,
                           hrf_model=hrf_model,
                           drift_model=drift_model)
    return X
Example #19
def get_design_matrix(event_file, n_scans):
    events = pd.read_csv(event_file, index_col=0)

    # Align to trimmed functional data
    events = events[events['onset'] >= 40.5]
    events['onset'] -= 40.5
    events = events.sort_values('onset').reset_index(drop=True)
    print('Raw events shape: ' + str(events.shape))

    # Resample and create FIR design matrix
    start_time = 0.0
    end_time = (n_scans - 1) * TR
    frame_times = np.linspace(start_time, end_time, n_scans)
    fir_delays = [1, 2, 3, 4, 5]
    events['modulation'] = events['modulation'].fillna(0)
    dm = make_design_matrix(frame_times,
                            events,
                            hrf_model='fir',
                            fir_delays=fir_delays,
                            drift_model=None)
    dm = dm.drop('constant', axis=1)
    return dm
Example #20
def genData(n_subs, tr, n_scans, betas, n_events):
    """
        Parameters:
            n_subs:     int
            tr:         float
            n_scans:    int
            betas:      list of tuples, each tuple contains the beta
                        for each participant per condition
    """
    n_conds = len(betas[0])
    frame_times = np.arange(n_scans) * tr
    X = pd.DataFrame()
    y = pd.DataFrame()
    for sub in range(n_subs):
        tmpbs = betas[sub]
        # create design matrix
        events = pd.DataFrame()
        events['duration'] = [1.0]*n_events
        events['onset'] = np.sort(np.random.choice(frame_times[2:-2], n_events, replace=False))
        conds = [f'cond_{x}' for x in range(n_conds)]
        events['trial_type'] = np.random.choice(conds, n_events)

        Xtmp = make_design_matrix(
            frame_times, events, hrf_model='glover')

        # create data with noise
        ytmp = pd.DataFrame()
        e = np.random.random(n_scans)*4
        y_t = np.array([tmpbs[x] * Xtmp[f'cond_{x}'].values for x in range(n_conds)])
        ytmp['y'] = y_t.sum(0) + e


        Xtmp['subject'] = [sub]*len(Xtmp)
        ytmp['subject'] = [sub]*len(ytmp)

        X = pd.concat([X, Xtmp])
        y = pd.concat([y, ytmp])

    return y, X
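A minimal usage sketch (all values hypothetical): two subjects with two conditions each.

betas = [(1.0, 0.5), (0.8, 0.3)]  # one tuple of per-condition betas per subject
y, X = genData(n_subs=2, tr=2.0, n_scans=100, betas=betas, n_events=10)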
Example #21
def single_paradigm(index, df):
    img, design_file = df.values
    n_scans = check_niimg(img).shape[3]
    slice_time_ref = 0.
    hrf_model = 'glover'
    drift_model = 'cosine'
    period_cut = 128
    drift_order = 1
    fir_delays = [0]
    min_onset = -24
    t_r = 0.8

    subject, session, direction = index
    trial_types, timing_files, contrasts = read_fsl_design_file(
        design_file)

    # fix timing filenames as we load the fsl file one directory
    # higher than expected
    timing_files = [tf.replace("EVs", "tfMRI_%s_%s/EVs" % (
        session, direction)) for tf in timing_files]

    # make design matrix
    events = make_paradigm_from_timing_files(timing_files,
                                             trial_types=trial_types)
    start_time = slice_time_ref * t_r
    end_time = (n_scans - 1 + slice_time_ref) * t_r
    frame_times = np.linspace(start_time, end_time, n_scans)
    design = make_design_matrix(frame_times, events,
                                hrf_model, drift_model,
                                period_cut, drift_order,
                                fir_delays, None, None, min_onset)
    output_dir = expanduser('~/data/HCP_masked')
    for batch_num, batch in enumerate(gen_batches(n_scans, 300)):
        name = '%s_%s_%s_%i_design' % (subject, session, direction,
                                       batch_num)
        np.save(join(output_dir, name), design[batch])
Example #22
def test_design_matrix0c():
    # test design matrix creation when regressors are provided manually
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    ax = np.random.randn(128, 4)
    _, X, names = check_design_matrix(
        make_design_matrix(frame_times,
                           drift_model='polynomial',
                           drift_order=3,
                           add_regs=ax))
    assert_almost_equal(X[:, 0], ax[:, 0])
    ax = np.random.randn(127, 4)
    assert_raises_regex(AssertionError,
                        "Incorrect specification of additional regressors:.",
                        make_design_matrix,
                        frame_times,
                        add_regs=ax)
    ax = np.random.randn(128, 4)
    assert_raises_regex(ValueError,
                        "Incorrect number of additional regressor names.",
                        make_design_matrix,
                        frame_times,
                        add_regs=ax,
                        add_reg_names='')
Example #23
csf_filename = os.path.join("pypreprocess_output", "mwc2T1.nii.gz")
csf_time_serie = NiftiMasker(mask_img=csf_filename).fit_transform(func_fname)
print("Extracting the white matter (WM)...")
wm_filename = os.path.join("pypreprocess_output", "mwc3T1.nii.gz")
wm_time_serie = NiftiMasker(mask_img=wm_filename).fit_transform(func_fname)

dirty_data = np.vstack([csf_time_serie, wm_time_serie])

# make design matrix of PC components
print("Computing the PCA out of the CSF and the WM...")
pca = PCA(n_components=2)
pca.fit(dirty_data)
frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
print("Defining the cleaning design matrix..")
design_matrix = make_design_matrix(frametimes, hrf_model='spm',
                                   add_regs=pca.components_,
                                   add_reg_names=["csfwm_pc1", "csfwm_pc2"])

# fit a first GLM to clean the data
print("Fitting the first GLM to clean the data...")
cleaner = FirstLevelModel(t_r=tr, slice_time_ref=0.5, noise_model='ar1',
                           standardize=False)
cleaner.fit(run_imgs=fmri_img, design_matrices=design_matrix)
dirty_fmri_img = cleaner.results_.predict(design_matrix)
print("Clean the data...")
fmri_img -= dirty_fmri_img

#########################################################################
# Cleaning the data

# extract the seed
Example #24
tr = 1.0
n_scans = 128
frame_times = np.arange(n_scans) * tr

# experimental paradigm
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c3', 'c3', 'c3']
onsets = [30., 70., 100., 10., 30., 90., 30., 40., 60.]
hrf_model = 'canonical'
motion = np.cumsum(np.random.randn(n_scans, 6), 0)  # simulate motion
add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']

# event-related design matrix
paradigm = pd.DataFrame({'name': conditions, 'onset': onsets})

X1 = make_design_matrix(
    frame_times, paradigm, drift_model='polynomial', drift_order=3,
    add_regs=motion, add_reg_names=add_reg_names)

# block design matrix
duration = 7. * np.ones(len(conditions))
paradigm = pd.DataFrame({'name': conditions, 'onset': onsets,
                         'duration': duration})

X2 = make_design_matrix(frame_times, paradigm, drift_model='polynomial',
                        drift_order=3)

# FIR model
paradigm = pd.DataFrame({'name': conditions, 'onset': onsets})
hrf_model = 'FIR'
X3 = make_design_matrix(frame_times, paradigm, hrf_model='fir',
                        drift_model='polynomial', drift_order=3,
                        fir_delays=np.arange(1, 6))
Example #25
# Prepare seed
pcc_coords = (0, -53, 26)

#########################################################################
# Extract the seed and build the design matrix
# --------------------------------------------
# Specify the seed-based contrast
seed_masker = NiftiSpheresMasker([pcc_coords], radius=10, detrend=True,
                                 standardize=True, low_pass=0.1,
                                 high_pass=0.01, t_r=2.,
                                 memory='nilearn_cache',
                                 memory_level=1, verbose=0)
seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
design_matrix = make_design_matrix(frametimes, hrf_model='spm',
                                   add_regs=seed_time_series,
                                   add_reg_names=["pcc_seed"])
dmn_contrast = np.array([1] + [0]*(design_matrix.shape[1]-1))
contrasts = {'seed_based_glm': dmn_contrast}

#########################################################################
# Perform first level analysis
# ----------------------------
# Setup and fit GLM
first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                          design_matrices=design_matrix)

#########################################################################
# contrast estimation
print('Contrast seed_based_glm computed.')
Example #26
def first_level(subject_dic, additional_regressors=None, compcorr=False,
                smooth=None, surface=False, mask_img=None):
    """ Run the first-level analysis (GLM fitting + statistical maps)
    in a given subject
    
    Parameters
    ----------
    subject_dic: dict,
                 exhaustive description of an individual acquisition
    additional_regressors: dict or None,
                 additional regressors provided as an already sampled 
                 design_matrix
                 dictionary keys are session_ids
    compcorr: Bool, optional,
              whether confound estimation and removal should be carried out or not
    smooth: float or None, optional,
            how much the data should be spatially smoothed during masking
    """
    start_time = time.ctime()
    # experimental paradigm meta-params
    motion_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    hrf_model = subject_dic['hrf_model']
    hfcut = subject_dic['hfcut']
    drift_model = subject_dic['drift_model']
    tr = subject_dic['TR']

    if not surface and (mask_img is None):
        mask_img = masking(subject_dic['func'], subject_dic['output_dir'])

        
    if additional_regressors is None:
        additional_regressors = dict(
            [(session_id, None) for session_id in subject_dic['session_id']])

    for session_id, fmri_path, onset, motion_path in zip(
            subject_dic['session_id'], subject_dic['func'],
            subject_dic['onset'], subject_dic['realignment_parameters']):
        
        # Guessing paradigm from file name
        # paradigm_id = session_id[:session_id.rfind('_')]
        #paradigm_id = session_id
        #for unwanted in ['_ap', '_pa'] + ['_run%d' % d for d in range(10)]:
        #    paradigm_id = paradigm_id.replace(unwanted, '')
        paradigm_id = _session_id_to_task_id([session_id])[0]
        
        if surface:
            from nibabel.gifti import read
            n_scans = np.array([darrays.data for darrays in read(fmri_path).darrays]).shape[0]
        else:
            n_scans = nib.load(fmri_path).shape[3]

        # motion parameters
        motion = np.loadtxt(motion_path)
        # define the time stamps for different images
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)

        if surface:
            compcorr = False  # XXX Fixme
        
        if compcorr:
            confounds = high_variance_confounds(fmri_path, mask_img=mask_img)
            confounds = np.hstack((confounds, motion))
            confound_names = ['conf_%d' % i for i in range(5)] + motion_names 
        else:
            confounds = motion
            confound_names = motion_names

        if onset is None:
            warnings.warn('Onset file not provided. Trying to guess it')
            task = os.path.basename(fmri_path).split('task')[-1][4:]
            onset = os.path.join(
                os.path.split(os.path.dirname(fmri_path))[0], 'model001',
                'onsets', 'task' + task + '_run001', 'task%s.csv' % task)

        if not os.path.exists(onset):
            warnings.warn('non-existant onset file. proceeding without it')
            paradigm = None
        else:
            paradigm = make_paradigm(onset, paradigm_id)
        
        # handle manually supplied regressors
        add_reg_names = []
        if additional_regressors[session_id] is None:
            add_regs = confounds
        else:
            df = read_csv(additional_regressors[session_id])
            add_regs = []
            for regressor in df:
                add_reg_names.append(regressor)
                add_regs.append(df[regressor])
            add_regs = np.array(add_regs).T
            add_regs = np.hstack((add_regs, confounds))
        
        add_reg_names += confound_names

        # create the design matrix
        design_matrix = make_design_matrix(
            frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model,
            period_cut=hfcut, add_regs=add_regs,
            add_reg_names=add_reg_names)
        _, dmtx, names = check_design_matrix(design_matrix)
        
        # create the relevant contrasts
        contrasts = make_contrasts(paradigm_id, names)

        if surface:
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_surf_%s' % session_id)
        else:
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_stats_%s' % session_id)

        if not os.path.exists(subject_session_output_dir):
            os.makedirs(subject_session_output_dir)
        np.savez(os.path.join(subject_session_output_dir, 'design_matrix.npz'),
                 design_matrix=design_matrix)
            
        if surface:
            run_surface_glm(
                design_matrix, contrasts, fmri_path, subject_session_output_dir)
        else:
            z_maps = run_glm(
                design_matrix, contrasts, fmri_path, mask_img, subject_dic,
                subject_session_output_dir, tr=tr, smoothing_fwhm=smooth)
            
            # do stats report
            anat_img = nib.load(subject_dic['anat'])
            stats_report_filename = os.path.join(
                subject_session_output_dir, 'report_stats.html')
            # paradigm_dict = paradigm.__dict__ if paradigm is not None else None
            if paradigm is not None:
                paradigm_dict = {
                    'onset': paradigm['onset'].values,
                    'name': paradigm['name'].values, 
                    'duration': paradigm['duration'].values,}
            generate_subject_stats_report(
                stats_report_filename,
                contrasts,
                z_maps,
                mask_img,
                threshold=3.,
                cluster_th=15,
                anat=anat_img,
                anat_affine=anat_img.get_affine(),
                design_matrices=[design_matrix],
                subject_id=subject_dic['subject_id'],
                start_time=start_time,
                title="GLM for subject %s" % session_id,
                # additional ``kwargs`` for more informative report
                # paradigm=paradigm_dict,
                TR=tr,
                n_scans=n_scans,
                hfcut=hfcut,
                frametimes=frametimes,
                drift_model=drift_model,
                hrf_model=hrf_model,
            )
    if not surface:
        ProgressReport().finish_dir(subject_session_output_dir)
        print("Statistic report written to %s\r\n" % stats_report_filename)
Example #27
_ = plt.plot(timeseries_all[0,:,:])

# Generate design matrix.

t_r = 0.72
n_scans = 405

onsets_dir = "/home/finc/Dropbox/GitHub/nilearn_task_networks/support/onsets_HCP.csv"

events = pd.read_csv(onsets_dir)
events

frame_times = np.arange(n_scans) * t_r
frame_times

design_matrix = make_design_matrix(frame_times, events, hrf_model = None)
design_matrix = design_matrix.reset_index()

plt.plot(design_matrix["0back"])
plt.plot(design_matrix["2back"])
plt.legend()

# Calculate correlation matrices.
conditions  = ["0back", "2back"]
sub_n = timeseries_all.shape[0]
rois_n = timeseries_all.shape[2]

correlation_matrices = np.zeros((sub_n, len(conditions), rois_n, rois_n))

for sub in range(sub_n):
    for i, cond in enumerate(conditions):
Example #28
paradigm_file = heroes['paradigm'][0]

# Read the paradigm
from nistats import experimental_paradigm
paradigm = experimental_paradigm.paradigm_from_csv(
    paradigm_file)

# Create the design matrix
import numpy as np
import matplotlib.pyplot as plt
import nibabel
from nistats.design_matrix import make_design_matrix, plot_design_matrix
tr = 2.5
n_scans = nibabel.load(func_file).get_data().shape[-1]
frametimes = np.arange(0, n_scans * tr, tr)
design_matrix = make_design_matrix(frametimes, paradigm)
plot_design_matrix(design_matrix)
plt.tight_layout()

# Fit GLM
print('Fitting a GLM')
from nistats.first_level_model import FirstLevelModel
fmri_glm = FirstLevelModel(tr)
fmri_glm = fmri_glm.fit(func_file, design_matrices=design_matrix)

# Specify the contrasts
contrasts = {}
n_columns = len(design_matrix.columns)
for n, name in enumerate(design_matrix.columns[:3]):
    contrasts[name] = np.zeros((n_columns,))
    contrasts[name][n] = 1
Example #29
File: nistats.py Project: cmaumet/fitlins
    def _run_interface(self, runtime):
        info = self.inputs.session_info

        img = nb.load(self.inputs.bold_file)
        vols = img.shape[3]

        events = pd.read_hdf(info['events'], key='events')

        if info['confounds'] is not None and info['confounds'] != 'None':
            confounds = pd.read_hdf(info['confounds'], key='confounds')
            confound_names = confounds.columns.tolist()
            drift_model = None if 'Cosine00' in confound_names else 'cosine'
        else:
            confounds = None
            confound_names = None
            drift_model = 'cosine'

        if isdefined(self.inputs.contrast_info):
            contrast_spec = pd.read_hdf(self.inputs.contrast_info,
                                        key='contrasts')
        else:
            contrast_spec = pd.DataFrame()

        mat = dm.make_design_matrix(
            frame_times=np.arange(vols) * info['repetition_time'],
            paradigm=events.rename(columns={
                'condition': 'trial_type',
                'amplitude': 'modulation'
            }),
            add_regs=confounds,
            add_reg_names=confound_names,
            drift_model=drift_model,
        )

        # Assume that explanatory variables == HRF-convolved variables
        exp_vars = events['condition'].unique().tolist()

        contrast_matrix, contrast_types = build_contrast_matrix(
            contrast_spec, mat, exp_vars)

        mat.to_csv('design.tsv', sep='\t')
        self._results['design_matrix'] = os.path.join(runtime.cwd,
                                                      'design.tsv')

        contrast_matrix.to_csv('contrasts.tsv', sep='\t')
        self._results['contrast_matrix'] = os.path.join(
            runtime.cwd, 'contrasts.tsv')

        mask_file = self.inputs.mask_file
        if not isdefined(mask_file):
            mask_file = None
        flm = level1.FirstLevelModel(mask=mask_file)
        flm.fit(img, design_matrices=mat)

        contrast_maps = []
        contrast_metadata = []
        stat_fmt = os.path.join(runtime.cwd, '{}.nii.gz').format
        for contrast, ctype in zip(contrast_matrix, contrast_types):
            es = flm.compute_contrast(contrast_matrix[contrast].values, {
                'T': 't',
                'F': 'F'
            }[ctype],
                                      output_type='effect_size')
            es_fname = stat_fmt(contrast)
            es.to_filename(es_fname)

            contrast_maps.append(es_fname)
            contrast_metadata.append({'contrast': contrast, 'type': 'effect'})
        self._results['contrast_maps'] = contrast_maps
        self._results['contrast_metadata'] = contrast_metadata

        return runtime
Example #30
def generate_spikes_time_series(n_events=200, n_blank_events=50,
                                event_spacing=6, t_r=2, hrf_length=32.,
                                event_types=['ev1', 'ev2'], period_cut=64,
                                jitter_min=-1, jitter_max=1, drift_order=1,
                                return_jitter=False, time_offset=10,
                                modulation=None, seed=None, f_hrf=None):
    """Voxel-level activations

    Parameters
    ----------
    n_events
    n_blank_events
    event_spacing
    t_r
    hrf_length
    event_types
    period_cut
    jitter_min
    jitter_max
    drift_order
    return_jitter
    time_offset
    modulation
    seed
    f_hrf


    Returns
    -------
    paradigm
    design
    modulation
    measurement_times
    """

    rng = check_random_state(seed)
    event_types = np.array(event_types)

    all_times = (1. + np.arange(n_events + n_blank_events)) * event_spacing
    non_blank_events = rng.permutation(len(all_times))[:n_events]
    onsets = np.sort(all_times[non_blank_events])

    names = event_types[rng.permutation(n_events) % len(event_types)]
    measurement_times = np.arange(0., onsets.max() + time_offset, t_r)

    if modulation is None:
        modulation = np.ones_like(onsets)

    # Jittered paradigm
    if return_jitter:
        onsets += rng.uniform(jitter_min, jitter_max, len(onsets))

    paradigm = pd.DataFrame.from_dict(dict(onset=onsets, name=names))

    if f_hrf is None:
        design = make_design_matrix(measurement_times, paradigm=paradigm,
                                    period_cut=period_cut,
                                    drift_order=drift_order)
    else:
        design = make_design_matrix_hrf(measurement_times, paradigm=paradigm,
                                        period_cut=period_cut,
                                        drift_order=drift_order,
                                        hrf_length=hrf_length,
                                        t_r=t_r, time_offset=time_offset,
                                        f_hrf=f_hrf)

    return paradigm, design, modulation, measurement_times
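A minimal usage sketch for the generator above (all keyword values are hypothetical):

paradigm, design, modulation, times = generate_spikes_time_series(
    n_events=20, n_blank_events=5, event_spacing=6, t_r=2, seed=42)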
Example #31
n_scans = 128
tr = 2.4
frame_times = np.linspace(0.5 * tr, (n_scans - .5) * tr, n_scans)

# data
data = datasets.fetch_localizer_first_level()
paradigm_file = data.paradigm
fmri_img = data.epi_img

### Design matrix ########################################

paradigm = pd.read_csv(paradigm_file, sep=' ', header=None, index_col=None)
paradigm.columns = ['session', 'name', 'onset']
n_conditions = len(paradigm.name.unique())
design_matrix = make_design_matrix(
    frame_times, paradigm, hrf_model='glover + derivative',
    drift_model='cosine', period_cut=128)

### Perform a GLM analysis ########################################

fmri_glm = FirstLevelGLM().fit(fmri_img, design_matrix)

### Estimate contrasts #########################################

# Specify the contrasts
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])

contrasts["audio"] = contrasts["clicDaudio"] + contrasts["clicGaudio"] +\
    contrasts["calculaudio"] + contrasts["phraseaudio"]
Example #32
def denoise(img_file,
            tsv_file,
            out_path,
            col_names=False,
            hp_filter=False,
            lp_filter=False,
            out_figure_path=False):
    nii_ext = '.nii.gz'
    FD_thr = [.5]
    sc_range = np.arange(-1, 3)
    constant = 'constant'

    # read in files
    img = load_niimg(img_file)
    # get file info
    img_name = os.path.basename(img.get_filename())
    file_base = img_name[0:img_name.find('.')]
    save_img_file = pjoin(out_path, file_base + \
                          '_NR' + nii_ext)
    data = img.get_data()
    df_orig = pandas.read_csv(tsv_file, sep='\t', na_values='n/a')
    df = copy.deepcopy(df_orig)
    Ntrs = df.values.shape[0]
    print('# of TRs: ' + str(Ntrs))
    assert Ntrs == data.shape[-1]

    # select columns to use as nuisance regressors
    if col_names:
        df = df[col_names]
        str_append = '  [SELECTED regressors in CSV]'
    else:
        col_names = df.columns.tolist()
        str_append = '  [ALL regressors in CSV]'

    # fill in missing nuisance values with mean for that variable
    for col in df.columns:
        if sum(df[col].isnull()) > 0:
            print('Filling in ' + str(sum(df[col].isnull())) +
                  ' NaN value for ' + col)
            df[col] = df[col].fillna(np.mean(df[col]))
    print('# of Confound Regressors: ' + str(len(df.columns)) + str_append)

    # implement HP filter in regression
    TR = img.header.get_zooms()[-1]
    frame_times = np.arange(Ntrs) * TR
    if hp_filter:
        hp_filter = float(hp_filter)
        assert (hp_filter > 0)
        period_cutoff = 1. / hp_filter
        df = make_design_matrix(frame_times,
                                period_cut=period_cutoff,
                                add_regs=df.values,
                                add_reg_names=df.columns.tolist())
        # fn adds intercept into dm

        hp_cols = [col for col in df.columns if 'drift' in col]
        print('# of High-pass Filter Regressors: ' + str(len(hp_cols)))
    else:
        # add in intercept column into data frame
        df[constant] = 1
        print('No High-pass Filter Applied')

    dm = df.values

    # prep data
    data = np.reshape(data, (-1, Ntrs))
    data_mean = np.mean(data, axis=1)
    Nvox = len(data_mean)

    # setup and run regression
    model = regression.OLSModel(dm)
    results = model.fit(data.T)
    if not hp_filter:
        results_orig_resid = copy.deepcopy(
            results.resid)  # save for rsquared computation

    # apply low-pass filter
    if lp_filter:
        # input to butterworth fn is time x voxels
        low_pass = float(lp_filter)
        Fs = 1. / TR
        if low_pass >= Fs / 2:
            raise ValueError(
                'Low pass filter cutoff is too close to the Nyquist frequency (%s)'
                % (Fs / 2))

        temp_img_file = pjoin(out_path, file_base + \
                              '_temp' + nii_ext)
        temp_img = nb.Nifti1Image(np.reshape(
            results.resid.T + np.reshape(data_mean, (Nvox, 1)),
            img.shape).astype('float32'),
                                  img.affine,
                                  header=img.header)
        temp_img.to_filename(temp_img_file)
        results.resid = butterworth(results.resid,
                                    sampling_rate=Fs,
                                    low_pass=low_pass,
                                    high_pass=None)
        print('Low-pass Filter Applied: < ' + str(low_pass) + ' Hz')

    # add mean back into data
    clean_data = results.resid.T + np.reshape(
        data_mean, (Nvox, 1))  # add mean back into residuals

    # save out new data file
    print('Saving output file...')
    clean_data = np.reshape(clean_data, img.shape).astype('float32')
    new_img = nb.Nifti1Image(clean_data, img.affine, header=img.header)
    new_img.to_filename(save_img_file)

    ######### generate Rsquared map for confounds only
    if hp_filter:
        # first remove low-frequency information from data
        hp_cols.append(constant)
        model_first = regression.OLSModel(df[hp_cols].values)
        results_first = model_first.fit(data.T)
        results_first_resid = copy.deepcopy(results_first.resid)
        del results_first, model_first

        # compute sst - borrowed from matlab
        sst = np.square(
            np.linalg.norm(results_first_resid -
                           np.mean(results_first_resid, axis=0),
                           axis=0))

        # now regress out 'true' confounds to estimate their Rsquared
        nr_cols = [col for col in df.columns if 'drift' not in col]
        model_second = regression.OLSModel(df[nr_cols].values)
        results_second = model_second.fit(results_first_resid)

        # compute sse - borrowed from matlab
        sse = np.square(np.linalg.norm(results_second.resid, axis=0))

        del results_second, model_second, results_first_resid

    elif not hp_filter:
        # compute sst - borrowed from matlab
        sst = np.square(
            np.linalg.norm(data.T - np.mean(data.T, axis=0), axis=0))

        # compute sse - borrowed from matlab
        sse = np.square(np.linalg.norm(results_orig_resid, axis=0))

        del results_orig_resid

    # compute rsquared of nuisance regressors
    zero_idx = np.logical_and(sst == 0, sse == 0)
    sse[zero_idx] = 1
    sst[zero_idx] = 1  # would be NaNs - become rsquared = 0
    rsquare = 1 - np.true_divide(sse, sst)
    rsquare[np.isnan(rsquare)] = 0

    ######### Visualizing DM & outputs
    fontsize = 12
    fontsize_title = 14
    def_img_size = 8

    if not out_figure_path:
        out_figure_path = save_img_file[0:save_img_file.find('.')] + '_figures'

    if not os.path.isdir(out_figure_path):
        os.mkdir(out_figure_path)
    png_append = '_' + img_name[0:img_name.find('.')] + '.png'
    print('Output directory: ' + out_figure_path)

    # DM corr matrix
    cm = df[df.columns[0:-1]].corr()
    curr_sz = copy.deepcopy(def_img_size)
    if cm.shape[0] > def_img_size:
        curr_sz = curr_sz + ((cm.shape[0] - curr_sz) * .3)
    mtx_scale = curr_sz * 100

    mask = np.zeros_like(cm, dtype=bool)
    mask[np.triu_indices_from(mask)] = True

    fig, ax = plt.subplots(figsize=(curr_sz, curr_sz))
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(cm,
                mask=mask,
                cmap=cmap,
                center=0,
                vmax=cm[cm < 1].max().max(),
                vmin=cm[cm < 1].min().min(),
                square=True,
                linewidths=.5,
                cbar_kws={"shrink": .6})
    ax.set_xticklabels(ax.get_xticklabels(),
                       rotation=60,
                       ha='right',
                       fontsize=fontsize)
    ax.set_yticklabels(cm.columns.tolist(),
                       rotation=-30,
                       va='bottom',
                       fontsize=fontsize)
    ax.set_title('Nuisance Corr. Matrix', fontsize=fontsize_title)
    plt.tight_layout()
    file_corr_matrix = 'Corr_matrix_regressors' + png_append
    fig.savefig(pjoin(out_figure_path, file_corr_matrix))
    plt.close(fig)
    del fig, ax

    # DM of Nuisance Regressors (all)
    tr_label = 'TR (Volume #)'
    fig, ax = plt.subplots(figsize=(curr_sz - 4.1, def_img_size))
    x_scale_html = ((curr_sz - 4.1) / def_img_size) * 890
    reporting.plot_design_matrix(df, ax=ax)
    ax.set_title('Nuisance Design Matrix', fontsize=fontsize_title)
    ax.set_xticklabels(ax.get_xticklabels(),
                       rotation=60,
                       ha='right',
                       fontsize=fontsize)
    ax.set_yticklabels(ax.get_yticklabels(), fontsize=fontsize)
    ax.set_ylabel(tr_label, fontsize=fontsize)
    plt.tight_layout()
    file_design_matrix = 'Design_matrix' + png_append
    fig.savefig(pjoin(out_figure_path, file_design_matrix))
    plt.close(fig)
    del fig, ax

    # FD timeseries plot
    FD = 'FD'
    poss_names = ['FramewiseDisplacement', FD, 'framewisedisplacement', 'fd']
    fd_idx = [df_orig.columns.__contains__(i) for i in poss_names]
    if np.sum(fd_idx) > 0:
        FD_name = poss_names[np.argmax(fd_idx)]  # first matching column name
        if sum(df_orig[FD_name].isnull()) > 0:
            df_orig[FD_name] = df_orig[FD_name].fillna(
                np.mean(df_orig[FD_name]))
        y = df_orig[FD_name].values
        Nremove = []
        sc_idx = []
        for thr_idx, thr in enumerate(FD_thr):
            idx = y >= thr
            sc_idx.append(copy.deepcopy(idx))
            for iidx in np.where(idx)[0]:
                for buffer in sc_range:
                    curr_idx = iidx + buffer
                    if curr_idx >= 0 and curr_idx < len(idx):
                        sc_idx[thr_idx][curr_idx] = True
            Nremove.append(np.sum(sc_idx[thr_idx]))

        Nplots = len(FD_thr)
        sns.set(font_scale=1.5)
        sns.set_style('ticks')
        fig, axes = plt.subplots(Nplots,
                                 1,
                                 figsize=(def_img_size * 1.5,
                                          def_img_size / 2),
                                 squeeze=False)
        sns.despine()
        bound = .4
        fd_mean = np.mean(y)
        for curr in np.arange(0, Nplots):
            axes[curr, 0].plot(y)
            axes[curr, 0].plot((-bound, Ntrs + bound),
                               FD_thr[curr] * np.ones((1, 2))[0],
                               '--',
                               color='black')
            axes[curr, 0].scatter(np.arange(0, Ntrs), y, s=20)

            if Nremove[curr] > 0:
                info = scipy.ndimage.label(sc_idx[curr])
                for cluster in np.arange(1, info[1] + 1):
                    temp = np.where(info[0] == cluster)[0]
                    axes[curr, 0].axvspan(temp.min() - bound,
                                          temp.max() + bound,
                                          alpha=.5,
                                          color='red')

            axes[curr, 0].set_ylabel('Framewise Disp. (' + FD + ')')
            axes[curr, 0].set_title(FD + ': ' +
                                    str(100 * Nremove[curr] / Ntrs)[0:4] +
                                    '% of scan (' + str(Nremove[curr]) +
                                    ' volumes) would be scrubbed (FD thr.= ' +
                                    str(FD_thr[curr]) + ')')
            plt.text(Ntrs + 1,
                     FD_thr[curr] - .01,
                     FD + ' = ' + str(FD_thr[curr]),
                     fontsize=fontsize)
            plt.text(Ntrs,
                     fd_mean - .01,
                     'avg = ' + str(fd_mean),
                     fontsize=fontsize)
            axes[curr, 0].set_xlim((-bound, Ntrs + 8))

        plt.tight_layout()
        axes[curr, 0].set_xlabel(tr_label)
        file_fd_plot = FD + '_timeseries' + png_append
        fig.savefig(pjoin(out_figure_path, file_fd_plot))
        plt.close(fig)
        del fig, axes
        print(FD + ' timeseries plot saved')

    else:
        print(FD + ' not found: ' + FD + ' timeseries not plotted')
        file_fd_plot = None

    # Carpet and DVARS plots - before & after nuisance regression

    # need to create mask file to input to DVARS function
    mask_file = pjoin(out_figure_path, 'mask_temp.nii.gz')
    nifti_masker = NiftiMasker(mask_strategy='epi', standardize=False)
    nifti_masker.fit(img)
    nifti_masker.mask_img_.to_filename(mask_file)

    # create 2 or 3 carpet plots, depending on if LP filter is also applied
    Ncarpet = 2
    total_sz = int(16)
    carpet_scale = 840
    y_labels = ['Input (voxels)', 'Output \'cleaned\'']
    imgs = [img, new_img]
    img_files = [img_file, save_img_file]
    color = ['red', 'salmon']
    labels = ['input', 'cleaned']
    if lp_filter:
        Ncarpet = 3
        total_sz = int(20)
        carpet_scale = carpet_scale * (9 / 8)
        y_labels = ['Input', 'Clean Pre-LP', 'Clean LP']
        imgs.insert(1, temp_img)
        img_files.insert(1, temp_img_file)
        color.insert(1, 'firebrick')
        labels.insert(1, 'clean pre-LP')
        labels[-1] = 'clean LP'

    dvars = []
    print('Computing dvars...')
    for in_file in img_files:
        temp = nac.compute_dvars(in_file=in_file, in_mask=mask_file)[1]
        dvars.append(np.hstack((temp.mean(), temp)))
        del temp

    small_sz = 2
    fig = plt.figure(figsize=(def_img_size * 1.5,
                              def_img_size + ((Ncarpet - 2) * 1)))
    row_used = 0
    if np.sum(fd_idx) > 0:  # if FD data is available
        row_used = row_used + small_sz
        ax0 = plt.subplot2grid((total_sz, 1), (0, 0), rowspan=small_sz)
        ax0.plot(y)
        ax0.scatter(np.arange(0, Ntrs), y, s=10)
        curr = 0
        if Nremove[curr] > 0:
            info = scipy.ndimage.label(sc_idx[curr])
            for cluster in np.arange(1, info[1] + 1):
                temp = np.where(info[0] == cluster)[0]
                ax0.axvspan(temp.min() - bound,
                            temp.max() + bound,
                            alpha=.5,
                            color='red')
        ax0.set_ylabel(FD)

        for side in ["top", "right", "bottom"]:
            ax0.spines[side].set_color('none')
            ax0.spines[side].set_visible(False)

        ax0.set_xticks([])
        ax0.set_xlim((-.5, Ntrs - .5))
        ax0.spines["left"].set_position(('outward', 10))

    ax_d = plt.subplot2grid((total_sz, 1), (row_used, 0), rowspan=small_sz)
    for iplot in np.arange(len(dvars)):
        ax_d.plot(dvars[iplot], color=color[iplot], label=labels[iplot])
    ax_d.set_ylabel('DVARS')
    for side in ["top", "right", "bottom"]:
        ax_d.spines[side].set_color('none')
        ax_d.spines[side].set_visible(False)
    ax_d.set_xticks([])
    ax_d.set_xlim((-.5, Ntrs - .5))
    ax_d.spines["left"].set_position(('outward', 10))
    ax_d.legend(fontsize=fontsize - 2)
    row_used = row_used + small_sz

    st = 0
    carpet_each = int((total_sz - row_used) / Ncarpet)
    for idx, img_curr in enumerate(imgs):
        ax_curr = plt.subplot2grid((total_sz, 1), (row_used + st, 0),
                                   rowspan=carpet_each)
        fig = plotting.plot_carpet(img_curr, figure=fig, axes=ax_curr)
        ax_curr.set_ylabel(y_labels[idx])
        for side in ["bottom", "left"]:
            ax_curr.spines[side].set_position(('outward', 10))

        if idx < len(imgs) - 1:
            ax_curr.spines["bottom"].set_visible(False)
            ax_curr.set_xticklabels('')
            ax_curr.set_xlabel('')
            st = st + carpet_each

    file_carpet_plot = 'Carpet_plots' + png_append
    fig.savefig(pjoin(out_figure_path, file_carpet_plot))
    plt.close()
    del fig, ax0, ax_curr, ax_d, dvars
    os.remove(mask_file)
    print('Carpet/DVARS plots saved')
    if lp_filter:
        os.remove(temp_img_file)
        del temp_img

    # Display T-stat maps for nuisance regressors
    # create mean img
    img_size = (img.shape[0], img.shape[1], img.shape[2])
    mean_img = nb.Nifti1Image(np.reshape(data_mean, img_size), img.affine)
    mx = []
    for idx, col in enumerate(df.columns):
        if not 'drift' in col and not constant in col:
            con_vector = np.zeros((1, df.shape[1]))
            con_vector[0, idx] = 1
            con = results.Tcontrast(con_vector)
            mx.append(np.max(np.absolute([con.t.min(), con.t.max()])))
    mx = .8 * np.max(mx)
    t_png = 'Tstat_'
    file_tstat = []
    for idx, col in enumerate(df.columns):
        if not 'drift' in col and not constant in col:
            con_vector = np.zeros((1, df.shape[1]))
            con_vector[0, idx] = 1
            con = results.Tcontrast(con_vector)
            m_img = nb.Nifti1Image(np.reshape(con.t, img_size), img.affine)

            title_str = col + ' Tstat'
            fig = plotting.plot_stat_map(m_img,
                                         mean_img,
                                         threshold=3,
                                         colorbar=True,
                                         display_mode='z',
                                         vmax=mx,
                                         title=title_str,
                                         cut_coords=7)
            file_temp = t_png + col + png_append
            fig.savefig(pjoin(out_figure_path, file_temp))
            file_tstat.append({'name': col, 'file': file_temp})
            plt.close()
            del fig, file_temp
            print(title_str + ' map saved')

    # Display R-sq map for nuisance regressors
    m_img = nb.Nifti1Image(np.reshape(rsquare, img_size), img.affine)
    title_str = 'Nuisance Rsq'
    mx = .95 * rsquare.max()
    fig = plotting.plot_stat_map(m_img,
                                 mean_img,
                                 threshold=.2,
                                 colorbar=True,
                                 display_mode='z',
                                 vmax=mx,
                                 title=title_str,
                                 cut_coords=7)
    file_rsq_map = 'Rsquared' + png_append
    fig.savefig(pjoin(out_figure_path, file_rsq_map))
    plt.close()
    del fig
    print(title_str + ' map saved')

    ######### html report
    templateLoader = jinja2.FileSystemLoader(searchpath="/")
    templateEnv = jinja2.Environment(loader=templateLoader)

    templateVars = {
        "img_file": img_file,
        "save_img_file": save_img_file,
        "Ntrs": Ntrs,
        "tsv_file": tsv_file,
        "col_names": col_names,
        "hp_filter": hp_filter,
        "lp_filter": lp_filter,
        "file_design_matrix": file_design_matrix,
        "file_corr_matrix": file_corr_matrix,
        "file_fd_plot": file_fd_plot,
        "file_rsq_map": file_rsq_map,
        "file_tstat": file_tstat,
        "x_scale": x_scale_html,
        "mtx_scale": mtx_scale,
        "file_carpet_plot": file_carpet_plot,
        "carpet_scale": carpet_scale
    }

    TEMPLATE_FILE = pjoin(os.getcwd(), "report_template.html")
    template = templateEnv.get_template(TEMPLATE_FILE)

    outputText = template.render(templateVars)

    html_file = pjoin(out_figure_path,
                      img_name[0:img_name.find('.')] + '.html')
    with open(html_file, "w") as f:
        f.write(outputText)

    print('')
    print('HTML report: ' + html_file)
    return new_img
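A hedged usage sketch for denoise above; the file paths and confound column names are hypothetical, and hp_filter/lp_filter are cutoff frequencies in Hz:

clean_img = denoise('sub-01_task-rest_bold.nii.gz',
                    'sub-01_confounds.tsv',
                    out_path='derivatives/denoised',
                    col_names=['csf', 'white_matter', 'trans_x'],
                    hp_filter=0.008, lp_filter=0.08)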
Example #33
# timing
n_scans = 128
tr = 2.4
frame_times = np.linspace(0.5 * tr, (n_scans - .5) * tr, n_scans)

# data
data = datasets.fetch_localizer_first_level()
paradigm_file = data.paradigm
fmri_img = data.epi_img

### Design matrix ########################################

paradigm = pd.read_csv(paradigm_file, sep=' ', header=None, index_col=None)
paradigm.columns = ['session', 'name', 'onset']
design_matrix = make_design_matrix(
    frame_times, paradigm, hrf_model='canonical with derivative',
    drift_model="cosine", period_cut=128)

### Perform a GLM analysis ########################################

fmri_glm = FirstLevelGLM().fit(fmri_img, design_matrix)

### Estimate contrasts #########################################

# Specify the contrasts
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])

contrasts["audio"] = contrasts["clicDaudio"] + contrasts["clicGaudio"] +\
    contrasts["calculaudio"] + contrasts["phraseaudio"]
Example #34
def do_subject_glm(subject_id):
    subject_output_dir = os.path.join(output_dir, subject_id)

    # make design matrices
    design_matrices = []
    func = []
    anat = os.path.join(subject_output_dir, "anatomy", "whighres001_brain.nii")
    for run_path in sorted(
            glob.glob(
                os.path.join(data_dir, subject_id,
                             "model/model001/onsets/task*"))):
        run_id = os.path.basename(run_path)
        run_func = glob.glob(
            os.path.join(subject_output_dir, "BOLD", run_id, "wrbold*.nii"))
        assert len(run_func) == 1
        run_func = run_func[0]
        run_onset_paths = sorted(
            glob.glob(
                os.path.join(data_dir, subject_id,
                             "model/model001/onsets/%s/*" % run_id)))
        onsets = [np.loadtxt(path) for path in run_onset_paths]
        conditions = np.hstack([[condition_keys["cond%03i" % (c + 1)]] *
                                len(onsets[c])
                                for c in range(len(run_onset_paths))])
        onsets = np.vstack(onsets)
        onsets *= tr
        run_func = nibabel.load(run_func)
        func.append(run_func)
        n_scans = run_func.shape[-1]
        onset, duration, modulation = onsets.T

        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        paradigm = pd.DataFrame(
            dict(name=conditions,
                 onset=onset,
                 duration=duration,
                 modulation=modulation))
        design_matrix = make_design_matrix(frametimes,
                                           paradigm,
                                           hrf_model=hrf_model,
                                           drift_model=drift_model,
                                           period_cut=hfcut)
        design_matrices.append(design_matrix)
    n_runs = len(func)

    # specify contrasts
    _, _, names = check_design_matrix(design_matrix)
    n_columns = len(names)
    contrast_matrix = np.eye(n_columns)
    contrasts = {}
    for c in range(len(condition_keys)):
        contrasts[names[2 * c]] = contrast_matrix[2 * c]
    contrasts["avg"] = np.mean(contrasts.values(), axis=0)

    # more interesting contrasts
    contrasts_ = {}
    for contrast, val in contrasts.items():
        if not contrast == "avg":
            contrasts_["%s_minus_avg" % contrast] = val - contrasts["avg"]
    contrasts = contrasts_

    # fit GLM
    from nilearn.image import smooth_img
    func = smooth_img(func, fwhm=8.)
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(func, [
        check_design_matrix(design_matrix)[1]
        for design_matrix in design_matrices
    ],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(subject_output_dir, "mask.nii")
    print("Saving mask image to %s ..." % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrast maps
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * n_runs,
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(out_map, map_path)
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # # generate stats report
    # stats_report_filename = os.path.join(subject_output_dir, "reports",
    #                                      "report_stats.html")
    # generate_subject_stats_report(
    #     stats_report_filename, contrasts, z_maps, fmri_glm.mask, anat=anat,
    #     threshold=2.3, cluster_th=15, design_matrices=design_matrices, TR=tr,
    #     subject_id="sub001", n_scans=n_scans, hfcut=hfcut,
    #     paradigm=paradigm, frametimes=frametimes,
    #     drift_model=drift_model, hrf_model=hrf_model)
    # ProgressReport().finish_dir(subject_output_dir)

    return dict(subject_id=subject_id,
                mask=mask_path,
                effects_maps=effects_maps,
                z_maps=z_maps,
                contrasts=contrasts)
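A hypothetical driver for do_subject_glm, assuming the module-level names it relies on (output_dir, data_dir, condition_keys, tr, hrf_model, drift_model, hfcut) are already defined; joblib parallelizes across subjects. Sketch only:

from joblib import Parallel, delayed

subject_ids = ["sub001", "sub002"]  # hypothetical subject list
results = Parallel(n_jobs=2)(
    delayed(do_subject_glm)(subject_id) for subject_id in subject_ids)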
Example #37
0
def first_level(analysis, block, space, deriv_dir):
    analyses = []
    for paradigm, _, ents in block.get_design_matrix(block.model['HRF_variables'],
                                                     mode='sparse'):
        preproc_files = analysis.layout.get(type='preproc', space=space, **ents)
        if len(preproc_files) == 0:
            raise ValueError("No PREPROC files found")

        if len(preproc_files) != 1:
            print(preproc_files)
            raise ValueError("Too many potential PREPROC files")

        fname = preproc_files[0].filename

        img = nb.load(fname)
        TR = img.header.get_zooms()[3]
        vols = img.shape[3]

        # Get dense portion of design matrix once TR is known
        _, confounds, _ = block.get_design_matrix(mode='dense',
                                                  sampling_rate=1/TR, **ents)[0]
        names = [col for col in confounds.columns
                 if col.startswith('NonSteadyStateOutlier') or
                 col in block.model['variables']]

        mat = dm.make_design_matrix(
            frame_times=np.arange(vols) * TR,
            paradigm=paradigm.rename(columns={'condition': 'trial_type',
                                              'amplitude': 'modulation'}),
            add_regs=confounds[names].fillna(0),
            add_reg_names=names,
            drift_model=None if 'Cosine00' in names else 'cosine',
            )

        preproc_ents = analysis.layout.parse_file_entities(fname)

        dm_ents = {k: v for k, v in preproc_ents.items()
                   if k in ('subject', 'session', 'task')}

        dm_ents['type'] = 'design'
        design_fname = op.join(deriv_dir,
                               analysis.layout.build_path(dm_ents, strict=True))
        os.makedirs(op.dirname(design_fname), exist_ok=True)
        mat.to_csv(design_fname, sep='\t')
        plt.set_cmap('viridis')
        plot_and_save(design_fname.replace('.tsv', '.svg'),
                      nis.reporting.plot_design_matrix, mat)

        corr_ents = dm_ents.copy()
        corr_ents['type'] = 'corr'
        corr_fname = op.join(deriv_dir,
                             analysis.layout.build_path(corr_ents, strict=True))
        plot_and_save(corr_fname, plot_corr_matrix,
                      mat.drop(columns=['constant']).corr(),
                      len(block.model['HRF_variables']))

        job_desc = {
            'ents': ents,
            'subject_id': ents['subject'],
            'dataset': analysis.layout.root,
            'model_name': analysis.model['name'],
            'design_matrix_svg': design_fname.replace('.tsv', '.svg'),
            'correlation_matrix_svg': corr_fname,
            }

        cnames = [contrast['name'] for contrast in block.contrasts] + block.model['HRF_variables']
        contrast_matrix = []
        if cnames:
            contrasts_ents = corr_ents.copy()
            contrasts_ents['type'] = 'contrasts'
            contrasts_fname = op.join(
                deriv_dir,
                analysis.layout.build_path(contrasts_ents, strict=True))

            contrast_matrix = expand_contrast_matrix(
                block.get_contrasts(cnames, **ents)[0][0], mat)
            plot_and_save(contrasts_fname, plot_contrast_matrix,
                          contrast_matrix.drop(['constant'], 'index'),
                          ornt='horizontal')

            job_desc['contrasts_svg'] = contrasts_fname

        brainmask = analysis.layout.get(type='brainmask', space=space,
                                        **ents)[0]
        fmri_glm = None

        for contrast in contrast_matrix:
            stat_ents = preproc_ents.copy()
            stat_ents.pop('modality', None)
            stat_ents.update({'contrast': snake_to_camel(contrast),
                              'type': 'stat'})
            stat_fname = op.join(deriv_dir,
                                 analysis.layout.build_path(stat_ents,
                                                            strict=True))

            ortho_ents = stat_ents.copy()
            ortho_ents['type'] = 'ortho'
            ortho_fname = op.join(deriv_dir,
                                  analysis.layout.build_path(ortho_ents,
                                                             strict=True))

            desc = {'name': contrast, 'image_file': ortho_fname}
            if contrast not in block.model['HRF_variables']:
                job_desc.setdefault('contrasts', []).append(desc)
            else:
                job_desc.setdefault('estimates', []).append(desc)

            if op.exists(stat_fname):
                continue

            if fmri_glm is None:
                fmri_glm = level1.FirstLevelModel(mask=brainmask.filename)
                fmri_glm.fit(fname, design_matrices=mat)

            stat_types = [c['type'] for c in block.contrasts if c['name'] == contrast]
            stat_type = stat_types[0] if stat_types else 'T'
            stat = fmri_glm.compute_contrast(contrast_matrix[contrast].values,
                                             {'T': 't', 'F': 'F'}[stat_type])
            stat.to_filename(stat_fname)

            nlp.plot_glass_brain(stat, colorbar=True, plot_abs=False,
                                 display_mode='lyrz', axes=None,
                                 output_file=ortho_fname)

        analyses.append(job_desc)

    return analyses
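The snippet calls a snake_to_camel helper that is not shown here. A minimal sketch of what it presumably does (hypothetical implementation, not the project's own):

def snake_to_camel(string):
    # 'word_one_two' -> 'wordOneTwo': keep the first token, title-case the rest
    words = string.split('_')
    return words[0] + ''.join(word.title() for word in words[1:])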
Example #38
0
n_scans = 128
tr = 2.4
frame_times = np.linspace(0.5 * tr, (n_scans - .5) * tr, n_scans)

# data
data = datasets.fetch_localizer_first_level()
paradigm_file = data.paradigm
fmri_img = data.epi_img

### Design matrix ########################################

paradigm = pd.read_csv(paradigm_file, sep=' ', header=None, index_col=None)
paradigm.columns = ['session', 'name', 'onset']
design_matrix = make_design_matrix(frame_times,
                                   paradigm,
                                   hrf_model='canonical with derivative',
                                   drift_model="cosine",
                                   period_cut=128)

### Perform a GLM analysis ########################################

fmri_glm = FirstLevelGLM().fit(fmri_img, design_matrix)

### Estimate contrasts #########################################

# Specify the contrasts
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])

contrasts["audio"] = contrasts["clicDaudio"] + contrasts["clicGaudio"] +\
# This is just a flag to be able to use the same script for the plotting
if False:
    for study in studies:
        voxel_fn = op.join(folder, study + '.npy')
        # Paradigm file
        paradigm_fn = op.join(folder0, 'onsets.csv')
        ########################################################################
        # Load data and parameters
        n_scans = 144
        t_r = 3.
        ys = np.load(voxel_fn)

        # Create design matrix
        frametimes = np.arange(0, n_scans * t_r, t_r)
        paradigm = experimental_paradigm.paradigm_from_csv(paradigm_fn)
        dm = design_matrix.make_design_matrix(frametimes, paradigm=paradigm)
        modulation = np.array(paradigm)[:, 4]

        # GP parameters
        time_offset = 10
        gamma = 10.
        fmin_max_iter = 50
        n_restarts_optimizer = 10
        n_iter = 3
        normalize_y = False
        optimize = True
        zeros_extremes = True

        # Estimation
        gp = SuperDuperGP(hrf_length=hrf_length, t_r=t_r, oversampling=1./dt,
                          gamma=gamma, modulation=modulation,
Example #40
0
def run_glm(tr, slice_time_ref, paradigm_file, fmri_img, motion_file=None):
    """Run a GLM on surface usingon the given surface images.
    1)    Create a design matrix using the paradigm file
    1bis) Add the motion regressors if the motionn file is given
    2)    Create a first_level_surf_model object and fit the GLM
    3)    Get all possible individual effect contrasts
    :param tr: Repetition time
    :param slice_time_ref: Number of slice by TR
    :param paradigm_file: CSV file that list onsets and at least associated duration and condition
    :param fmri_img: Name of the gifti file that contains surface fMRI data
    :param motion_file: Text file that contains (6) estimated motion regressors
    :return: first_level_surf_model object + original contrasts ([[1 0 0 ... 0], [0 1 0 ... 0]...])
    """
    # Read the paradigm file
    paradigm = pd.read_csv(paradigm_file, sep=',', index_col=None)

    if motion_file is not None:
        # Create a design matrix and add previously estimated motion regressors
        # Read the motion file
        motion_cols = ['m1', 'm2', 'm3', 'm4', 'm5', 'm6']
        motion = pd.read_csv(motion_file,
                             sep="  ",
                             index_col=None,
                             names=motion_cols,
                             engine='python')
        nbr_vol = len(motion[motion.keys()[0]])
        print("Number of scans in motion file: {}".format(nbr_vol))

        # **** Build the design matrix ****
        # Frame timing is determined from the number of scans recorded in
        # the motion file
        t_vect = np.arange(nbr_vol) * tr
        # Create the design matrix without motion drift regressors
        d_matrix = make_design_matrix(t_vect,
                                      paradigm=paradigm,
                                      drift_model='blank')
        # Add motion regressors, re-indexing them by frame time so that
        # they align with the design matrix index
        motion.index = t_vect
        for col in motion_cols:
            d_matrix[col] = motion[col]
    else:
        # Find the number of volumes in the .gii file
        gii_imgs = ng.read(fmri_img)
        nbr_vol = len(gii_imgs.darrays)
        del gii_imgs
        print("Number of scans : {}".format(nbr_vol))
        t_vect = tr * np.arange(nbr_vol)
        d_matrix = make_design_matrix(t_vect,
                                      paradigm=paradigm,
                                      drift_model='blank')

    # **** Perform first level glm ****
    # Setup and fit GLM
    first_level_surf_model = FirstLevelSurfaceModel(
        tr, slice_time_ref, hrf_model='glover + derivative')
    first_level_surf_model = first_level_surf_model.fit(
        fmri_img, design_matrices=d_matrix)

    # **** Estimate contrasts ****
    # Specify the contrasts
    design_matrix = first_level_surf_model.design_matrices_[0]
    contrast_matrix = np.eye(design_matrix.shape[1])
    contrasts = dict([(column, contrast_matrix[i])
                      for i, column in enumerate(design_matrix.columns)])

    return first_level_surf_model, contrasts
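A hedged usage sketch for run_glm above, with hypothetical file names, assuming FirstLevelSurfaceModel exposes the same compute_contrast method as nistats' FirstLevelModel:

model, contrasts = run_glm(tr=2.0, slice_time_ref=0.5,
                           paradigm_file='paradigm.csv',
                           fmri_img='lh.func.gii',
                           motion_file='motion.txt')
for name, con in contrasts.items():
    # One z-map per design-matrix column
    z_map = model.compute_contrast(con, output_type='z_score')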
Example #41
0
def run_glm(tr,
            slice_time_ref,
            paradigm_file,
            fmri_img,
            output_dir,
            motion_file=None):
    design_matrix_png_filename = op.join(output_dir, "design_matrix.png")
    design_matrix_npy_filename = op.join(output_dir, "design_matrix")

    paradigm = pd.read_csv(paradigm_file, sep='\t', index_col=None)

    # Find the number of volumes in the .nii file
    nii_imgs = nb.load(fmri_img)
    nbr_vol = nii_imgs.get_data().shape[3]
    del nii_imgs
    print("Number of scans : {}".format(nbr_vol))

    # **** Build the design matrix ****
    if motion_file is not None:
        motion_cols = ['m1', 'm2', 'm3', 'm4', 'm5', 'm6']
        motion = pd.read_csv(motion_file,
                             sep="  ",
                             index_col=None,
                             names=motion_cols,
                             engine='python')

        if len(motion[motion.keys()[0]]) != nbr_vol:
            warnings.warn("Number of scan and row countof the motion file "
                          "dismatch.")
            exit(-1)

        # Frame timing is determined from the number of scans recorded in
        # the motion file
        t_vect = np.arange(nbr_vol) * tr
        # Create the design matrix without motion drift regressors
        d_matrix = make_design_matrix(t_vect,
                                      paradigm=paradigm,
                                      drift_model='blank')
        # Add motion regressors, re-indexing them by frame time so that
        # they align with the design matrix index
        motion.index = t_vect
        for col in motion_cols:
            d_matrix[col] = motion[col]
    else:
        t_vect = tr * np.arange(nbr_vol)
        d_matrix = make_design_matrix(t_vect,
                                      paradigm=paradigm,
                                      drift_model='blank')

    # Plot the design matrix
    x = d_matrix.values
    np.save(design_matrix_npy_filename, x)
    print("Design matrix save as: {}".format(design_matrix_npy_filename))

    fig = plt.figure()
    plt.imshow(x, aspect="auto", interpolation="nearest")
    fig.savefig(design_matrix_png_filename)
    print("Design matrix save as: {}".format(design_matrix_png_filename))
    # plot_design_matrix(d_matrix)

    # **** Perform first level glm ****
    # Setup and fit GLM
    print("Run GLM")
    first_level_model = FirstLevelModel(tr,
                                        slice_time_ref,
                                        hrf_model='glover + derivative',
                                        verbose=2)
    first_level_model = first_level_model.fit(fmri_img,
                                              design_matrices=d_matrix)

    # **** Estimate contrasts ****
    # Specify the contrasts
    design_matrix = first_level_model.design_matrices_[0]
    contrast_matrix = np.eye(design_matrix.shape[1])
    cntrst = dict([(column, contrast_matrix[i])
                   for i, column in enumerate(design_matrix.columns)])

    return first_level_model, cntrst
Example #42
0
    paths_patterns={'paradigm': 'paradigms/acquisition1/*BOLD*1b.csv'})
paradigm_file = heroes['paradigm'][0]

# Read the paradigm
from nistats import experimental_paradigm
paradigm = experimental_paradigm.paradigm_from_csv(paradigm_file)

# Create the design matrix
import numpy as np
import matplotlib.pyplot as plt
import nibabel
from nistats.design_matrix import make_design_matrix, plot_design_matrix
tr = 2.5
n_scans = nibabel.load(func_file).get_data().shape[-1]
frametimes = np.arange(0, n_scans * tr, tr)
design_matrix = make_design_matrix(frametimes, paradigm)
plot_design_matrix(design_matrix)
plt.tight_layout()

# Fit GLM
print('Fitting a GLM')
from nistats.first_level_model import FirstLevelModel
fmri_glm = FirstLevelModel(tr)
fmri_glm = fmri_glm.fit(func_file, design_matrices=design_matrix)

# Specify the contrasts
contrasts = {}
n_columns = len(design_matrix.columns)
for n, name in enumerate(design_matrix.columns[:3]):
    contrasts[name] = np.zeros((n_columns, ))
    contrasts[name][n] = 1
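To inspect the result, each of the three contrasts defined above can be turned into a z-map and displayed, e.g. with nilearn's plot_stat_map (a sketch; the threshold is illustrative, not from the original snippet):

from nilearn import plotting

for name, con in contrasts.items():
    z_map = fmri_glm.compute_contrast(con, output_type='z_score')
    plotting.plot_stat_map(z_map, threshold=3.0, title=name)
plotting.show()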
Example #43
0
def first_level(subject_dic, mask_img, compcorr=True, smooth=None):
    """ Run the first-level analysis (GLM fitting + statistical maps)
    in a given subject
    
    Parameters
    ----------
    subject_dic: dict,
                 exhaustive description of an individual acquisition
    additional_regressors: dict or None,
                 additional regressors provided as an already sampled 
                 design_matrix
                 dictionary keys are session_ids
    compcorr: Bool, optional,
              whetherconfound estimation and removal should be carried out or not
    smooth: float or None, optional,
            how much the data should spatially smoothed during masking
    """
    import nibabel as nib
    import numpy as np
    from nistats.design_matrix import make_design_matrix
    from nilearn.image import high_variance_confounds
    import pandas as pd
    from nistats.first_level_model import FirstLevelModel

    # experimental paradigm meta-params
    motion_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    hrf_model = 'spm'
    hfcut = subject_dic['hfcut']
    drift_model = subject_dic['drift_model']
    tr = subject_dic['TR']

    for session_id, fmri_path, onset, motion_path in zip(
            subject_dic['session_id'], subject_dic['func'],
            subject_dic['onset'], subject_dic['realignment_parameters']):
        n_scans = nib.load(fmri_path).shape[3]

        # motion parameters
        motion = np.loadtxt(motion_path)
        # define the time stamps for different images
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)

        confounds = motion
        confound_names = motion_names
        if compcorr:
            confounds = high_variance_confounds(fmri_path, mask_img=mask_img)
            confounds = np.hstack((confounds, motion))
            confound_names = ['conf_%d' % i for i in range(5)] + motion_names

        paradigm = pd.read_csv(onset, sep='\t')
        trial_type = paradigm.trial_type.values
        audio_right_hands = ['audio_right_hand_%d' % i for i in range(5)]
        audio_left_hands = ['audio_left_hand_%d' % i for i in range(5)]
        video_right_hands = ['video_right_hand_%d' % i for i in range(5)]
        video_left_hands = ['video_left_hand_%d' % i for i in range(5)]
        trial_type[trial_type == 'audio_right_hand'] = audio_right_hands
        trial_type[trial_type == 'audio_left_hand'] = audio_left_hands
        trial_type[trial_type == 'video_right_hand'] = video_right_hands
        trial_type[trial_type == 'video_left_hand'] = video_left_hands

        # create the design matrix
        design_matrix = make_design_matrix(frametimes,
                                           paradigm,
                                           hrf_model=hrf_model,
                                           drift_model=drift_model,
                                           period_cut=hfcut,
                                           add_regs=confounds,
                                           add_reg_names=confound_names)

        # create the relevant contrasts
        names = design_matrix.columns
        n_regressors = len(names)
        interest = audio_right_hands + audio_left_hands + video_right_hands + video_left_hands
        con = dict([(names[i], np.eye(n_regressors)[i])
                    for i in range(n_regressors)])
        contrasts = dict([(contrast, con[contrast]) for contrast in interest])

        subject_session_output_dir = os.path.join(subject_dic['output_dir'],
                                                  'res_stats_%s' % session_id)

        if not os.path.exists(subject_session_output_dir):
            os.makedirs(subject_session_output_dir)
        design_matrix.to_csv(
            os.path.join(subject_session_output_dir, 'design_matrix.csv'))

        fmri_glm = FirstLevelModel(mask=mask_img,
                                   t_r=tr,
                                   slice_time_ref=.5,
                                   smoothing_fwhm=smooth).fit(
                                       fmri_path,
                                       design_matrices=design_matrix)

        # compute contrasts
        for contrast_id, contrast_val in contrasts.items():
            print("\tcontrast id: %s" % contrast_id)

            # store stat maps to disk
            for map_type in ['z_score']:
                stat_map = fmri_glm.compute_contrast(contrast_val,
                                                     output_type=map_type)
                map_dir = os.path.join(subject_session_output_dir,
                                       '%s_maps' % map_type)
                if not os.path.exists(map_dir):
                    os.makedirs(map_dir)
                map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
                print "\t\tWriting %s ..." % map_path
                stat_map.to_filename(map_path)
Example #44
0
    def _run_interface(self, runtime):
        import matplotlib
        matplotlib.use('Agg')
        import seaborn as sns
        from matplotlib import pyplot as plt
        sns.set_style('white')
        plt.rcParams['svg.fonttype'] = 'none'
        plt.rcParams['image.interpolation'] = 'nearest'

        info = self.inputs.session_info

        img = nb.load(self.inputs.bold_file)
        vols = img.shape[3]

        events = pd.read_hdf(info['events'], key='events')
        confounds = pd.read_hdf(info['confounds'], key='confounds')
        if isdefined(self.inputs.contrast_info):
            contrast_spec = pd.read_hdf(self.inputs.contrast_info,
                                        key='contrasts')
        else:
            contrast_spec = pd.DataFrame()

        mat = dm.make_design_matrix(
            frame_times=np.arange(vols) * info['repetition_time'],
            paradigm=events.rename(columns={
                'condition': 'trial_type',
                'amplitude': 'modulation'
            }),
            add_regs=confounds,
            add_reg_names=confounds.columns.tolist(),
            drift_model=None if 'Cosine00' in confounds.columns else 'cosine',
        )

        exp_vars = events['condition'].unique().tolist()

        contrast_matrix, contrast_types = build_contrast_matrix(
            contrast_spec, mat, exp_vars)

        plt.set_cmap('viridis')
        plot_and_save('design.svg', nis.reporting.plot_design_matrix, mat)
        self._results['design_matrix_plot'] = os.path.join(
            runtime.cwd, 'design.svg')

        plot_and_save('correlation.svg', plot_corr_matrix,
                      mat.drop(columns='constant').corr(), len(exp_vars))
        self._results['correlation_matrix_plot'] = os.path.join(
            runtime.cwd, 'correlation.svg')

        plot_and_save('contrast.svg',
                      plot_contrast_matrix,
                      contrast_matrix.drop(['constant'], 'index'),
                      ornt='horizontal')
        self._results['contrast_matrix_plot'] = os.path.join(
            runtime.cwd, 'contrast.svg')

        mask_file = self.inputs.mask_file
        if not isdefined(mask_file):
            mask_file = None
        flm = level1.FirstLevelModel(mask=mask_file)
        flm.fit(img, design_matrices=mat)

        estimate_maps = []
        contrast_maps = []
        estimate_metadata = []
        contrast_metadata = []
        estimate_map_plots = []
        contrast_map_plots = []
        stat_fmt = os.path.join(runtime.cwd, '{}.nii.gz').format
        plot_fmt = os.path.join(runtime.cwd, '{}.png').format
        for contrast, ctype in zip(contrast_matrix, contrast_types):
            es = flm.compute_contrast(contrast_matrix[contrast].values, {
                'T': 't',
                'F': 'F'
            }[ctype],
                                      output_type='effect_size')
            es_fname = stat_fmt(contrast)
            es.to_filename(es_fname)
            plot_fname = plot_fmt(contrast)
            nlp.plot_glass_brain(es,
                                 colorbar=True,
                                 plot_abs=False,
                                 display_mode='lyrz',
                                 axes=None,
                                 output_file=plot_fname)

            if contrast in exp_vars:
                estimate_maps.append(es_fname)
                estimate_map_plots.append(plot_fname)
                estimate_metadata.append({
                    'contrast': contrast,
                    'type': 'effect'
                })
            else:
                contrast_maps.append(es_fname)
                contrast_map_plots.append(plot_fname)
                contrast_metadata.append({
                    'contrast': contrast,
                    'type': 'effect'
                })
        self._results['estimate_maps'] = estimate_maps
        self._results['contrast_maps'] = contrast_maps
        self._results['estimate_metadata'] = estimate_metadata
        self._results['contrast_metadata'] = contrast_metadata
        self._results['estimate_map_plots'] = estimate_map_plots
        self._results['contrast_map_plots'] = contrast_map_plots

        return runtime
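Examples #37 and #44 both rely on a plot_and_save helper that neither snippet defines. A minimal sketch of the assumed behavior — call a plotting function, then write the current matplotlib figure to disk:

import matplotlib.pyplot as plt

def plot_and_save(fname, plotter, *args, **kwargs):
    # Render with the supplied plotting function, save, and free the figure.
    plotter(*args, **kwargs)
    plt.savefig(fname)
    plt.close()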
Example #45
0
n_scans = 96
epoch_duration = 6 * tr  # duration in seconds
conditions = ['rest', 'active'] * 8
n_blocks = len(conditions)
duration = epoch_duration * np.ones(n_blocks)
onset = np.linspace(0, (n_blocks - 1) * epoch_duration, n_blocks)
paradigm = pd.DataFrame(
    {'onset': onset, 'duration': duration, 'name': conditions})

# construct design matrix
frame_times = np.linspace(0, (n_scans - 1) * tr, n_scans)
drift_model = 'Cosine'
period_cut = 4. * epoch_duration
hrf_model = 'glover + derivative'
design_matrix = make_design_matrix(
    frame_times, paradigm, hrf_model=hrf_model, drift_model=drift_model,
    period_cut=period_cut)

# specify contrasts
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])

# Specify one interesting contrast
contrasts = {'active-rest': contrasts['active'] - contrasts['rest']}

# fit GLM
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FirstLevelGLM(noise_model='ar1', standardize=False).fit(
    [fmri_img], design_matrix)
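Before fitting, the block design above is easy to sanity-check visually. A sketch using plot_design_matrix — which, depending on the nistats version, lives in nistats.reporting (as in Example #37) or nistats.design_matrix (as in Example #42); the output filename is hypothetical:

import matplotlib.pyplot as plt
from nistats.reporting import plot_design_matrix  # nistats.design_matrix in older versions

plot_design_matrix(design_matrix)
plt.tight_layout()
plt.savefig('block_design_matrix.png')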
Example #46
0
    n_scans = fmri_img[idx].shape[-1]
    timing = loadmat(getattr(subject_data, "trials_ses%i" % (idx + 1)),
                     squeeze_me=True, struct_as_record=False)

    faces_onsets = timing['onsets'][0].ravel()
    scrambled_onsets = timing['onsets'][1].ravel()
    onsets = np.hstack((faces_onsets, scrambled_onsets))
    onsets *= tr  # because onsets were reported in 'scans' units
    conditions = (['faces'] * len(faces_onsets) +
                  ['scrambled'] * len(scrambled_onsets))
    paradigm = pd.DataFrame({'name': conditions, 'onset': onsets})

    # build design matrix
    frame_times = np.arange(n_scans) * tr
    design_matrix = make_design_matrix(
        frame_times, paradigm, hrf_model=hrf_model, drift_model=drift_model,
        period_cut=period_cut)
    design_matrices.append(design_matrix)

# specify contrasts
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])
# more interesting contrasts
contrasts = {
    'faces-scrambled': contrasts['faces'] - contrasts['scrambled'],
    'scrambled-faces': -contrasts['faces'] + contrasts['scrambled'],
    'effects_of_interest': np.vstack((contrasts['faces'],
                                      contrasts['scrambled']))
    }
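The 'effects_of_interest' entry stacks two contrast rows, i.e. an F-contrast. With a fitted model (the fit itself is not shown in this snippet), it could be evaluated as an F-test via compute_contrast, assuming the nistats FirstLevelModel API:

# Hypothetical: fmri_glm is a FirstLevelModel already fitted on these runs.
z_map = fmri_glm.compute_contrast(contrasts['effects_of_interest'],
                                  stat_type='F', output_type='z_score')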
Example #47
0
def do_subject_glm(subject_id):
    subject_output_dir = os.path.join(output_dir, subject_id)

    # make design matrices
    design_matrices = []
    func = []
    anat = os.path.join(subject_output_dir, "anatomy", "whighres001_brain.nii")
    for run_path in sorted(glob.glob(os.path.join(
            data_dir, subject_id, "model/model001/onsets/task*"))):
        run_id = os.path.basename(run_path)
        run_func = glob.glob(os.path.join(subject_output_dir, "BOLD", run_id,
                                          "wrbold*.nii"))
        assert len(run_func) == 1
        run_func = run_func[0]
        run_onset_paths = sorted(glob.glob(os.path.join(
            data_dir, subject_id, "model/model001/onsets/%s/*" % run_id)))
        onsets = [np.loadtxt(path) for path in run_onset_paths]
        conditions = np.hstack(
            [[condition_keys["cond%03i" % (c + 1)]] * len(onsets[c])
             for c in range(len(run_onset_paths))])
        onsets = np.vstack(onsets)
        onsets *= tr
        run_func = nibabel.load(run_func)
        func.append(run_func)
        n_scans = run_func.shape[-1]
        onset, duration, modulation = onsets.T

        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        paradigm = pd.DataFrame(dict(name=conditions, onset=onset,
                                     duration=duration, modulation=modulation))
        design_matrix = make_design_matrix(frametimes, paradigm,
                                           hrf_model=hrf_model,
                                           drift_model=drift_model,
                                           period_cut=hfcut)
        design_matrices.append(design_matrix)
    n_runs = len(func)

    # specify contrasts
    _, _, names = check_design_matrix(design_matrix)
    n_columns = len(names)
    contrast_matrix = np.eye(n_columns)
    contrasts = {}
    for c in range(len(condition_keys)):
        contrasts[names[2 * c]] = contrast_matrix[2 * c]
    contrasts["avg"] = np.mean(contrasts.values(), axis=0)

    # more interesting contrasts
    contrasts_ = {}
    for contrast, val in contrasts.items():
        if not contrast == "avg":
            contrasts_["%s_minus_avg" % contrast] = val - contrasts["avg"]
    contrasts = contrasts_

    # fit GLM
    from nilearn.image import smooth_img
    func = smooth_img(func, fwhm=8.)
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(func, [check_design_matrix(design_matrix)[1]
                                      for design_matrix in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(subject_output_dir, "mask.nii")
    print "Saving mask image to %s ..." % mask_path
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrast maps
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * n_runs, con_id=contrast_id, output_z=True,
            output_stat=True, output_effects=True, output_variance=True)
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir, '%s.nii.gz' % contrast_id)
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # # generate stats report
    # stats_report_filename = os.path.join(subject_output_dir, "reports",
    #                                      "report_stats.html")
    # generate_subject_stats_report(
    #     stats_report_filename, contrasts, z_maps, fmri_glm.mask, anat=anat,
    #     threshold=2.3, cluster_th=15, design_matrices=design_matrices, TR=tr,
    #     subject_id="sub001", n_scans=n_scans, hfcut=hfcut,
    #     paradigm=paradigm, frametimes=frametimes,
    #     drift_model=drift_model, hrf_model=hrf_model)
    # ProgressReport().finish_dir(subject_output_dir)

    return dict(subject_id=subject_id, mask=mask_path,
                effects_maps=effects_maps, z_maps=z_maps, contrasts=contrasts)