Example #1
def test_first_level_predictions_r_square():
    shapes, rk = [(10, 10, 10, 25)], 3
    mask, fmri_data, design_matrices =\
        generate_fake_fmri_data_and_design(shapes, rk)

    for i in range(len(design_matrices)):
        design_matrices[i].iloc[:, 0] = 1

    model = FirstLevelModel(mask_img=mask,
                            signal_scaling=False,
                            minimize_memory=False,
                            noise_model='ols')
    model.fit(fmri_data, design_matrices=design_matrices)

    pred = model.predicted[0]
    data = fmri_data[0]
    r_square_3d = model.r_square[0]

    y_predicted = model.masker_.transform(pred)
    y_measured = model.masker_.transform(data)

    assert_almost_equal(np.mean(y_predicted - y_measured), 0)

    r_square_2d = model.masker_.transform(r_square_3d)
    assert_array_less(0., r_square_2d)
Example #2
def test_check_first_level_contrast():
    from nilearn.glm.second_level.second_level import \
        _check_first_level_contrast
    _check_first_level_contrast(["foo"], None)  # Should not do anything
    with pytest.raises(ValueError, match="If second_level_input was a list"):
        _check_first_level_contrast([FirstLevelModel()], None)
    _check_first_level_contrast([FirstLevelModel()], "foo")
Example #3
def test_first_level_with_no_signal_scaling():
    """
    test to ensure that the FirstLevelModel works correctly with a
    signal_scaling==False. In particular, that derived theta are correct for a
    constant design matrix with a single valued fmri image
    """
    shapes, rk = [(3, 1, 1, 2)], 1
    fmri_data = list()
    design_matrices = list()
    design_matrices.append(
        pd.DataFrame(np.ones((shapes[0][-1], rk)),
                     columns=list('abcdefghijklmnopqrstuvwxyz')[:rk]))
    # Check error with invalid signal_scaling values
    with pytest.raises(ValueError, match="signal_scaling must be"):
        FirstLevelModel(mask_img=False,
                        noise_model='ols',
                        signal_scaling="foo")

    first_level = FirstLevelModel(mask_img=False,
                                  noise_model='ols',
                                  signal_scaling=False)
    fmri_data.append(Nifti1Image(np.zeros((1, 1, 1, 2)) + 6, np.eye(4)))

    first_level.fit(fmri_data, design_matrices=design_matrices)
    # trivial test of signal_scaling value
    assert first_level.signal_scaling is False
    # assert that our design matrix has one constant
    assert first_level.design_matrices_[0].equals(
        pd.DataFrame([1.0, 1.0], columns=['a']))
    # assert that we only have one theta as there is only one voxel in our image
    assert first_level.results_[0][0].theta.shape == (1, 1)
    # assert that the theta is equal to the one voxel value
    assert_almost_equal(first_level.results_[0][0].theta[0, 0], 6.0, 2)
Example #4
def test_first_level_design_creation():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        model = FirstLevelModel(t_r,
                                slice_time_ref,
                                mask_img=mask,
                                drift_model='polynomial',
                                drift_order=3)
        model = model.fit(func_img, events)
        frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])
        # check design computation is identical
        n_scans = get_data(func_img).shape[3]
        start_time = slice_time_ref * t_r
        end_time = (n_scans - 1 + slice_time_ref) * t_r
        frame_times = np.linspace(start_time, end_time, n_scans)
        design = make_first_level_design_matrix(frame_times,
                                                events,
                                                drift_model='polynomial',
                                                drift_order=3)
        frame2, X2, names2 = check_design_matrix(design)
        assert_array_equal(frame1, frame2)
        assert_array_equal(X1, X2)
        assert_array_equal(names1, names2)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del FUNCFILE, mask, model, func_img
Example #5
def report_flm_fiac():  # pragma: no cover
    data = datasets.func.fetch_fiac_first_level()
    fmri_img = [data['func1'], data['func2']]

    from nilearn.image import mean_img
    mean_img_ = mean_img(fmri_img[0])

    design_files = [data['design_matrix1'], data['design_matrix2']]
    design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

    fmri_glm = FirstLevelModel(mask_img=data['mask'], minimize_memory=True)
    fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

    n_columns = design_matrices[0].shape[1]

    contrasts = {
        'SStSSp_minus_DStDSp': _pad_vector([1, 0, 0, -1], n_columns),
        'DStDSp_minus_SStSSp': _pad_vector([-1, 0, 0, 1], n_columns),
        'DSt_minus_SSt': _pad_vector([-1, -1, 1, 1], n_columns),
        'DSp_minus_SSp': _pad_vector([-1, 1, -1, 1], n_columns),
        'DSt_minus_SSt_for_DSp': _pad_vector([0, -1, 0, 1], n_columns),
        'DSp_minus_SSp_for_DSt': _pad_vector([0, 0, -1, 1], n_columns),
        'Deactivation': _pad_vector([-1, -1, -1, -1, 4], n_columns),
        'Effects_of_interest': np.eye(n_columns)[:5]
    }
    report = make_glm_report(
        fmri_glm,
        contrasts,
        bg_img=mean_img_,
        height_control='fdr',
    )
    output_filename = 'generated_report_flm_fiac.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
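# _pad_vector is a module-level helper that is not shown in this excerpt; a
# plausible minimal sketch (an assumption, not the original implementation):
# it pads a short contrast with zeros so it has one entry per design column.
import numpy as np

def _pad_vector(contrast_, n_columns):
    """Append zeros so the contrast matches the number of design columns."""
    return np.hstack((contrast_, np.zeros(n_columns - len(contrast_))))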
Example #6
def test_first_level_residuals():
    shapes, rk = [(10, 10, 10, 100)], 3
    mask, fmri_data, design_matrices =\
        generate_fake_fmri_data_and_design(shapes, rk)

    for i in range(len(design_matrices)):
        design_matrices[i].iloc[:, 0] = 1

    # Check that voxelwise model attributes cannot be
    # accessed if minimize_memory is set to True
    model = FirstLevelModel(mask_img=mask,
                            minimize_memory=True,
                            noise_model='ols')
    model.fit(fmri_data, design_matrices=design_matrices)

    with pytest.raises(ValueError, match="To access voxelwise attributes"):
        residuals = model.residuals[0]

    model = FirstLevelModel(mask_img=mask,
                            minimize_memory=False,
                            noise_model='ols')

    # Check that trying to access residuals without fitting
    # raises an error
    with pytest.raises(ValueError, match="The model has not been fit yet"):
        residuals = model.residuals[0]

    model.fit(fmri_data, design_matrices=design_matrices)

    # For coverage
    with pytest.raises(ValueError, match="attribute must be one of"):
        model._get_voxelwise_model_attribute("foo", True)
    residuals = model.residuals[0]
    mean_residuals = model.masker_.transform(residuals).mean(0)
    assert_array_almost_equal(mean_residuals, 0)
Example #7
def test_flm_reporting(use_method):
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = write_fake_fmri_data_and_design(shapes, rk)
        flm = FirstLevelModel(mask_img=mask).fit(
            fmri_data, design_matrices=design_matrices)
        contrast = np.eye(3)[1]
        if use_method:
            report_flm = flm.generate_report(
                contrast, plot_type='glass', height_control=None,
                min_distance=15, alpha=0.001, threshold=2.78)
        else:
            report_flm = glmr.make_glm_report(flm, contrast, plot_type='glass',
                                              height_control=None,
                                              min_distance=15,
                                              alpha=0.001, threshold=2.78,
            )
        '''
        Catch and raise UnicodeEncodeError in HTMLDocument.get_iframe().
        Python 2's limited unicode support causes HTMLDocument.get_iframe() to
        mishandle certain unicode characters, like the Greek alpha symbol,
        and to raise this error.
        Calling HTMLDocument.get_iframe() here causes the tests
        to fail on Python 2, alerting us if such a situation arises
        due to future modifications.
        '''
        report_iframe = report_flm.get_iframe()
        # So flake8 doesn't complain about not using variable (F841)
        report_iframe
        del mask, flm, fmri_data
Example #8
def test_masking_first_level_model():
    """
    Checks that using NiftiMasker when instantiating
    FirstLevelModel doesn't raise an error when calling
    generate_report().
    """
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices =\
            write_fake_fmri_data_and_design(shapes, rk)
        masker = NiftiMasker(mask_img=mask)
        masker.fit(fmri_data)
        flm = FirstLevelModel(mask_img=masker).fit(
            fmri_data, design_matrices=design_matrices
        )
        contrast = np.eye(3)[1]

        report_flm = flm.generate_report(
            contrast, plot_type='glass', height_control=None,
            min_distance=15, alpha=0.001, threshold=2.78
        )

        report_iframe = report_flm.get_iframe()
        # So flake8 doesn't complain about not using variable (F841)
        report_iframe

        del mask, flm, fmri_data, masker
Example #9
def test_high_level_glm_different_design_matrices():
    # test that one can estimate a contrast when design matrices are different
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(shapes, rk)

    # add a column to the second design matrix
    design_matrices[1]['new'] = np.ones((19, 1))

    # Fit a glm with two sessions and design matrices
    multi_session_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data, design_matrices=design_matrices)
    z_joint = multi_session_model.compute_contrast(
        [np.eye(rk)[:1], np.eye(rk + 1)[:1]], output_type='effect_size')
    assert z_joint.shape == (7, 8, 7)

    # compare the estimated effects to separately fitted models
    model1 = FirstLevelModel(mask_img=mask).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = model1.compute_contrast(np.eye(rk)[:1], output_type='effect_size')
    model2 = FirstLevelModel(mask_img=mask).fit(
        fmri_data[1], design_matrices=design_matrices[1])
    z2 = model2.compute_contrast(np.eye(rk + 1)[:1],
                                 output_type='effect_size')
    assert_almost_equal(get_data(z1) + get_data(z2),
                        2 * get_data(z_joint))
Example #10
def run_glm(dmtx,
            contrasts,
            fmri_data,
            mask_img,
            subject_dic,
            subject_session_output_dir,
            tr,
            slice_time_ref,
            smoothing_fwhm=False):
    """ Run the GLM on a given session and compute contrasts

    Parameters
    ----------
    dmtx : array-like
        the design matrix for the model
    contrasts : dict
        holding the numerical specification of contrasts
    fmri_data : str or os.PathLike
        path to the fMRI data fit by the model
    mask_img : Nifti1Image
        the mask used for the fMRI data
    subject_dic : dict
        subject information (not used in this function body)
    subject_session_output_dir : str
        directory where the contrast maps are written
    tr : float
        repetition time, passed to FirstLevelModel
    slice_time_ref : float
        slice timing reference, passed to FirstLevelModel
    smoothing_fwhm : float or False, optional
        FWHM (in mm) of the spatial smoothing passed to FirstLevelModel
    """
    from nilearn.glm.first_level import FirstLevelModel
    fmri_4d = nib.load(fmri_data)

    # GLM analysis
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FirstLevelModel(mask_img=mask_img,
                               t_r=tr,
                               slice_time_ref=slice_time_ref,
                               smoothing_fwhm=smoothing_fwhm).fit(
                                   fmri_4d, design_matrices=dmtx)

    # compute contrasts
    z_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)

        # store stat maps to disk
        for map_type in ['z_score', 'stat', 'effect_size', 'effect_variance']:
            stat_map = fmri_glm.compute_contrast(contrast_val,
                                                 output_type=map_type)
            map_dir = os.path.join(subject_session_output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            stat_map.to_filename(map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z_score':
                z_maps[contrast_id] = map_path
    return z_maps, fmri_glm
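# A hypothetical invocation of run_glm, shown only to illustrate the expected
# argument types; every path, name, and value below is a placeholder rather
# than something taken from the original pipeline:
if __name__ == '__main__':
    import numpy as np
    import pandas as pd

    design = pd.read_csv('sub-01_design.csv')   # design matrix as a DataFrame
    contrast_specs = {'task_vs_baseline': np.eye(design.shape[1])[0]}
    z_maps, glm = run_glm(dmtx=design,
                          contrasts=contrast_specs,
                          fmri_data='sub-01_bold.nii.gz',
                          mask_img='sub-01_mask.nii.gz',
                          subject_dic={},
                          subject_session_output_dir='results/sub-01',
                          tr=2.0,
                          slice_time_ref=0.)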
Example #11
def test_high_level_glm_with_paths():
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 14)), 3
    with InTemporaryDirectory():
        mask_file, fmri_files, design_files = write_fake_fmri_data_and_design(shapes, rk)
        multi_session_model = FirstLevelModel(mask_img=None).fit(
            fmri_files, design_matrices=design_files)
        z_image = multi_session_model.compute_contrast(np.eye(rk)[1])
        assert_array_equal(z_image.affine, load(mask_file).affine)
        assert get_data(z_image).std() < 3.
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del z_image, fmri_files, multi_session_model
Example #12
def test_explicit_fixed_effects():
    """ tests the fixed effects performed manually/explicitly"""
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices =\
            write_fake_fmri_data_and_design(shapes, rk)
        contrast = np.eye(rk)[1]
        # session 1
        multi_session_model = FirstLevelModel(mask_img=mask).fit(
            fmri_data[0], design_matrices=design_matrices[:1])
        dic1 = multi_session_model.compute_contrast(contrast,
                                                    output_type='all')

        # session 2
        multi_session_model.fit(fmri_data[1],
                                design_matrices=design_matrices[1:])
        dic2 = multi_session_model.compute_contrast(contrast,
                                                    output_type='all')

        # fixed effects model
        multi_session_model.fit(fmri_data, design_matrices=design_matrices)
        fixed_fx_dic = multi_session_model.compute_contrast(contrast,
                                                            output_type='all')

        # manual version
        contrasts = [dic1['effect_size'], dic2['effect_size']]
        variance = [dic1['effect_variance'], dic2['effect_variance']]
        (
            fixed_fx_contrast,
            fixed_fx_variance,
            fixed_fx_stat,
        ) = compute_fixed_effects(contrasts, variance, mask)

        assert_almost_equal(get_data(fixed_fx_contrast),
                            get_data(fixed_fx_dic['effect_size']))
        assert_almost_equal(get_data(fixed_fx_variance),
                            get_data(fixed_fx_dic['effect_variance']))
        assert_almost_equal(get_data(fixed_fx_stat),
                            get_data(fixed_fx_dic['stat']))

        # test without mask variable
        (
            fixed_fx_contrast,
            fixed_fx_variance,
            fixed_fx_stat,
        ) = compute_fixed_effects(contrasts, variance)
        assert_almost_equal(get_data(fixed_fx_contrast),
                            get_data(fixed_fx_dic['effect_size']))
        assert_almost_equal(get_data(fixed_fx_variance),
                            get_data(fixed_fx_dic['effect_variance']))
        assert_almost_equal(get_data(fixed_fx_stat),
                            get_data(fixed_fx_dic['stat']))

        # ensure that using unbalanced effects size and variance images
        # raises an error
        with pytest.raises(ValueError):
            compute_fixed_effects(contrasts * 2, variance, mask)
        del mask, multi_session_model
Example #13
def test_glm_sample_mask():
    """Ensure the sample mask is performing correctly in GLM."""
    shapes, rk = [(10, 10, 10, 25)], 3
    mask, fmri_data, design_matrix =\
        generate_fake_fmri_data_and_design(shapes, rk)
    model = FirstLevelModel(t_r=2.0, mask_img=mask, minimize_memory=False)
    sample_mask = np.arange(25)[3:]  # censor the first three volumes
    model.fit(fmri_data,
              design_matrices=design_matrix,
              sample_masks=sample_mask)
    assert model.design_matrices_[0].shape[0] == 22
    assert model.predicted[0].shape[-1] == 22
Example #14
def test_high_level_glm_one_session():
    shapes, rk = [(7, 8, 9, 15)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(shapes, rk)

    single_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    assert isinstance(single_session_model.masker_.mask_img_,
                      Nifti1Image)

    single_session_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = single_session_model.compute_contrast(np.eye(rk)[:1])
    assert isinstance(z1, Nifti1Image)
Example #15
def test_first_level_residuals():
    shapes, rk = [(10, 10, 10, 100)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(shapes, rk)

    for i in range(len(design_matrices)):
        design_matrices[i].iloc[:, 0] = 1

    model = FirstLevelModel(mask_img=mask, minimize_memory=False,
                            noise_model='ols')
    model.fit(fmri_data, design_matrices=design_matrices)

    residuals = model.residuals[0]
    mean_residuals = model.masker_.transform(residuals).mean(0)
    assert_array_almost_equal(mean_residuals, 0)
Example #16
def test_get_voxelwise_attributes_should_return_as_many_as_design_matrices(
        shapes):
    mask, fmri_data, design_matrices =\
        generate_fake_fmri_data_and_design(shapes)

    for i in range(len(design_matrices)):
        design_matrices[i].iloc[:, 0] = 1

    model = FirstLevelModel(mask_img=mask,
                            minimize_memory=False,
                            noise_model='ols')
    model.fit(fmri_data, design_matrices=design_matrices)

    # Check that length of outputs is the same as the number of design matrices
    assert len(model._get_voxelwise_model_attribute("resid", True)) == \
           len(shapes)
Example #17
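# create_bmap below relies on several names that are not defined in this
# excerpt (pjoin, xpu, output_dir, tqdm, ...); a plausible prologue, stated
# as assumptions rather than the original imports:
import os
from os.path import expanduser as xpu  # assumed alias
from os.path import join as pjoin
from typing import Union

import nibabel as nib
import nilearn.image
import numpy as np
import pandas as pd
from tqdm import tqdm

from nilearn.glm.first_level import (FirstLevelModel,
                                     make_first_level_design_matrix)

output_dir = '~/memory_task_glm'  # hypothetical output root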
def create_bmap(sub_dir: Union[str, os.PathLike],
                noise_model: str = 'ar1',
                hrf_model: str = 'spm',
                drift_model: str = None,
                fwhm: int = 8,
                **kwargs):
    sub_id = os.path.basename(sub_dir)
    tsk_prfx = '_ses-04_run-01_task-memory_'
    outfile_suffix = '_bmap-effectsize.nii.gz'
    confounds = pd.read_csv(pjoin(sub_dir,
                                  sub_id + tsk_prfx + 'confounds.tsv'),
                            sep='\t')
    events = pd.read_csv(pjoin(sub_dir, sub_id + tsk_prfx + 'events.tsv'),
                         sep='\t')
    contrast_list = []
    sub_out_dir = pjoin(xpu(output_dir), 'derivatives', sub_id, 'beta_maps')
    out_filename = pjoin(sub_out_dir, sub_id + outfile_suffix)
    os.makedirs(sub_out_dir, exist_ok=True)
    fmri_img = nib.load(pjoin(sub_dir, sub_id + tsk_prfx + 'bold.nii.gz'))
    nscans, t_r = fmri_img.shape[-1], fmri_img.header.get_zooms()[-1]
    frame_times = np.arange(confounds.shape[0]) * t_r
    for row in tqdm(list(events.iterrows())):
        tnum = row[1].trial_number
        # relabel every other trial as 'X_<condition>' so that only the
        # current trial keeps its own condition name (one beta map per trial)
        events['trial_type'] = [
            'X_' + r[1].condition
            if r[1].trial_number != tnum else r[1].condition
            for r in events.iterrows()
        ]
        mat_params = {
            'frame_times': frame_times,
            'events': events[['onset', 'duration', 'trial_type']],
            'add_regs': confounds,
            'drift_model': drift_model,
            'hrf_model': hrf_model
        }
        trial_matrix = make_first_level_design_matrix(**mat_params)
        trial_contrast = pd.Series(
            np.array([1] +
                     list(np.repeat(0, trial_matrix.shape[1] - 1)))).values
        glm_params = {
            't_r': t_r,
            'drift_model': drift_model,
            'standardize': True,
            'noise_model': noise_model,
            'hrf_model': hrf_model,
            'smoothing_fwhm': fwhm
        }
        fit_params = {'run_imgs': fmri_img, 'design_matrices': trial_matrix}
        con_params = {
            'contrast_def': trial_contrast,
            'output_type': 'effect_size'
        }
        contrast_list.append(
            FirstLevelModel().fit(**fit_params).compute_contrast(**con_params))
    nib.save(img=nilearn.image.concat_imgs(contrast_list),
             filename=out_filename)
Example #18
def test_first_level_glm_computation_with_memory_caching():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # initialize FirstLevelModel with memory option enabled
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r, slice_time_ref, mask_img=mask,
                                drift_model='polynomial', drift_order=3,
                                memory='nilearn_cache', memory_level=1,
                                minimize_memory=False)
        model.fit(func_img, events)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del mask, func_img, FUNCFILE, model
Example #19
def report_flm_adhd_dmn():  # pragma: no cover
    t_r = 2.
    slice_time_ref = 0.
    n_scans = 176
    pcc_coords = (0, -53, 26)
    adhd_dataset = nilearn.datasets.fetch_adhd(n_subjects=1)
    seed_masker = NiftiSpheresMasker([pcc_coords],
                                     radius=10,
                                     detrend=True,
                                     standardize=True,
                                     low_pass=0.1,
                                     high_pass=0.01,
                                     t_r=2.,
                                     memory='nilearn_cache',
                                     memory_level=1,
                                     verbose=0)
    seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
    frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
    design_matrix = make_first_level_design_matrix(frametimes,
                                                   hrf_model='spm',
                                                   add_regs=seed_time_series,
                                                   add_reg_names=["pcc_seed"])
    dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
    contrasts = {'seed_based_glm': dmn_contrast}

    first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
    first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                              design_matrices=design_matrix)

    report = make_glm_report(
        first_level_model,
        contrasts=contrasts,
        title='ADHD DMN Report',
        cluster_threshold=15,
        height_control='bonferroni',
        min_distance=8.,
        plot_type='glass',
        report_dims=(1200, 'a'),
    )
    output_filename = 'generated_report_flm_adhd_dmn.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
Example #20
def test_high_level_glm_different_design_matrices_formulas():
    # test that one can estimate a contrast when design matrices are different
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(shapes, rk)

    # make column names identical
    design_matrices[1].columns = design_matrices[0].columns
    # add a column to the second design matrix
    design_matrices[1]['new'] = np.ones((19, 1))

    # Fit a glm with two sessions and design matrices
    multi_session_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data, design_matrices=design_matrices)

    # Compute contrast with formulas
    cols_formula = tuple(design_matrices[0].columns[:2])
    formula = "%s-%s" % cols_formula
    with pytest.warns(UserWarning, match='One contrast given, assuming it for all 2 runs'):
        z_joint_formula = multi_session_model.compute_contrast(
            formula, output_type='effect_size')
Example #21
def test_make_headings_with_contrasts_none_title_custom():
    model = FirstLevelModel()
    test_input = (None,
                  'Custom Title for report',
                  model,
                  )
    expected_output = ('Custom Title for report',
                       'Custom Title for report',
                       'First Level Model',
                       )
    actual_output = glmr._make_headings(*test_input)
    assert actual_output == expected_output
Example #22
def test_z_score_opposite_contrast():
    fmri, mask = generate_fake_fmri(shape=(50, 20, 50),
                                    length=96,
                                    rand_gen=np.random.RandomState(42))

    nifti_masker = NiftiMasker(mask_img=mask)
    data = nifti_masker.fit_transform(fmri)

    frametimes = np.linspace(0, (96 - 1) * 2, 96)

    for i in [0, 20]:
        design_matrix = make_first_level_design_matrix(
            frametimes,
            hrf_model='spm',
            add_regs=np.array(data[:, i]).reshape(-1, 1))
        c1 = np.array([1] + [0] * (design_matrix.shape[1] - 1))
        c2 = np.array([0] + [1] + [0] * (design_matrix.shape[1] - 2))
        contrasts = {'seed1 - seed2': c1 - c2, 'seed2 - seed1': c2 - c1}
        fmri_glm = FirstLevelModel(t_r=2.,
                                   noise_model='ar1',
                                   standardize=False,
                                   hrf_model='spm',
                                   drift_model='cosine')
        fmri_glm.fit(fmri, design_matrices=design_matrix)
        z_map_seed1_vs_seed2 = fmri_glm.compute_contrast(
            contrasts['seed1 - seed2'], output_type='z_score')
        z_map_seed2_vs_seed1 = fmri_glm.compute_contrast(
            contrasts['seed2 - seed1'], output_type='z_score')
        assert_almost_equal(get_data(z_map_seed1_vs_seed2).min(),
                            -get_data(z_map_seed2_vs_seed1).max(),
                            decimal=10)
        assert_almost_equal(get_data(z_map_seed1_vs_seed2).max(),
                            -get_data(z_map_seed2_vs_seed1).min(),
                            decimal=10)
Example #23
def test_compute_contrast_num_contrasts():

    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19), (7, 8, 7, 13)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(shapes, rk)

    # Fit a glm with 3 sessions and design matrices
    multi_session_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data, design_matrices=design_matrices)

    # raise when n_contrast != n_runs | 1
    with pytest.raises(ValueError):
        multi_session_model.compute_contrast([np.eye(rk)[1]]*2)

    multi_session_model.compute_contrast([np.eye(rk)[1]]*3)
    with pytest.warns(UserWarning, match='One contrast given, assuming it for all 3 runs'):
        multi_session_model.compute_contrast([np.eye(rk)[1]])
Example #24
def test_process_second_level_input_as_firstlevelmodels():
    """Unit tests for function
    _process_second_level_input_as_firstlevelmodels().
    """
    from nilearn.glm.second_level.second_level import _process_second_level_input_as_firstlevelmodels  # noqa
    shapes, rk = [(7, 8, 9, 15)], 3
    mask, fmri_data, design_matrices = \
        generate_fake_fmri_data_and_design(shapes, rk)
    list_of_flm = [
        FirstLevelModel(mask_img=mask, subject_label=f"sub-{i}").fit(
            fmri_data[0], design_matrices=design_matrices[0]) for i in range(3)
    ]
    sample_map, subjects_label =\
        _process_second_level_input_as_firstlevelmodels(list_of_flm)
    assert subjects_label == [f"sub-{i}" for i in range(3)]
    assert isinstance(sample_map, Nifti1Image)
    assert sample_map.shape == (7, 8, 9)
Example #25
def test_high_level_glm_null_contrasts():
    # test that contrast computation is resilient to 0 values.
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = \
        generate_fake_fmri_data_and_design(shapes, rk)

    multi_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data, design_matrices=design_matrices)
    single_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = multi_session_model.compute_contrast(
        [np.eye(rk)[:1], np.zeros((1, rk))], output_type='stat')
    z2 = single_session_model.compute_contrast(np.eye(rk)[:1],
                                               output_type='stat')
    np.testing.assert_almost_equal(get_data(z1), get_data(z2))
Example #26
def test_infer_effect_maps():
    from nilearn.glm.second_level.second_level import _infer_effect_maps
    # with InTemporaryDirectory():
    shapes, rk = ((7, 8, 9, 1), (7, 8, 7, 16)), 3
    mask, fmri_data, design_matrices = write_fake_fmri_data_and_design(
        shapes, rk)
    func_img = load(fmri_data[0])
    second_level_input = pd.DataFrame({
        'map_name': ["a", "b"],
        'effects_map_path': [fmri_data[0], "bar"]
    })
    assert _infer_effect_maps(second_level_input, "a") == [fmri_data[0]]
    with pytest.raises(ValueError, match="File not found: 'bar'"):
        _infer_effect_maps(second_level_input, "b")
    assert _infer_effect_maps([fmri_data[0]], None) == [fmri_data[0]]
    contrast = np.eye(rk)[1]
    second_level_input = [FirstLevelModel(mask_img=mask)] * 2
    for i, model in enumerate(second_level_input):
        model.fit(fmri_data[i], design_matrices=design_matrices[i])
    assert len(_infer_effect_maps(second_level_input, contrast)) == 2
    # Delete objects attached to files to avoid WindowsError when deleting
    # temporary directory (in Windows)
    del mask, fmri_data, func_img, second_level_input
Example #27
def test_first_level_with_scaling():
    shapes, rk = [(3, 1, 1, 2)], 1
    fmri_data = list()
    fmri_data.append(Nifti1Image(np.zeros((1, 1, 1, 2)) + 6, np.eye(4)))
    design_matrices = list()
    design_matrices.append(
        pd.DataFrame(np.ones((shapes[0][-1], rk)),
                     columns=list('abcdefghijklmnopqrstuvwxyz')[:rk]))
    fmri_glm = FirstLevelModel(mask_img=False,
                               noise_model='ols',
                               signal_scaling=0,
                               minimize_memory=True)
    assert fmri_glm.signal_scaling == 0
    assert not fmri_glm.standardize
    with pytest.warns(DeprecationWarning,
                      match="Deprecated. `scaling_axis` will be removed"):
        assert fmri_glm.scaling_axis == 0
    glm_parameters = fmri_glm.get_params()
    test_glm = FirstLevelModel(**glm_parameters)
    fmri_glm = fmri_glm.fit(fmri_data, design_matrices=design_matrices)
    test_glm = test_glm.fit(fmri_data, design_matrices=design_matrices)
    assert glm_parameters['signal_scaling'] == 0
Example #28
def test_first_level_hrf_model(hrf_model, spaces):
    """
    Ensure that FirstLevelModel runs without raising errors
    for different values of hrf_model. In particular, one checks that it runs
    without raising errors when given a custom response function.
    Also ensure that it computes contrasts without raising errors,
    even when event (ie condition) names have spaces.
    """
    shapes, rk = [(10, 10, 10, 25)], 3
    mask, fmri_data, _ =\
        generate_fake_fmri_data_and_design(shapes, rk)

    events = basic_paradigm(condition_names_have_spaces=spaces)

    model = FirstLevelModel(t_r=2.0, mask_img=mask, hrf_model=hrf_model)

    model.fit(fmri_data, events)

    columns = model.design_matrices_[0].columns
    model.compute_contrast(f"{columns[0]}-{columns[1]}")
Example #29
# define the effects-of-interest contrast, a 2-dimensional contrast
# spanning the two conditions.

contrasts = {
    'faces-scrambled': basic_contrasts['faces'] - basic_contrasts['scrambled'],
    'scrambled-faces': -basic_contrasts['faces'] + basic_contrasts['scrambled'],
    'effects_of_interest': np.vstack((basic_contrasts['faces'],
                                      basic_contrasts['scrambled']))
    }

#########################################################################
# Fit the GLM for the 2 sessions by specifying a FirstLevelModel and then
# fitting it.
from nilearn.glm.first_level import FirstLevelModel
print('Fitting a GLM')
fmri_glm = FirstLevelModel()
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

#########################################################################
# Now we can compute contrast-related statistical maps (in z-scale), and plot
# them.
print('Computing contrasts')
from nilearn import plotting

# Iterate on contrasts
for contrast_id, contrast_val in contrasts.items():
    print("\tcontrast id: %s" % contrast_id)
    # compute the contrasts
    z_map = fmri_glm.compute_contrast(
        contrast_val, output_type='z_score')
    # plot the contrasts as soon as they're generated
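    # A minimal sketch of the plotting step implied by the comment above;
    # the threshold and display settings are illustrative assumptions:
    plotting.plot_stat_map(z_map, threshold=3.0, display_mode='z',
                           title=contrast_id)

plotting.show()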
Example #30
#########################################################################
# The next solution is to try Finite Impulse Response (FIR) models: we just
# say that the hrf is an arbitrary function that lags behind the
# stimulus onset.  In the present case, given that the number of
# conditions is high, we should use a simple FIR model.
#
# Concretely, we set `hrf_model` to 'fir' and `fir_delays` to [1, 2,
# 3] (scans), corresponding to a 3-step function on the [1 * t_r, 4 *
# t_r] seconds interval.
#

from nilearn.glm.first_level import FirstLevelModel
from nilearn.reporting import plot_design_matrix, plot_contrast_matrix

first_level_model = FirstLevelModel(t_r, hrf_model='fir', fir_delays=[1, 2, 3])
first_level_model = first_level_model.fit(fmri_img, events=events)
design_matrix = first_level_model.design_matrices_[0]
plot_design_matrix(design_matrix)

#########################################################################
# We have to adapt the contrast specification. We characterize the BOLD
# response by the sum across the three time lags. It's a bit hairy,
# sorry, but this is the price to pay for flexibility...

import numpy as np

contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])
conditions = events.trial_type.unique()
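# A minimal sketch of the "sum across the three time lags" contrast described
# above, assuming nilearn's FIR column naming '<condition>_delay_<d>':
fir_contrasts = {}
for condition in conditions:
    delay_columns = [col for col in design_matrix.columns
                     if col.startswith(condition + '_delay_')]
    fir_contrasts[condition] = np.sum(
        [contrasts[col] for col in delay_columns], axis=0)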