Example #1
def test_hkernel():
    """ test the hrf computation
    """
    tr = 2.0
    h = _hrf_kernel('spm', tr)
    assert_almost_equal(h[0], spm_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel('spm + derivative', tr)
    assert_almost_equal(h[1], spm_time_derivative(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel('spm + derivative + dispersion', tr)
    assert_almost_equal(h[2], spm_dispersion_derivative(tr))
    assert_equal(len(h), 3)
    h = _hrf_kernel('glover', tr)
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel('glover + derivative', tr)
    assert_almost_equal(h[1], glover_time_derivative(tr))
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel('fir', tr, fir_delays=np.arange(4))
    assert_equal(len(h), 4)
    for dh in h:
        assert_equal(dh.sum(), 50.)
    #
    h = _hrf_kernel(None, tr)
    assert_equal(len(h), 1)
    assert_almost_equal(h[0], np.hstack((1, np.zeros(49))))
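These snippets omit their imports. To run this test, a preamble along the following lines is needed (a sketch, assuming the functions come from nilearn's hemodynamic_models module, as the names suggest):

import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from nilearn.glm.first_level.hemodynamic_models import (
    _hrf_kernel, glover_hrf, glover_time_derivative, spm_hrf,
    spm_time_derivative, spm_dispersion_derivative)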
Example #2
def test_glover_hrf():
    """ test that the spm_hrf is correctly normalized and has correct length
    """
    h = glover_hrf(2.0)
    assert_almost_equal(h.sum(), 1)
    assert_equal(len(h), 800)
    h = glover_dispersion_derivative(2.0)
    assert_almost_equal(h.sum(), 0)
    assert_equal(len(h), 800)
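The expected length follows from the sampling grid: with nilearn's defaults (time_length=32 s, oversampling=50), the kernel is sampled every tr / oversampling = 2 / 50 = 0.04 s, giving 32 / 0.04 = 800 samples.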
Example #3
def test_hkernel():
    """ test the hrf computation
    """
    tr = 2.0
    h = _hrf_kernel('spm', tr)
    assert_almost_equal(h[0], spm_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel('spm_time', tr)
    assert_almost_equal(h[1], spm_time_derivative(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel('spm_time_dispersion', tr)
    assert_almost_equal(h[2], spm_dispersion_derivative(tr))
    assert_equal(len(h), 3)
    h = _hrf_kernel('canonical', tr)
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel('canonical with derivative', tr)
    assert_almost_equal(h[1], glover_time_derivative(tr))
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel('fir', tr, fir_delays=np.arange(4))
    assert_equal(len(h), 4)
    for dh in h:
        assert_equal(dh.sum(), 16.)
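This variant exercises the older nistats naming ('spm_time', 'spm_time_dispersion', 'canonical', 'canonical with derivative'), and the FIR sums of 16 match that era's default oversampling of 16; later releases renamed the models ('spm + derivative', 'glover', ...) and raised the default oversampling to 50, hence the sums of 50 in Example #1.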
Example #4
def test_hkernel():
    """ test the hrf computation
    """
    tr = 2.0
    h = _hrf_kernel("spm", tr)
    assert_almost_equal(h[0], spm_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel("spm + derivative", tr)
    assert_almost_equal(h[1], spm_time_derivative(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel("spm + derivative + dispersion", tr)
    assert_almost_equal(h[2], spm_dispersion_derivative(tr))
    assert_equal(len(h), 3)
    h = _hrf_kernel("glover", tr)
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 1)
    h = _hrf_kernel("glover + derivative", tr)
    assert_almost_equal(h[1], glover_time_derivative(tr))
    assert_almost_equal(h[0], glover_hrf(tr))
    assert_equal(len(h), 2)
    h = _hrf_kernel("fir", tr, fir_delays=np.arange(4))
    assert_equal(len(h), 4)
    for dh in h:
        assert_equal(dh.sum(), 16.0)
Example #5
def _get_hrf_model(hrf_model=None, hrf_length=25., dt=1., normalize=False):
    """Return the HRF generated with model hrf_model. If hrf_model is None
    or not recognized, a vector of zeros is returned (with a warning).

    Parameters
    ----------
    hrf_model : str or None
        One of 'glover', 'spm', 'gamma', 'bezier' or 'physio'.
    hrf_length : float
        Length of the HRF in seconds.
    dt : float
        Sampling interval in seconds.
    normalize : bool
        Whether to scale the HRF to unit Euclidean norm.

    Returns
    -------
    hrf_0 : np.ndarray
        The sampled HRF.
    """
    if hrf_model == 'glover':
        hrf_0 = glover_hrf(tr=1., oversampling=1./dt, time_length=hrf_length)
    elif hrf_model == 'spm':
        hrf_0 = spm_hrf(tr=1., oversampling=1./dt, time_length=hrf_length)
    elif hrf_model == 'gamma':
        hrf_0 = _gamma_difference_hrf(1., oversampling=1./dt, time_length=hrf_length,
                                      onset=0., delay=6, undershoot=16., dispersion=1.,
                                      u_dispersion=1., ratio=0.167)
    elif hrf_model == 'bezier':
        # Bezier curves. We can indicate where is the undershoot and the peak etc
        hrf_0 = bezier_hrf(hrf_length=hrf_length, dt=dt, pic=[6,1], picw=2,
                           ushoot=[15,-0.2], ushootw=3, normalize=normalize)
    elif hrf_model == 'physio':
        # Balloon model. By default uses the parameters of Khalidov11
        hrf_0 = physio_hrf(hrf_length=hrf_length, dt=dt, normalize=normalize)
    else:
        # Zero vector if no recognized hrf_model is specified
        hrf_0 = np.zeros(int(hrf_length / dt))  # int() required under Python 3
        warnings.warn("The HRF model is not recognized, setting it to None")
    if normalize and hrf_model is not None:
        hrf_0 = hrf_0 / np.linalg.norm(hrf_0)
    return hrf_0
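A minimal usage sketch of this helper (assuming _get_hrf_model and its glover_hrf dependency are in scope; the 'bezier' and 'physio' branches additionally need the project-specific bezier_hrf and physio_hrf helpers):

import numpy as np

# 25 s HRF sampled every 0.5 s -> 50 samples, scaled to unit norm
hrf = _get_hrf_model('glover', hrf_length=25., dt=0.5, normalize=True)
assert len(hrf) == 50
assert np.isclose(np.linalg.norm(hrf), 1.0)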
Example #6
def test_glover_hrf():
    """ test that the spm_hrf is correctly normalized and has correct length
    """
    h = glover_hrf(2.0)
    assert_almost_equal(h.sum(), 1)
    assert_equal(len(h), 256)
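Here the expected length is 256 rather than 800: 32 / (2 / 16) = 256, i.e. this version of the test targets the older nistats default oversampling of 16 (the current default of 50 yields the 800 asserted in Example #2).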
Example #7
    def get_cnn_matrix(self, ses, run, index, events):
        """Load quilted activations and arrange in DataFrame matched to fMRI.

        Parameters
        ----------
        ses : int
            MRI experimental session, either 1 or 2
        run : int
            Experimental run, 1-10
        index : pandas.TimedeltaIndex
            The pandas index from the corresponding DataFrame containing the
            BOLD activity
        events : pandas.DataFrame
            Event information for the run (stimulus files, onsets, offsets)

        Returns
        -------
        dict
            One matrix for each layer of the network containing the layer
            activations for a single run

        """
        matrices = {}
        n_rows = len(index)

        # Get the event info for all runs (what stimuli were played when)
        langs = [x.split('/')[1] for x in events['stim_file']]
        spkrs = [
            x.split('/')[-1].split('_60s')[0] for x in events['stim_file']
        ]

        # Load quilted activations
        n_stim = len(spkrs)
        assert n_stim == 9
        quilted_acts = {'enu': {}, 'deu': {}, 'nld': {}}
        quilts_this_run = {}
        for stim in range(n_stim):
            if self.model == 'random_features':
                quilted_acts[langs[stim]][spkrs[stim]] = {
                    layer: np.random.rand(N_FRAMES_PER_QUILT,
                                          self.layer_sizes[layer])
                    for layer in LAYERS
                }
            else:
                quilt_file = '{}/{}/{}/{}_quilted.pkl'.format(
                    QUILT_DIR, langs[stim], self.model, spkrs[stim])
                print(f'Opening {quilt_file}')
                with open(quilt_file, 'rb') as qfile:
                    quilt = pickle.load(qfile)
                quilted_acts[langs[stim]][spkrs[stim]] = quilt
                for layer in LAYERS:
                    if stim == 0:
                        quilts_this_run[layer] = quilt[layer]
                    else:
                        quilts_this_run[layer] = np.concatenate(
                            (quilts_this_run[layer], quilt[layer]), axis=0)

        # Assemble quilted activations in dataframe of same length as fmri
        for layer in LAYERS:
            if self.shuffle_run:
                print('Shuffling run {} layer {}.'.format(run, layer))
                idx = np.arange(quilts_this_run[layer].shape[0])
                np.random.shuffle(idx)
                quilts_this_run[layer] = quilts_this_run[layer][idx, :]
                start = 0
                for stim in range(n_stim):
                    shape0 = quilted_acts[langs[stim]][
                        spkrs[stim]][layer].shape[0]
                    end = start + shape0
                    quilted_acts[langs[stim]][spkrs[stim]][
                        layer] = quilts_this_run[layer][start:end, :]
                    start += shape0
            dim = quilted_acts[langs[0]][spkrs[0]][layer].shape[1]
            activities = pandas.DataFrame(np.zeros((n_rows, dim)), index=index)
            # loop over the 9 stimuli quilts in the run
            for stim in range(n_stim):
                onset = events['onset'][stim]
                offset = events['offset'][stim]
                activities[onset:offset] = quilted_acts[langs[stim]][
                    spkrs[stim]][layer]

            # Convolve the activations with the HRF
            hrf = glover_hrf(FRAME_RATE,
                             oversampling=1,
                             time_length=32.0,
                             onset=0.0)
            activities_hrf = activities.apply(np.convolve,
                                              args=(hrf, ),
                                              axis=0)
            # Convert to timedelta again
            nrows = activities_hrf.shape[0]
            time = np.arange(0, nrows + 1 * FRAME_RATE, FRAME_RATE)
            time = time[:nrows]  # to make sure we always have the right length
            assert len(time) == len(activities_hrf.index)
            activities_hrf.index = pandas.to_timedelta(time, unit='s')

            # Cut out just the timepoints corresponding to auditory stimulation
            block0 = activities_hrf[events['onset'][0] +
                                    pandas.to_timedelta(6, unit='s'):
                                    events['offset'][2]].to_numpy()
            block1 = activities_hrf[events['onset'][3] +
                                    pandas.to_timedelta(6, unit='s'):
                                    events['offset'][5]].to_numpy()
            block2 = activities_hrf[events['onset'][6] +
                                    pandas.to_timedelta(6, unit='s'):
                                    events['offset'][8]].to_numpy()
            Y = np.concatenate([
                block0[:BLOCK_LEN, :], block1[:BLOCK_LEN, :],
                block2[:BLOCK_LEN, :]
            ],
                               axis=0)

            matrices[layer] = Y

        return matrices
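The HRF-convolution step above, in isolation. A minimal sketch with made-up toy data; FRAME_RATE stands for the fMRI sampling interval in seconds:

import numpy as np
import pandas
from nilearn.glm.first_level.hemodynamic_models import glover_hrf

frame_rate = 0.625  # hypothetical sampling interval in seconds
activities = pandas.DataFrame(np.random.rand(100, 3))
# One HRF sample per fMRI frame: tr=frame_rate, oversampling=1
hrf = glover_hrf(frame_rate, oversampling=1, time_length=32.0, onset=0.0)
# np.convolve lengthens each column to n + len(hrf) - 1 rows, which is why
# the method re-indexes and slices out the stimulation blocks afterwards
activities_hrf = activities.apply(np.convolve, args=(hrf,), axis=0)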
Example #8
def _run_glm_in_parallel(dm, run, event, conf, func, hrf_model, noise_model, tr, osf, slice_time_ref, 
                         mask, rf_condition, logger):

    logger.info(f"Fitting GLM to run {run+1} ...")
    n_vols = func.shape[0]
    start_time = slice_time_ref * tr
    end_time = (n_vols - 1 + slice_time_ref) * tr
    frame_times = np.linspace(start_time, end_time, n_vols)

    if mask is not None:
        func = func[:, mask.ravel()]

    if dm is not None:
        logger.info("Design-matrix was supplied, so fitting GLM immediately.")
        dm.index = frame_times
        conf.index = dm.index
        glm_results = run_glm(func, dm.values, noise_model=noise_model)
        return glm_results[0], glm_results[1], dm

    if not isinstance(hrf_model, str):  # custom HRF!
        logger.info("Using custom HRF-model.")
        conds = sorted(event.trial_type.unique())
        cols = ['constant'] + conds

        if isinstance(hrf_model, np.ndarray):
            # It's a SINGLE HRF (not a voxel-specific)
            X = np.zeros((n_vols, len(conds) + 1))
            X[:, 0] = 1  # intercept

            for i, con in enumerate(conds):  # loop over conditions
                trials = event.query('trial_type == @con')
                exp_cond = trials.loc[:, ['onset', 'duration', 'weight']]
                exp_cond['weight'] = 1

                # Upsample
                x, hr_frame_times = _sample_condition(exp_cond.values.T, frame_times, oversampling=osf)

                # Convolve predictor
                xconv = np.convolve(x, hrf_model)[:x.shape[0]]

                # Downsample
                f_interp = interp1d(hr_frame_times, xconv)
                X[:, i+1] = f_interp(frame_times)

            # Save the design matrix
            dm = pd.DataFrame(data=X, columns=cols, index=frame_times)

        elif isinstance(hrf_model, ResponseFitter):

            # Assuming it's a per-voxel GLM
            hrf_tc = hrf_model.get_timecourses()
            if rf_condition is not None:
                hrf_tc = hrf_tc.loc[hrf_tc.index.get_level_values(0) == rf_condition, :]

            tlength = hrf_tc.index.get_level_values(-1).values[-1]
            hrf_values = hrf_tc.values

            n_vox = func.shape[1]
            labels, results = [], {}
            canon = glover_hrf(tr=tr, oversampling=osf, time_length=tlength, onset=0.0)

            hrfs = np.zeros((3, canon.size, n_vox))
            for vox in range(n_vox):
                
                if vox % 1000 == 0:
                    print(f"Voxel {vox} / {n_vox} (run {run + 1})")
                
                this_hrf = hrf_values[:, vox]
                hrfs[0, :, vox] = this_hrf
                hrfs[1, :, vox] = canon

                X = np.zeros((n_vols, len(conds) + 1))
                X[:, 0] = 1  # intercept
                for i, con in enumerate(conds):
                    trials = event.query('trial_type == @con')
                    exp_cond = trials.loc[:, ['onset', 'duration', 'weight']]
                    exp_cond['weight'] = 1
                    x, hr_frame_times = _sample_condition(exp_cond.values.T, frame_times, oversampling=osf)

                    xconv = np.convolve(x, this_hrf)[:x.shape[0]]
                    f_interp = interp1d(hr_frame_times, xconv)
                    X[:, i+1] = f_interp(frame_times)

                X = pd.DataFrame(data=X, columns=cols, index=frame_times)
                conf.index = X.index
                dm = pd.concat((X, conf), axis=1)
                lab, res = run_glm(func[:, vox, np.newaxis], dm.values, noise_model=noise_model)
                labels, results = _merge_regression_results(lab[0], res, labels, results, n_vox=n_vox)
            return np.array(labels), results, dm, hrfs
        else:
            raise ValueError("Unknown type for hrf_model; don't know what to do with it!")
    else:
        logger.info(f"Using default Nistats HRF model '{hrf_model}'.")
        dm = make_first_level_design_matrix(
            frame_times=frame_times,
            events=event,
            drift_model=None,
            hrf_model=hrf_model,
            fir_delays=None
        )

    conf.index = dm.index
    dm = pd.concat((dm, conf), axis=1)
    glm_results = run_glm(func, dm.values, noise_model=noise_model)
    return glm_results[0], glm_results[1], dm
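For reference, the convolve-and-downsample predictor construction used above, in isolation (a sketch: _sample_condition is a private nistats/nilearn helper, and the event values here are made up):

import numpy as np
from scipy.interpolate import interp1d
from nilearn.glm.first_level.hemodynamic_models import (
    _sample_condition, glover_hrf)

tr, n_vols, osf = 2.0, 100, 50
frame_times = np.arange(n_vols) * tr
# (onsets, durations, amplitudes) for a single condition
exp_cond = np.array([[0., 30., 60.], [5., 5., 5.], [1., 1., 1.]])
x, hr_frame_times = _sample_condition(exp_cond, frame_times, oversampling=osf)
hrf = glover_hrf(tr, oversampling=osf)
xconv = np.convolve(x, hrf)[:x.shape[0]]             # convolve at high resolution
pred = interp1d(hr_frame_times, xconv)(frame_times)  # downsample to the TR grid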
Example #9
import numpy as np
import sys

from sklearn.base import clone
from sklearn.gaussian_process.kernels import ConstantKernel, RBF
standard_kernel = ConstantKernel(1.) * RBF(length_scale=2.)

from nistats.hemodynamic_models import glover_hrf
from scipy.interpolate import interp1d
hrf = glover_hrf(1., 16., time_length=33)
glover = interp1d(np.linspace(0, 33., len(hrf), endpoint=False), hrf)
def zero_hrf(x):
    return np.zeros_like(x)

from gp import _get_hrf_measurements, _get_design_from_hrf_measures
from scipy.sparse import coo_matrix, eye, block_diag


def get_hrf_measurement_covariance(hrf_measurement_points, kernel,
                                   extra_points=None,
                                   eval_gradient=False):

    X = np.concatenate(hrf_measurement_points)
    if extra_points is not None:
        X = np.concatenate([X, extra_points])
    return kernel(X[:, np.newaxis], eval_gradient=eval_gradient)


def get_collapser_Zbeta(beta_values, modulation,
                        beta_indices,
                        n_extra_points=0):