# Example #1
def get_bold():
    """Generate a small two-voxel BOLD-like dataset.

    One voxel carries baseline + HRF-convolved event-related signal plus
    noise, the other carries baseline + noise only.

    Returns
    -------
    Dataset
        Samples of shape (n_volumes, 2); the low-resolution model
        regressor is stored in ``sa['model']``.
    """
    # TODO add second model
    # HRF kernel: canonical double-gamma minus a small single-gamma term
    hrf_x = np.linspace(0, 25, 250)
    hrf = double_gamma_hrf(hrf_x) - single_gamma_hrf(hrf_x, 0.8, 1, 0.05)

    # length of the high-resolution experiment timeline
    samples = 1200

    # fast event-related design: unit impulses at these hi-res indices
    fast_er_onsets = np.array([50, 240, 340, 590, 640, 940, 960])
    fast_er = np.zeros(samples)
    fast_er[fast_er_onsets] = 1

    # convolve design with HRF; truncate convolution tail to timeline length
    model_hr = np.convolve(fast_er, hrf)[:samples]

    # downsample regressor to fMRI resolution; compute the output volume
    # count once instead of repeating int(samples / tr / 10) three times
    tr = 2.0
    n_vols = int(samples / tr / 10)
    model_lr = signal.resample(model_hr, n_vols, window='ham')

    # moderate noise level
    baseline = 800
    wsignal = baseline + 8.0 * model_lr + np.random.randn(n_vols) * 4.0
    nsignal = baseline + np.random.randn(n_vols) * 4.0

    ds = Dataset(samples=np.array([wsignal, nsignal]).T,
                 sa={'model': model_lr})

    return ds
# Example #2
def simple_hrf_dataset(events=None,
                       hrf_gen=lambda t: double_gamma_hrf(t) -
                       single_gamma_hrf(t, 0.8, 1, 0.05),
                       fir_length=15,
                       nsamples=None,
                       tr=2.0,
                       tres=1,
                       baseline=800.0,
                       signal_level=1,
                       noise='normal',
                       noise_level=1,
                       resampling='scipy'):
    """Simulate a simple two-voxel fMRI dataset from an event design.

    One "voxel" carries baseline + HRF-convolved signal, the other only
    baseline; noise is added to both.

    Parameters
    ----------
    events : None, ndarray of onsets, or list of Event/dict
        Experimental design; ``None`` selects a default onset list.
    hrf_gen : callable or ndarray
        Function of time producing the HRF, or a precomputed HRF sampled
        at ``tres`` with ``fir_length`` points.
    fir_length : int
        Number of ``tres``-spaced samples in the HRF kernel.
    nsamples : int or None
        Length of the high-resolution design; if None, estimated from the
        last onset plus 1.5 HRF lengths.
    tr : float
        Sampling interval of the output dataset.
    tres : float
        Temporal resolution of the high-resolution model.
    baseline, signal_level, noise_level : float
        Constant offset, regressor scaling, and noise magnitude.
    noise : str
        'normal' (white Gaussian) or 'autocorrelated'.
    resampling : str
        'scipy' (FFT-based) or 'naive' (decimation; requires ``tr`` to be
        a multiple of ``tres``).

    Raises
    ------
    ValueError
        For an unknown ``resampling`` or ``noise`` spec, or a tr/tres
        combination unusable with naive resampling.
    """
    if events is None:
        events = [1, 20, 25, 50, 60, 90, 92, 140]
    if isinstance(events, np.ndarray) or not isinstance(events[0], dict):
        # plain onsets -- wrap into Event objects
        events = [Event(onset=o) for o in events]
    else:
        assert (isinstance(events, list))
        for e in events:
            assert (isinstance(e, dict))

    # play fmri
    # full-blown HRF with initial dip and undershoot ;-)
    hrf_x = np.arange(0, float(fir_length) * tres, tres)
    if isinstance(hrf_gen, np.ndarray):
        # just accept provided HRF and only verify size match
        assert (len(hrf_x) == len(hrf_gen))
        hrf = hrf_gen
    else:
        # actually generate it
        hrf = hrf_gen(hrf_x)
    if not nsamples:
        # estimate number of samples needed if not provided
        max_onset = max([e['onset'] for e in events])
        nsamples = int(max_onset / tres + len(hrf_x) * 1.5)

    # come up with an experimental design: one boxcar per event
    fast_er = np.zeros(nsamples)
    for e in events:
        on = int(e['onset'] / float(tres))
        off = int((e['onset'] + e.get('duration', 1.)) / float(tres))
        if off == on:
            off += 1  # so we have at least 1 point
        assert (range(on, off))
        fast_er[on:off] = e.get('intensity', 1)
    # high resolution model of the convolved regressor
    model_hr = np.convolve(fast_er, hrf)[:nsamples]

    # downsample the regressor to fMRI resolution
    if resampling == 'scipy':
        from scipy import signal
        model_lr = signal.resample(model_hr,
                                   int(tres * nsamples / tr),
                                   window='ham')
    elif resampling == 'naive':
        # BUGFIX: test tr < tres first -- with 0 < tr < tres we have
        # tr % tres == tr != 0, so the modulo check always fired first
        # with a misleading "not multiple" message, and this branch was
        # unreachable. Messages also now name the actual keyword,
        # 'resampling' (previously said "resample=").
        if tr < tres:
            raise ValueError("You must use resampling='scipy' since your "
                             "TR=%.2g is less than tres=%.2g" % (tr, tres))
        if tr % tres != 0.0:
            raise ValueError("You must use resampling='scipy' since your "
                             "TR=%.2g is not multiple of tres=%.2g"
                             % (tr, tres))
        step = int(tr // tres)
        model_lr = model_hr[::step]
    else:
        raise ValueError("resampling can only be 'scipy' or 'naive'. Got %r" %
                         resampling)

    # generate artifical fMRI data: two voxels one is noise, one has
    # something
    wsignal = baseline + model_lr * signal_level
    nsignal = np.ones(wsignal.shape) * baseline

    # build design matrix: bold-regressor and constant
    design = np.array([model_lr, np.repeat(1, len(model_lr))]).T

    # two 'voxel' dataset
    ds = dataset_wizard(samples=np.array((wsignal, nsignal)).T, targets=1)
    ds.a['baseline'] = baseline
    ds.a['tr'] = tr
    ds.sa['design'] = design

    # mark which feature actually carries signal
    ds.fa['signal_level'] = [signal_level, False]

    # use a separate local instead of rebinding the 'noise' parameter
    if noise == 'autocorrelated':
        # this one seems to be quite unstable and can provide really
        # funky noise at times
        noise_samples = autocorrelated_noise(ds,
                                             1 / tr,
                                             1 / (2 * tr),
                                             lfnl=noise_level,
                                             hfnl=noise_level,
                                             add_baseline=False)
    elif noise == 'normal':
        noise_samples = np.random.randn(*ds.shape) * noise_level
    else:
        raise ValueError(noise)
    ds.sa['noise'] = noise_samples
    ds.samples += noise_samples
    return ds
# Example #3
def simple_hrf_dataset(events=None,
                       hrf_gen=lambda t: double_gamma_hrf(t) - single_gamma_hrf(t, 0.8, 1, 0.05),
                       fir_length=15,
                       nsamples=None,
                       tr=2.0,
                       tres=1,
                       baseline=800.0,
                       signal_level=1,
                       noise='normal',
                       noise_level=1,
                       resampling='scipy',
                       ):
    """Simulate a simple two-voxel fMRI dataset from an event design.

    One "voxel" carries baseline + HRF-convolved signal, the other only
    baseline; noise is added to both.

    Parameters
    ----------
    events : None, ndarray of onsets, or list of Event/dict
        Experimental design; ``None`` selects a default onset list.
        (BUGFIX: was a mutable default argument ``events=[...]``.)
    hrf_gen : callable or ndarray
        Function of time producing the HRF, or a precomputed HRF sampled
        at ``tres`` with ``fir_length`` points.
    fir_length : int
        Number of ``tres``-spaced samples in the HRF kernel.
    nsamples : int or None
        High-resolution design length; if None, estimated from the last
        onset plus 1.5 HRF lengths.
    tr, tres : float
        Output sampling interval and high-resolution model resolution.
    baseline, signal_level, noise_level : float
        Constant offset, regressor scaling, and noise magnitude.
    noise : str
        'normal' (white Gaussian) or 'autocorrelated'.
    resampling : str
        'scipy' (FFT-based) or 'naive' (decimation; requires ``tr`` to be
        a multiple of ``tres``).

    Raises
    ------
    ValueError
        For an unknown ``resampling`` or ``noise`` spec, or a tr/tres
        combination unusable with naive resampling.
    """
    if events is None:
        events = [1, 20, 25, 50, 60, 90, 92, 140]
    if isinstance(events, np.ndarray) or not isinstance(events[0], dict):
        # plain onsets -- wrap into Event objects
        events = [Event(onset=o) for o in events]
    else:
        assert(isinstance(events, list))
        for e in events:
            assert(isinstance(e, dict))

    # play fmri
    # full-blown HRF with initial dip and undershoot ;-)
    hrf_x = np.arange(0, float(fir_length)*tres, tres)
    if isinstance(hrf_gen, np.ndarray):
        # just accept provided HRF and only verify size match
        assert(len(hrf_x) == len(hrf_gen))
        hrf = hrf_gen
    else:
        # actually generate it
        hrf = hrf_gen(hrf_x)
    if not nsamples:
        # estimate number of samples needed if not provided
        max_onset = max([e['onset'] for e in events])
        nsamples = int(max_onset/tres + len(hrf_x)*1.5)

    # come up with an experimental design: one boxcar per event
    fast_er = np.zeros(nsamples)
    for e in events:
        on = int(e['onset'] / float(tres))
        off = int((e['onset'] + e.get('duration', 1.)) / float(tres))
        if off == on:
            off += 1                      # so we have at least 1 point
        assert(range(on, off))
        fast_er[on:off] = e.get('intensity', 1)
    # high resolution model of the convolved regressor
    model_hr = np.convolve(fast_er, hrf)[:nsamples]

    # downsample the regressor to fMRI resolution
    if resampling == 'scipy':
        from scipy import signal
        model_lr = signal.resample(model_hr,
                                   int(tres * nsamples / tr),
                                   window='ham')
    elif resampling == 'naive':
        # BUGFIX: test tr < tres first -- with 0 < tr < tres we have
        # tr % tres == tr != 0, so the modulo check always fired first
        # with a misleading "not multiple" message, and this branch was
        # unreachable. Messages also now name the actual keyword,
        # 'resampling' (previously said "resample=").
        if tr < tres:
            raise ValueError("You must use resampling='scipy' since your "
                             "TR=%.2g is less than tres=%.2g" % (tr, tres))
        if tr % tres != 0.0:
            raise ValueError("You must use resampling='scipy' since your "
                             "TR=%.2g is not multiple of tres=%.2g"
                             % (tr, tres))
        step = int(tr // tres)
        model_lr = model_hr[::step]
    else:
        raise ValueError("resampling can only be 'scipy' or 'naive'. Got %r"
                         % resampling)

    # generate artifical fMRI data: two voxels one is noise, one has
    # something
    wsignal = baseline + model_lr*signal_level
    nsignal = np.ones(wsignal.shape) * baseline

    # build design matrix: bold-regressor and constant
    design = np.array([model_lr, np.repeat(1, len(model_lr))]).T

    # two 'voxel' dataset
    ds = dataset_wizard(samples=np.array((wsignal, nsignal)).T, targets=1)
    ds.a['baseline'] = baseline
    ds.a['tr'] = tr
    ds.sa['design'] = design

    # mark which feature actually carries signal
    ds.fa['signal_level'] = [signal_level, False]

    # use a separate local instead of rebinding the 'noise' parameter
    if noise == 'autocorrelated':
        # this one seems to be quite unstable and can provide really
        # funky noise at times
        noise_samples = autocorrelated_noise(ds, 1/tr, 1/(2*tr),
                                             lfnl=noise_level,
                                             hfnl=noise_level,
                                             add_baseline=False)
    elif noise == 'normal':
        noise_samples = np.random.randn(*ds.shape) * noise_level
    else:
        raise ValueError(noise)
    ds.sa['noise'] = noise_samples
    ds.samples += noise_samples
    return ds