Exemple #1
0
def test_combine():
    "Test combine()"
    # two independent copies of the same synthetic dataset
    ds1 = datasets.get_uts()
    ds2 = datasets.get_uts()
    n = ds1.n_cases
    ds = combine((ds1, ds2))
    # cases from ds2 follow the cases from ds1
    assert_array_equal(ds2['Y'].x, ds['Y'].x[n:], "Basic combine")

    # combining datasets with non-matching keys
    del ds1['Y']
    del ds2['YCat']
    ds = combine((ds1, ds2))
    assert_array_equal(ds2['Y'].x, ds['Y'].x[n:],
                       "Combine with missing Var")
    assert_true(np.all(ds1['YCat'] == ds['YCat'][:n]),
                "Combine with missing Factor")

    # combine NDVar with unequal dimensions: sensors are intersected
    ds = datasets.get_uts(utsnd=True)
    full = ds['utsnd']
    sub_a = full.sub(sensor=['0', '1', '2', '3'])
    sub_b = full.sub(sensor=['1', '2', '3', '4'])
    ds1 = Dataset(sub_a)
    ds2 = Dataset(sub_b)
    dsc = combine((ds1, ds2))
    merged = dsc['utsnd']
    assert_equal(merged.sensor.names, ['1', '2', '3'],
                 "Sensor dimension intersection failed.")
    dims = ('case', 'sensor', 'time')
    # expected data: shared sensors '1'-'3' from both inputs, concatenated
    expected = np.concatenate((sub_a.get_data(dims)[:, 1:],
                               sub_b.get_data(dims)[:, :3]))
    assert_array_equal(merged.get_data(dims), expected, "combine utsnd")
Exemple #2
0
def test_combine():
    "Test combine()"
    # two independent copies of the same synthetic dataset
    ds1 = datasets.get_uts()
    ds2 = datasets.get_uts()
    ds = combine((ds1, ds2))
    # cases from ds2 follow the cases from ds1 in the combined dataset
    assert_array_equal(ds2['Y'].x, ds['Y'].x[ds1.n_cases:], "Basic combine")
    # remove a key from each input to test combining with non-matching keys
    del ds1['Y']
    del ds2['YCat']
    ds = combine((ds1, ds2))
    assert_array_equal(ds2['Y'].x, ds['Y'].x[ds1.n_cases:], "Combine with "
                       "missing Var")
    ok_(np.all(ds1['YCat'] == ds['YCat'][:ds1.n_cases]),
        "Combine with missing "
        "Factor")

    # combining data-objects of different types is an error
    assert_raises(TypeError, combine, (ds2['A'], ds2['Y']))

    # combine NDVar with unequal dimensions
    ds = datasets.get_uts(utsnd=True)
    y = ds['utsnd']
    # overlapping but different sensor subsets
    y1 = y.sub(sensor=['0', '1', '2', '3'])
    y2 = y.sub(sensor=['1', '2', '3', '4'])
    ds1 = Dataset((y1, ))
    ds2 = Dataset((y2, ))
    dsc = combine((ds1, ds2))
    y = dsc['utsnd']
    # only the shared sensors survive
    eq_(y.sensor.names, ['1', '2', '3'], "Sensor dimension "
        "intersection failed.")
    dims = ('case', 'sensor', 'time')
    # expected data: the shared-sensor slice from each input, concatenated
    ref = np.concatenate((y1.get_data(dims)[:, 1:], y2.get_data(dims)[:, :3]))
    assert_array_equal(y.get_data(dims), ref, "combine utsnd")
Exemple #3
0
def test_difference():
    "Test table.difference"
    ds = datasets.get_uv()
    # add a variable that should stay in the dataset (it is a pure function
    # of the 'rm' factor, so it is unambiguous within each subject)
    labels = {c: c[-1] for c in ds['rm'].cells}
    ds['rmf'] = Factor(ds['rm'], labels=labels)

    # within-subject difference a1 - a2
    dds = table.difference('fltvar', 'A', 'a1', 'a2', 'rm', ds=ds)
    assert repr(dds) == "<Dataset n_cases=20 {'rm':F, 'fltvar':V, 'rmf':F}>"
    # 'rmf' is carried over unchanged
    assert_array_equal(dds['rmf'], Factor(dds['rm'], labels=labels))
    # difference computed separately within each cell of B
    dds = table.difference('fltvar', 'A', 'a1', 'a2', 'rm', by='B', ds=ds)
    assert repr(dds) == "<Dataset n_cases=40 {'rm':F, 'fltvar':V, 'B':F, 'rmf':F}>"
    # difference of the difference
    ddds = table.difference('fltvar', 'B', 'b1', 'b2', 'rm', ds=dds)
    assert repr(ddds) == "<Dataset n_cases=20 {'rm':F, 'fltvar':V, 'rmf':F}>"
    # interaction difference: (a1, b1) - (a2, b2)
    dds = table.difference('fltvar', 'A%B', ('a1', 'b1'), ('a2', 'b2'), 'rm', ds=ds)
    assert repr(dds) == "<Dataset n_cases=20 {'rm':F, 'fltvar':V, 'rmf':F}>"

    # create bigger dataset
    ds2 = ds.copy()
    ds['C', :] = 'c1'
    ds2['C', :] = 'c2'
    ds = combine((ds, ds2))
    dds = table.difference('fltvar', 'A', 'a1', 'a2', 'rm', by='B%C', ds=ds)
    assert repr(dds) == "<Dataset n_cases=80 {'rm':F, 'fltvar':V, 'B':F, 'C':F, 'rmf':F}>"
    dds = table.difference('fltvar', 'A%B', ('a1', 'b1'), ('a2', 'b2'), 'rm', by='C', ds=ds)
    assert repr(dds) == "<Dataset n_cases=40 {'rm':F, 'fltvar':V, 'C':F, 'rmf':F}>"
Exemple #4
0
def test_combine():
    "Test combine()"
    ds1 = datasets.get_uts()
    ds2 = datasets.get_uts()
    n_first = ds1.n_cases
    combined = combine((ds1, ds2))
    assert_array_equal(ds2['Y'].x, combined['Y'].x[n_first:])

    # combine Datasets with unequal keys
    del ds1['Y']
    # default: a key missing from one input raises, in either order
    assert_raises(KeyError, combine, (ds1, ds2))
    assert_raises(KeyError, combine, (ds2, ds1))
    # incomplete='drop': keys missing from any input are dropped
    del ds2['YCat']
    combined = combine((ds1, ds2), incomplete='drop')
    ok_('Y' not in combined)
    ok_('YCat' not in combined)
    # incomplete='fill in': missing numeric data becomes NaN, factors ''
    combined = combine((ds1, ds2), incomplete='fill in')
    assert_array_equal(combined['Y'].x[n_first:], ds2['Y'].x)
    assert_array_equal(np.isnan(combined['Y'].x[:n_first]), True)
    assert_array_equal(combined['YCat'][:n_first], ds1['YCat'])
    assert_array_equal(combined['YCat'][n_first:], '')

    # invalid input
    assert_raises(ValueError, combine, ())
    assert_raises(TypeError, combine, (ds2['A'], ds2['Y']))

    # combine NDVar with unequal dimensions: sensors are intersected
    ds = datasets.get_uts(utsnd=True)
    utsnd = ds['utsnd']
    left = utsnd.sub(sensor=['0', '1', '2', '3'])
    right = utsnd.sub(sensor=['1', '2', '3', '4'])
    ds1 = Dataset((left,), info={'a': np.arange(2), 'b': [np.arange(2)]})
    ds2 = Dataset((right,), info={'a': np.arange(2), 'b': [np.arange(2)]})
    dsc = combine((ds1, ds2))
    merged = dsc['utsnd']
    eq_(list(merged.sensor.names), ['1', '2', '3'], "Sensor dimension intersection")
    dims = ('case', 'sensor', 'time')
    expected = np.concatenate((left.get_data(dims)[:, 1:],
                               right.get_data(dims)[:, :3]))
    assert_array_equal(merged.get_data(dims), expected, "combine utsnd")
    # info dictionaries with equal content are merged
    assert_array_equal(dsc.info['a'], np.arange(2))
    eq_(len(dsc.info['b']), 1)
    assert_array_equal(dsc.info['b'][0], np.arange(2))
Exemple #5
0
def test_combine():
    "Test combine()"
    ds1 = datasets.get_uts()
    ds2 = datasets.get_uts()
    n = ds1.n_cases
    ds = combine((ds1, ds2))
    # cases from ds2 follow the cases from ds1
    assert_array_equal(ds2['Y'].x, ds['Y'].x[n:])

    # combine Datasets with unequal keys
    del ds1['Y']
    # default: a key missing from one input raises KeyError (either order)
    assert_raises(KeyError, combine, (ds1, ds2))
    assert_raises(KeyError, combine, (ds2, ds1))
    # incomplete='drop': keys missing from any input are dropped
    del ds2['YCat']
    ds = combine((ds1, ds2), incomplete='drop')
    ok_('Y' not in ds)
    ok_('YCat' not in ds)
    # incomplete='fill in': missing numeric data becomes NaN, factors ''
    ds = combine((ds1, ds2), incomplete='fill in')
    assert_array_equal(ds['Y'].x[n:], ds2['Y'].x)
    assert_array_equal(np.isnan(ds['Y'].x[:n]), True)
    assert_array_equal(ds['YCat'][:n], ds1['YCat'])
    assert_array_equal(ds['YCat'][n:], '')

    # invalid input
    assert_raises(ValueError, combine, ())
    assert_raises(TypeError, combine, (ds2['A'], ds2['Y']))

    # combine NDVar with unequal dimensions (sensor sets are intersected)
    ds = datasets.get_uts(utsnd=True)
    y = ds['utsnd']
    y1 = y.sub(sensor=['0', '1', '2', '3'])
    y2 = y.sub(sensor=['1', '2', '3', '4'])
    ds1 = Dataset((y1,), info={'a': np.arange(2), 'b': [np.arange(2)]})
    ds2 = Dataset((y2,), info={'a': np.arange(2), 'b': [np.arange(2)]})
    dsc = combine((ds1, ds2))
    y = dsc['utsnd']
    eq_(list(y.sensor.names), ['1', '2', '3'], "Sensor dimension intersection")
    dims = ('case', 'sensor', 'time')
    # expected data: the shared-sensor slice from each input, concatenated
    ref = np.concatenate((y1.get_data(dims)[:, 1:], y2.get_data(dims)[:, :3]))
    assert_array_equal(y.get_data(dims), ref, "combine utsnd")
    # info dictionaries with equal content are merged
    assert_array_equal(dsc.info['a'], np.arange(2))
    eq_(len(dsc.info['b']), 1)
    assert_array_equal(dsc.info['b'][0], np.arange(2))
Exemple #6
0
def evoked_sns(e):
    """Load baseline-corrected sensor-space evoked data for every subject in *e*.

    Iterating over the experiment *e* sets the current subject, so each
    ``load_evoked`` call returns that subject's data; the per-subject
    datasets are merged into one with ``combine``.
    """
    e.set(epoch='epoch_behav_fltr', model='Type%DominanceCategory')
    dss = [e.load_evoked(baseline=(None, 0)) for _ in e]
    return combine(dss)
Exemple #7
0
def evoked_stc(e):
    """Load source-space evoked data (morphed NDVars) for every subject in *e*.

    Iterating over the experiment *e* sets the current subject; the
    per-subject datasets are merged into one with ``combine``.
    """
    e.set(epoch='epoch_behav_fltr', model='Type%DominanceCategory')
    dss = [e.load_evoked_stc(sns_baseline=(None, 0), morph_ndvar=True) for _ in e]
    return combine(dss)
Exemple #8
0
def test_difference():
    """Test table.difference (smoke test: print the resulting tables)."""
    ds = datasets.get_uv()
    # NOTE: converted Python-2 print statements to print() calls; the
    # single-argument form behaves identically on Python 2 and 3.
    # simple within-subject difference, difference by cell of B, and
    # interaction difference
    print(table.difference("fltvar", "A", "a1", "a2", "rm", ds=ds))
    print(table.difference("fltvar", "A", "a1", "a2", "rm", by="B", ds=ds))
    print(table.difference("fltvar", "A%B", ("a1", "b1"), ("a2", "b2"), "rm", ds=ds))

    # create bigger dataset
    ds["C", :] = "c1"
    ds2 = datasets.get_uv()
    ds2["C", :] = "c2"
    ds = combine((ds, ds2))
    print(table.difference("fltvar", "A", "a1", "a2", "rm", "B%C", ds=ds))
    print(table.difference("fltvar", "A%B", ("a1", "b1"), ("a2", "b2"), "rm", "C", ds=ds))
Exemple #9
0
def test_difference():
    "Test table.difference"
    # NOTE: fixed docstring typo ("tabe") and converted Python-2 print
    # statements to print() calls (identical behavior for a single argument).
    ds = datasets.get_uv()
    print(table.difference('fltvar', 'A', 'a1', 'a2', 'rm', ds=ds))
    print(table.difference('fltvar', 'A', 'a1', 'a2', 'rm', by='B', ds=ds))
    print(table.difference('fltvar', 'A%B', ('a1', 'b1'), ('a2', 'b2'), 'rm',
                           ds=ds))

    # create bigger dataset
    ds['C', :] = 'c1'
    ds2 = datasets.get_uv()
    ds2['C', :] = 'c2'
    ds = combine((ds, ds2))
    print(table.difference('fltvar', 'A', 'a1', 'a2', 'rm', 'B%C', ds=ds))
    print(table.difference('fltvar', 'A%B', ('a1', 'b1'), ('a2', 'b2'), 'rm',
                           'C', ds=ds))
Exemple #10
0
def test_difference():
    "Test table.difference"
    # NOTE: converted Python-2 print statements to print() calls; the
    # single-argument form behaves identically on Python 2 and 3.
    ds = datasets.get_uv()
    print(table.difference('fltvar', 'A', 'a1', 'a2', 'rm', ds=ds))
    print(table.difference('fltvar', 'A', 'a1', 'a2', 'rm', by='B', ds=ds))
    print(table.difference('fltvar', 'A%B', ('a1', 'b1'), ('a2', 'b2'), 'rm',
                           ds=ds))

    # create bigger dataset
    ds['C', :] = 'c1'
    ds2 = datasets.get_uv()
    ds2['C', :] = 'c2'
    ds = combine((ds, ds2))
    print(table.difference('fltvar', 'A', 'a1', 'a2', 'rm', 'B%C', ds=ds))
    print(table.difference('fltvar', 'A%B', ('a1', 'b1'), ('a2', 'b2'), 'rm',
                           'C', ds=ds))
 def _generate_continuous(
     self,
     uts: UTS,  # time axis for the output
     ds: Dataset,  # events
     stim_var: str,
     code: Code,
     directory: Path,
 ):
     """Assemble one continuous predictor from per-stimulus files.

     Loads a cached representation for every stimulus named in
     ``ds[stim_var]`` and places it on the ``uts`` time axis at each
     event's onset (read from the ``ds['T_relative']`` column).  All
     cached stimuli must load as a single type: either ``Dataset``
     (event lists, shifted in time and converted via ``_ds_to_ndvar``)
     or ``NDVar`` (copied into a zero-initialized output array).
     Returns the assembled predictor.
     """
     # place multiple input files into a continuous predictor
     cache = {
         stim: self._load(uts.tstep,
                          code.with_stim(stim).nuts_file_name(self.columns),
                          directory)
         for stim in ds[stim_var].cells
     }
     # determine type; mixing Dataset- and NDVar-based stimuli is not supported
     stim_type = {type(s) for s in cache.values()}
     assert len(stim_type) == 1
     stim_type = stim_type.pop()
     # generate x
     if stim_type is Dataset:
         dss = []
         for t, stim in ds.zip('T_relative', stim_var):
             # shift the stimulus event times to the event onset t
             x = cache[stim].copy()
             x['time'] += t
             dss.append(x)
             if code.nuts_method:
                 # append a stop marker for this stimulus
                 x_stop_ds = t_stop_ds(x, t)
                 dss.append(x_stop_ds)
         x = self._ds_to_ndvar(combine(dss), uts, code)
     elif stim_type is NDVar:
         # use any cached NDVar as template for the non-time dimensions
         v = cache[ds[0, stim_var]]
         dimnames = v.get_dimnames(first='time')
         dims = (uts, *v.get_dims(dimnames[1:]))
         x = NDVar.zeros(dims, code.key)
         for t, stim in ds.zip('T_relative', stim_var):
             # copy the stimulus data into the output at the event onset
             x_stim = cache[stim]
             i_start = uts._array_index(t + x_stim.time.tmin)
             i_stop = i_start + len(x_stim.time)
             if i_stop > len(uts):
                 raise ValueError(
                     f"{code.string_without_rand} for {stim} is longer than the data"
                 )
             x.x[i_start:i_stop] = x_stim.get_data(dimnames)
     else:
         raise RuntimeError(f"stim_type={stim_type!r}")
     return x
Exemple #12
0
def write_logs(dss, output='concatenated', outFold=outFold, includeNonwords=False):
    """Write trial-log datasets to disk as text files.

    'concatenated': one output file
    'separated': multiple output files

    Unless ``includeNonwords`` is true, trials with ``RealWord == 0`` are
    dropped before writing.  ``outFold`` defaults to the module-level
    output folder (bound at definition time).

    NOTE: fixed Python-2-only ``print`` statements (``print(x)`` behaves
    identically on both versions) and the ``== False`` comparison idiom.
    """
    if output == 'separated':
        # one file per dataset, named after the dataset
        for ds in dss:
            if not includeNonwords:
                ds = ds[ds['RealWord'].isnot(0)]
            outPath = os.path.join(outFold, ds.name)
            ds.save_txt(outPath)
        print('Done writing separate log files')
    elif output == 'concatenated':
        # merge all datasets, then filter and write once
        combined = combine(dss)
        if not includeNonwords:
            combined = combined[combined['RealWord'].isnot(0)]
        outPath = os.path.join(outFold, 'all_subjects')
        combined.save_txt(outPath)
        print('Done writing concatenated file')
        
def test_difference():
    "Test table.difference"
    ds = datasets.get_uv()
    # within-subject difference a1 - a2
    res = table.difference('fltvar', 'A', 'a1', 'a2', 'rm', ds=ds)
    assert repr(res) == "<Dataset n_cases=20 {'rm':F, 'fltvar':V}>"
    # computed separately within each cell of B
    res = table.difference('fltvar', 'A', 'a1', 'a2', 'rm', by='B', ds=ds)
    assert repr(res) == "<Dataset n_cases=40 {'rm':F, 'fltvar':V, 'B':F}>"
    # difference of the difference
    res_2 = table.difference('fltvar', 'B', 'b1', 'b2', 'rm', ds=res)
    assert repr(res_2) == "<Dataset n_cases=20 {'rm':F, 'fltvar':V}>"
    # interaction difference: (a1, b1) - (a2, b2)
    res = table.difference('fltvar', 'A%B', ('a1', 'b1'), ('a2', 'b2'), 'rm', ds=ds)
    assert repr(res) == "<Dataset n_cases=20 {'rm':F, 'fltvar':V}>"

    # create bigger dataset
    second = ds.copy()
    ds['C', :] = 'c1'
    second['C', :] = 'c2'
    ds = combine((ds, second))
    res = table.difference('fltvar', 'A', 'a1', 'a2', 'rm', by='B%C', ds=ds)
    assert repr(res) == "<Dataset n_cases=80 {'rm':F, 'fltvar':V, 'B':F, 'C':F}>"
    res = table.difference('fltvar', 'A%B', ('a1', 'b1'), ('a2', 'b2'), 'rm', by='C', ds=ds)
    assert repr(res) == "<Dataset n_cases=40 {'rm':F, 'fltvar':V, 'C':F}>"
Exemple #14
0
def test_celltable():
    "Test the Celltable class."
    ds = datasets.get_uts()
    # add a 4-level factor for cat-argument tests
    ds['cat'] = Factor('abcd', repeat=15)

    # basic grouping of Y by A, and repr for different Y/X input types
    ct = Celltable('Y', 'A', ds=ds)
    eq_(ct.n_cases, 60)
    eq_(ct.n_cells, 2)
    eq_(repr(ct), "Celltable(Y, A)")
    eq_(repr(Celltable(ds['Y'].x, 'A', ds=ds)), "Celltable(<ndarray>, A)")
    eq_(repr(Celltable(ds['Y'].x, ds['A'].x, ds=ds)),
        "Celltable(<ndarray>, <Factor>)")

    # match='rm' averages within subject, halving the number of cases
    ct = Celltable('Y', 'A', match='rm', ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.n_cells, 2)

    # cat argument: restricts and orders the cells
    ct = Celltable('Y', 'cat', cat=('c', 'b'), ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.X[0], 'c')
    eq_(ct.X[-1], 'b')
    # a cat entry that is not a cell of X is an error
    assert_raises(ValueError, Celltable, 'Y', 'cat', cat=('c', 'e'), ds=ds)

    ct = Celltable('Y', 'A', match='rm', ds=ds)
    eq_(ct.n_cases, 30)
    # with a full within-subject design both cells contain the same subjects
    assert np.all(ct.groups['a0'] == ct.groups['a1'])

    ct = Celltable('Y', 'cat', match='rm', cat=('c', 'b'), ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.X[0], 'c')
    eq_(ct.X[-1], 'b')

    # catch unequal length
    assert_raises(ValueError, Celltable, ds['Y', :-1], 'cat', ds=ds)
    assert_raises(ValueError, Celltable, ds['Y', :-1], 'cat', match='rm', ds=ds)

    # coercion of numerical X (boolean Var is converted to a Factor)
    X = ds.eval("A == 'a0'")
    ct = Celltable('Y', X, cat=(None, None), ds=ds)
    eq_(('False', 'True'), ct.cat)
    assert_array_equal(ct.data['True'], ds['Y', X])

    # explicit cat order is respected after coercion
    ct = Celltable('Y', X, cat=('True', 'False'), ds=ds)
    eq_(('True', 'False'), ct.cat)
    assert_array_equal(ct.data['True'], ds['Y', X])

    # test coercion of Y
    ct = Celltable(ds['Y'].x, 'A', ds=ds)
    assert_is_instance(ct.Y, np.ndarray)
    ct = Celltable(ds['Y'].x, 'A', ds=ds, coercion=asvar)
    assert_is_instance(ct.Y, Var)

    # test sub: sub argument equals pre-subsetting the dataset
    ds_sub = ds.sub("A == 'a0'")
    ct_sub = Celltable('Y', 'B', ds=ds_sub)
    ct = Celltable('Y', 'B', sub="A == 'a0'", ds=ds)
    assert_dataobj_equal(ct_sub.Y, ct.Y)

    # test sub with rm
    ct_sub = Celltable('Y', 'B', match='rm', ds=ds_sub)
    ct = Celltable('Y', 'B', match='rm', sub="A == 'a0'", ds=ds)
    assert_dataobj_equal(ct_sub.Y, ct.Y)

    # Interaction match
    ct = Celltable('Y', 'A', match='B % rm', ds=ds)
    ok_(ct.all_within)
    assert_dataobj_equal(combine((ct.data['a0'], ct.data['a1'])), ds['Y'])

    # test rm sorting: cases are re-sorted consistently by match value
    ds = Dataset()
    ds['rm'] = Factor('abc', repeat=4)
    ds['Y'] = Var(np.arange(3.).repeat(4))
    ds['X'] = Factor('ab', repeat=2, tile=3)
    idx = np.arange(12)
    np.random.shuffle(idx)
    ds = ds[idx]
    ct = Celltable('Y', 'X', 'rm', ds=ds)
    assert_array_equal(ct.match, Factor('abc', tile=2))
    assert_array_equal(ct.Y, np.tile(np.arange(3.), 2))
    assert_array_equal(ct.X, Factor('ab', repeat=3))
Exemple #15
0
def test_celltable():
    "Test the Celltable class."
    ds = datasets.get_uts()
    # add a 4-level factor for cat-argument tests
    ds['cat'] = Factor('abcd', repeat=15)

    # basic grouping of Y by A
    ct = Celltable('Y', 'A', ds=ds)
    eq_(ct.n_cases, 60)
    eq_(ct.n_cells, 2)

    # match='rm' averages within subject, halving the number of cases
    ct = Celltable('Y', 'A', match='rm', ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.n_cells, 2)

    # cat argument: restricts and orders the cells
    ct = Celltable('Y', 'cat', cat=('c', 'b'), ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.X[0], 'c')
    eq_(ct.X[-1], 'b')
    # a cat entry that is not a cell of X is an error
    assert_raises(ValueError, Celltable, 'Y', 'cat', cat=('c', 'e'), ds=ds)

    ct = Celltable('Y', 'A', match='rm', ds=ds)
    eq_(ct.n_cases, 30)
    # with a full within-subject design both cells contain the same subjects
    assert np.all(ct.groups['a0'] == ct.groups['a1'])

    ct = Celltable('Y', 'cat', match='rm', cat=('c', 'b'), ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.X[0], 'c')
    eq_(ct.X[-1], 'b')

    # catch unequal length
    assert_raises(ValueError, Celltable, ds['Y', :-1], 'cat', ds=ds)
    assert_raises(ValueError, Celltable, ds['Y', :-1], 'cat', match='rm', ds=ds)

    # coercion of numerical X (boolean Var is converted to a Factor)
    X = ds.eval("A == 'a0'")
    ct = Celltable('Y', X, cat=(None, None), ds=ds)
    eq_(('False', 'True'), ct.cat)
    assert_array_equal(ct.data['True'], ds['Y', X])

    # explicit cat order (given as booleans here) is respected after coercion
    ct = Celltable('Y', X, cat=(True, False), ds=ds)
    eq_(('True', 'False'), ct.cat)
    assert_array_equal(ct.data['True'], ds['Y', X])

    # test coercion of Y
    ct = Celltable(ds['Y'].x, 'A', ds=ds)
    assert_is_instance(ct.Y, np.ndarray)
    ct = Celltable(ds['Y'].x, 'A', ds=ds, coercion=asvar)
    assert_is_instance(ct.Y, Var)

    # test sub: sub argument equals pre-subsetting the dataset
    ds_sub = ds.sub("A == 'a0'")
    ct_sub = Celltable('Y', 'B', ds=ds_sub)
    ct = Celltable('Y', 'B', sub="A == 'a0'", ds=ds)
    assert_dataobj_equal(ct_sub.Y, ct.Y)

    # test sub with rm
    ct_sub = Celltable('Y', 'B', match='rm', ds=ds_sub)
    ct = Celltable('Y', 'B', match='rm', sub="A == 'a0'", ds=ds)
    assert_dataobj_equal(ct_sub.Y, ct.Y)

    # Interaction match
    ct = Celltable('Y', 'A', match='B % rm', ds=ds)
    ok_(ct.all_within)
    assert_dataobj_equal(combine((ct.data['a0'], ct.data['a1'])), ds['Y'])

    # test rm sorting: cases are re-sorted consistently by match value
    ds = Dataset()
    ds['rm'] = Factor('abc', repeat=4)
    ds['Y'] = Var(np.arange(3.).repeat(4))
    ds['X'] = Factor('ab', repeat=2, tile=3)
    idx = np.arange(12)
    np.random.shuffle(idx)
    ds = ds[idx]
    ct = Celltable('Y', 'X', 'rm', ds=ds)
    assert_array_equal(ct.match, Factor('abc', tile=2))
    assert_array_equal(ct.Y, np.tile(np.arange(3.), 2))
    assert_array_equal(ct.X, Factor('ab', repeat=3))
Exemple #16
0
# In case of repetitive trials (where you will have `eelbrain.cases`), supply one predictor variable for each trial. Different predictor variables for a single trial can be nested (See README).
#
# In this example we use two different predictor variables for a single trial

# For the common response we put impulses at the presentation times of both the audio stimuli (i.e. beeps).
stim1 = np.zeros(len(time), dtype=np.double)
stim1[events[:, 0]] = 1.

# To distinguish between standard and deviant beeps, we assign 1 and -1 impulses respectively.
# (deviants are the events with trigger code 2 in the third event column)
stim2 = stim1.copy()
stim2[events[np.where(events[:, 2] == 2), 0]] = -1.
stim1 = eelbrain.NDVar(stim1, time)
stim2 = eelbrain.NDVar(stim2, time)

# visualize the stimulus
s = plot.LineStack(eelbrain.combine([stim1, stim2]))

# ## Noise covariance estimation
# Now we shall estimate the noise covariance from empty room data. Don't forget to apply the same pre-processing steps to the empty room data.
# Instead, you can choose to use pre-stimulus recordings to compute the noise covariance.

Noise_path = (data_path + '/MEG/bst_auditory/' + 'S01_Noise_20131218_01.ds')
raw_empty_room = read_raw_ctf(Noise_path, preload=True)

# Apply the same pre-processing steps to empty room data
# raw_empty_room.info['bads'] = [
#     bb for bb in raw_AEF.info['bads'] if 'EEG' not in bb]
# raw_empty_room.add_proj(
#     [pp.copy() for pp in raw_AEF.info['projs'] if 'EEG' not in pp['desc']])

# raw_empty_room.plot_psd(tmax=60., average=False)
Exemple #17
0
#===============================================================================
# Reshape pickled OLS results from wide to long format.
#
# current format: [subject1, stem, stemS, stemEd],
#                 [subject2, stem, stemS, stemEd],
#                 ...
#
# new format: [subject1, stem],
#             [subject1, stemS], 
#             ...
#             [subject2, stem], 
#             ...
#===============================================================================

dr = '/Volumes/Backup/sufAmb/pickled/'
IVs = ['VerbGivenWord_nocov', 'VerbGivenWord', 'Ambiguity', 'WrdVerbyWeighted', 'WrdBiasWeighted']

for v in IVs:
    fil = dr+'ols_'+v+'.pickled'
    data = load.unpickle(fil)
    # build long-format columns: c1 = subject, c2 = word type, c3 = betas
    c1, c3 = [], []
    c2 = ['all', 'stem', 'stemS', 'stemEd']*data.n_cases
    for s in range(data.n_cases):
        # each wide-format row becomes 4 long-format rows
        c1.extend( [ data['subject'][s] ]*4 )
        c3.extend( [ data['all'][s], data['stem'][s], data['stemS'][s], data['stemEd'][s] ] )
        
    c1 = Factor(c1)
    c2 = Factor(c2)
    c3 = combine(c3)  # merge the list of betas into a single data-object
        
    newds = Dataset(('subject',c1), ('Type',c2), ('beta',c3), info=data.info)
    
    # overwrite the wide-format file with the long-format dataset
    save.pickle(newds, fil)
Exemple #18
0
def ols(e, IVs, DV='src', factor='Type', level='all', ndvar=True, parc='frontal_temporal_parietal'):
    """OLS for main effect, level of a factor, or main effect and all levels of a factor.

    Fits a separate model to each condition (level), so values of a particular
    variable may not be comparable across conditions, i.e. you can't do an
    ANCOVA this way. Instead, dummy code the conditions in the model matrix and
    compare these within the condition 'main'.

    Notes: currently this only handles a single factor.

    BUG FIXES relative to the previous version:
    - ``level == 'all'`` no longer raises "Level is not recognized" (the
      validity check used ``or`` where ``and`` was needed).
    - the ``level == 'all'`` branch assigned nothing (``levels == ...`` was a
      comparison, not an assignment) and then crashed with NameError; it now
      builds ``['main'] + cells``.
    - replaced Python-2-only ``print`` statement and deprecated ``has_key``.
    """
    e.set(epoch='epoch_behav_fltr', parc=parc)

    # subjects that have an individual MRI (handled differently below)
    mri = ['R0053', 'R0606', 'R0632', 'R0684', 'R0734', 'R0845', 'R0850']

    # try to catch errors in input arguments early
    if DV != "src":
        raise NotImplementedError("untested for DV != src.")
    if not ndvar:
        raise NotImplementedError("untested for ndvar != True.")
    if not hasattr(IVs, "__getitem__"):
        raise TypeError("IVs is of type %s, which has no attribute __getitem__." % type(IVs))
    if not hasattr(IVs, "__iter__"):
        raise TypeError("IVs is of type %s, which has no attribute __iter__." % type(IVs))
    events = e.load_events()
    for iv in IVs:
        if iv not in events:
            raise ValueError("Variable %s is not in the log file." % iv)
    if factor is not None:
        if factor not in events:
            raise ValueError("Factor %s is not in the log file." % factor)
        # 'all' is a special value; anything else must be a cell of the factor
        if level != 'all' and level not in events[factor].cells:
            raise ValueError("Level is not recognized.")
    else:
        if level != 'all':
            raise ValueError("Level must be == 'all' when factor == None.")
    del events
    # verify the parc exists for a non-MRI subject before the expensive loop
    for s in e:
        if s in mri:
            continue
        e.load_annot()  # raises an error if parc file not found in fsaverage
        break

    # each of these lists is a column; number of rows == len(betas)
    betas = []
    var_names = []  # renamed from `vars` to avoid shadowing the builtin
    conditions = []
    subjects = []

    for s in e:
        print(s)

        # avoid IOError if parc only exists in fsaverage; parc will be reset
        # below after loading epochs
        e.set(parc='')

        # load src epochs
        ds = e.load_epochs_stc(sns_baseline=(None, 0), ndvar=ndvar)

        # create smaller dataset that is just the parcellation
        if s in mri:
            ds[DV] = morph_source_space(ds[DV], 'fsaverage')
        else:
            ds[DV].source.subject = 'fsaverage'
        ds[DV].source.set_parc(parc)  # reset parc
        index = ds[DV].source.parc.isnotin(['unknown-lh', 'unknown-rh'])  # remove these
        ds[DV] = ds[DV].sub(source=index)

        # set which levels are to be used
        if factor is None:
            levels = ['main']
        elif level == 'all':
            levels = ['main'] + list(ds[factor].cells)
        else:
            levels = [level]

        # regress IVs within each level
        for l in levels:
            if l == 'main':
                lev = ds
            else:
                lev = ds.sub("%s == '%s'" % (factor, l))
            mod = tuple(lev[iv] for iv in IVs)
            b = lev[DV].ols(mod)
            for name, value in zip(IVs, b):
                var_names.append(name)
                betas.append(value)
                conditions.append(l)
                subjects.append(s)
        # after each iteration of e, number of rows in betas_ds is increased
        # by n_levels * n_vars

    betas = combine(betas)  # combine list of NDVars into single NDVar
    vars = Factor(var_names, name='predictors', random=False)
    conditions = Factor(conditions, name='levels', random=False)
    subjects = Factor(subjects, name='subjects', random=True)
    betas_ds = Dataset(('subject', subjects), ('condition', conditions), ('predictor', vars), ('beta', betas),
                       info={'predictors': IVs, 'outcome': DV, 'factor': factor})

    return betas_ds