def test_factor(collection):
    """Exercise transform.Factor under every coding scheme.

    Covers full-rank dummy coding, reduced-rank (drop-one) dummy coding,
    and effect ('mean_zero') coding, for both a single-level variable
    ('trial_type') and a multi-level variable ('respnum').
    """
    # Full-rank dummy-coding, only one unique value in variable
    trial_type = collection.variables['trial_type'].clone()
    coll = collection.clone()
    transform.Factor(coll, 'trial_type', sep='@')
    assert 'trial_type@parametric gain' in coll.variables.keys()
    pg = coll.variables['trial_type@parametric gain']
    # .tolist() forces an exact list comparison instead of relying on
    # ndarray-vs-list elementwise equality (which only truth-tests safely
    # for a single element).
    assert pg.values.unique().tolist() == [1]
    assert pg.values.shape == trial_type.values.shape

    # Reduced-rank dummy-coding, only one unique value in variable
    coll = collection.clone()
    transform.Factor(coll, 'trial_type', constraint='mean_zero')
    assert 'trial_type.parametric gain' in coll.variables.keys()
    pg = coll.variables['trial_type.parametric gain']
    assert pg.values.unique().tolist() == [1]
    assert pg.values.shape == trial_type.values.shape

    # full-rank dummy-coding, multiple values
    coll = collection.clone()
    transform.Factor(coll, 'respnum')
    targets = {'respnum.%d' % d for d in range(5)}
    assert not targets - set(coll.variables.keys())
    # Generator instead of a throwaway list inside all() (ruff C419)
    assert all(
        set(coll.variables[t].values.unique()) == {0.0, 1.0} for t in targets
    )
    # Exactly one level fires at each onset
    data = pd.concat([coll.variables[t].values for t in targets],
                     axis=1,
                     sort=True)
    assert (data.sum(1) == 1).all()

    # reduced-rank dummy-coding, multiple values
    coll = collection.clone()
    transform.Factor(coll, 'respnum', constraint='drop_one')
    targets = {'respnum.%d' % d for d in range(1, 5)}
    assert not targets - set(coll.variables.keys())
    # The reference level must be dropped
    assert 'respnum.0' not in coll.variables.keys()
    assert all(
        set(coll.variables[t].values.unique()) == {0.0, 1.0} for t in targets
    )
    # Rows for the dropped reference level sum to 0; all others to 1
    data = pd.concat([coll.variables[t].values for t in targets],
                     axis=1,
                     sort=True)
    assert set(np.unique(data.sum(1).values.ravel())) == {0., 1.}

    # Effect coding, multiple values
    coll = collection.clone()
    transform.Factor(coll, 'respnum', constraint='mean_zero')
    targets = {'respnum.%d' % d for d in range(1, 5)}
    assert not targets - set(coll.variables.keys())
    assert 'respnum.0' not in coll.variables.keys()
    # Effect coding: reference rows carry -1/k (= -0.25 for 4 columns)
    assert all(
        set(coll.variables[t].values.unique()) == {-0.25, 0.0, 1.0}
        for t in targets
    )
    data = pd.concat([coll.variables[t].values for t in targets],
                     axis=1,
                     sort=True)
    assert set(np.unique(data.sum(1).values.ravel())) == {-1., 1.}
# Example #2
def test_orthogonalize_dense(collection):
    """Orthogonalizing 'parametric gain' against RT should drive their
    within-group correlation to ~zero while leaving the dense indexing
    of both variables untouched."""
    transform.Factor(collection, 'trial_type', sep=sep)

    sr = collection.sampling_rate
    # Snapshot the dense variables before orthogonalization
    pg_before = collection['trial_type/parametric gain'].to_dense(sr)
    rt = collection['RT'].to_dense(sr)

    # Orthogonalize in place, then grab the post-transform variable
    transform.Orthogonalize(collection, variables='trial_type/parametric gain',
                            other='RT', dense=True, groupby=['run', 'subject'])
    pg_after = collection['trial_type/parametric gain']

    # Both to_dense() calls must produce identical entity indexing
    entity_cols = ['subject', 'run']
    rt_entities = rt.to_df()[entity_cols]
    assert pg_before.to_df()[entity_cols].equals(rt_entities)
    assert pg_after.to_df()[entity_cols].equals(rt_entities)

    # Per-(run, subject) correlation with RT: nontrivial before, ~0 after
    stacked = np.c_[rt.values, pg_before.values, pg_after.values]
    frame = pd.DataFrame(stacked, columns=['rt', 'pre', 'post'])
    grouper = rt.get_grouper(['run', 'subject'])
    corr_before = frame.groupby(grouper).apply(lambda g: g.corr().iloc[0, 1])
    corr_after = frame.groupby(grouper).apply(lambda g: g.corr().iloc[0, 2])
    assert (corr_before > 0.2).any()
    assert (corr_after < 0.0001).all()
# Example #3
def test_or(collection):
    """transform.Or: disjunction over all factor levels, and OR of a
    variable with its own clone."""
    coll = collection.clone()
    transform.Factor(coll, 'respnum')
    names = ['respnum.%d' % i for i in range(5)]

    # Every onset belongs to exactly one level, so the disjunction is all 1s
    transform.Or(coll, names, output='disjunction')
    assert (coll.variables['disjunction'].values == 1).all()

    # OR of a column with its own copy must reproduce the column
    coll['copy'] = coll.variables['respnum.0'].clone()
    transform.Or(coll, ['respnum.0', 'copy'], output='or')
    or_values = coll.variables['or'].values.astype(float)
    assert or_values.equals(coll.variables['respnum.0'].values)
# Example #4
def test_and(collection):
    """transform.And must reject misaligned sparse inputs unless dense=True
    forces realignment."""
    coll = collection.clone()
    transform.Factor(coll, 'respnum')
    names = ['respnum.%d' % i for i in range(5)]

    # Deliberately shift one variable so its onsets no longer line up
    coll.variables['respnum.0'].onset += 1

    # Misaligned sparse variables should be refused outright...
    with pytest.raises(ValueError):
        transform.And(coll, names, output='misaligned')

    # ...but densification realigns them, so this call succeeds
    transform.And(coll, names, output='misaligned', dense=True)
# Example #5
def test_resample_dense(collection):
    """Resampling a dense collection scales variable lengths by the ratio
    of new to old sampling rate, both up (50 Hz) and down (5 Hz)."""
    target_sr = 50
    source_sr = collection.sampling_rate
    up_factor = float(target_sr) / source_sr

    # Densify RT and keep a pristine copy to compare lengths against
    collection['RT'] = collection['RT'].to_dense(source_sr)
    rt_original = collection['RT'].clone()
    collection.resample(target_sr, in_place=True)
    n_expected = math.floor(len(rt_original.values) * up_factor)
    assert n_expected == len(collection['RT'].values)
    # Should work after explicitly converting categoricals
    transform.Factor(collection, 'trial_type')

    target_sr2 = 5
    down_factor = float(target_sr2) / source_sr
    collection.resample(target_sr2, force_dense=True, in_place=True)
    n_resampled = len(collection['parametric gain'].values)
    assert len(rt_original.values) == math.ceil(float(n_resampled / down_factor))