Example 1
def test_dprime():
    """Test dprime and dprime_2afc accuracy
    """
    assert_raises(TypeError, ea.dprime, 'foo', 0, 0, 0)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ea.dprime((1.1, 0, 0, 0))
    assert_equal(len(w), 1)
    assert_equal(0, ea.dprime((1, 0, 1, 0)))
    assert_equal(np.inf, ea.dprime((1, 0, 2, 1), False))
    assert_equal(ea.dprime((5, 0, 1, 0)), ea.dprime_2afc((5, 1)))
    assert_raises(ValueError, ea.dprime, np.ones((5, 4, 3)))
    assert_raises(ValueError, ea.dprime, (1, 2, 3))
    assert_raises(ValueError, ea.dprime_2afc, (1, 2, 3))
    assert_equal(np.sum(ea.dprime_2afc([[5, 1], [1, 5]])), 0)
Example 2
def test_dprime():
    """Test dprime and dprime_2afc accuracy."""
    assert_raises(TypeError, ea.dprime, 'foo', 0, 0, 0)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ea.dprime((1.1, 0, 0, 0))
    assert_equal(len(w), 1)
    for resp, want in (
            ((1, 1, 1, 1), [0, 0]),
            ((1, 0, 0, 1), [1.34898, 0.]),
            ((0, 1, 0, 1), [0, 0.67449]),
            ((0, 0, 1, 1), [0, 0]),
            ((1, 0, 1, 0), [0, -0.67449]),
            ((0, 1, 1, 0), [-1.34898, 0.]),
            ((0, 1, 1, 0), [-1.34898, 0.])):
        assert_allclose(ea.dprime(resp, return_bias=True),
                        want, atol=1e-5)
    assert_allclose([np.inf, -np.inf], ea.dprime((1, 0, 2, 1), False, True))
    assert_equal(ea.dprime((5, 0, 1, 0)), ea.dprime_2afc((5, 1)))
    assert_raises(ValueError, ea.dprime, np.ones((5, 4, 3)))
    assert_raises(ValueError, ea.dprime, (1, 2, 3))
    assert_raises(ValueError, ea.dprime_2afc, (1, 2, 3))
    assert_equal(np.sum(ea.dprime_2afc([[5, 1], [1, 5]])), 0)
    # test simple larger dimensionality support
    assert_equal(ea.dprime((5, 0, 1, 0)), ea.dprime([[[5, 0, 1, 0]]])[0][0])
Example 3
def test_dprime():
    """Test dprime and dprime_2afc accuracy."""
    assert_raises(TypeError, ea.dprime, 'foo', 0, 0, 0)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ea.dprime((1.1, 0, 0, 0))
    assert_equal(len(w), 1)
    assert_equal(0, ea.dprime((1, 0, 1, 0)))
    assert_equal(np.inf, ea.dprime((1, 0, 2, 1), False))
    assert_equal(ea.dprime((5, 0, 1, 0)), ea.dprime_2afc((5, 1)))
    assert_raises(ValueError, ea.dprime, np.ones((5, 4, 3)))
    assert_raises(ValueError, ea.dprime, (1, 2, 3))
    assert_raises(ValueError, ea.dprime_2afc, (1, 2, 3))
    assert_equal(np.sum(ea.dprime_2afc([[5, 1], [1, 5]])), 0)
    # test simple larger dimensionality support
    assert_equal(ea.dprime((5, 0, 1, 0)), ea.dprime([[[5, 0, 1, 0]]])[0][0])
Example 4
def test_dprime():
    """Test dprime and dprime_2afc accuracy."""
    assert_raises(TypeError, ea.dprime, 'foo', 0, 0, 0)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ea.dprime((1.1, 0, 0, 0))
    assert_equal(len(w), 1)
    for resp, want in (((1, 1, 1, 1), [0, 0]), ((1, 0, 0, 1), [1.34898, 0.]),
                       ((0, 1, 0, 1), [0, 0.67449]), ((0, 0, 1, 1), [0, 0]),
                       ((1, 0, 1, 0), [0, -0.67449]),
                       ((0, 1, 1, 0), [-1.34898,
                                       0.]), ((0, 1, 1, 0), [-1.34898, 0.])):
        assert_allclose(ea.dprime(resp, return_bias=True), want, atol=1e-5)
    assert_allclose([np.inf, -np.inf], ea.dprime((1, 0, 2, 1), False, True))
    assert_equal(ea.dprime((5, 0, 1, 0)), ea.dprime_2afc((5, 1)))
    assert_raises(ValueError, ea.dprime, np.ones((5, 4, 3)))
    assert_raises(ValueError, ea.dprime, (1, 2, 3))
    assert_raises(ValueError, ea.dprime_2afc, (1, 2, 3))
    assert_equal(np.sum(ea.dprime_2afc([[5, 1], [1, 5]])), 0)
    # test simple larger dimensionality support
    assert_equal(ea.dprime((5, 0, 1, 0)), ea.dprime([[[5, 0, 1, 0]]])[0][0])
Example 5
def test_dprime():
    """Test dprime accuracy."""
    with pytest.warns(RuntimeWarning, match='cast to'):
        pytest.raises(IndexError, ea.dprime, 'foo')
        pytest.raises(ValueError, ea.dprime, ['foo', 0, 0, 0])
    with pytest.warns(RuntimeWarning, match='truncated'):
        ea.dprime((1.1, 0, 0, 0))
    for resp, want in (((1, 1, 1, 1), [0, 0]), ((1, 0, 0, 1), [1.34898, 0.]),
                       ((0, 1, 0, 1), [0, 0.67449]), ((0, 0, 1, 1), [0, 0]),
                       ((1, 0, 1, 0), [0, -0.67449]),
                       ((0, 1, 1, 0), [-1.34898,
                                       0.]), ((0, 1, 1, 0), [-1.34898, 0.])):
        assert_allclose(ea.dprime(resp, return_bias=True), want, atol=1e-5)
    assert_allclose([np.inf, -np.inf], ea.dprime((1, 0, 2, 1), False, True))
    pytest.raises(ValueError, ea.dprime, np.ones((5, 4, 3)))
    pytest.raises(ValueError, ea.dprime, (1, 2, 3))
    # test simple larger dimensionality support
    assert ea.dprime((5, 0, 1, 0)) == ea.dprime([[[5, 0, 1, 0]]])[0][0]
Example 6
def test_dprime():
    """Test dprime accuracy."""
    with pytest.warns(RuntimeWarning, match='cast to'):
        pytest.raises(IndexError, ea.dprime, 'foo')
        pytest.raises(ValueError, ea.dprime, ['foo', 0, 0, 0])
    with pytest.warns(RuntimeWarning, match='truncated'):
        ea.dprime((1.1, 0, 0, 0))
    for resp, want in (
            ((1, 1, 1, 1), [0, 0]),
            ((1, 0, 0, 1), [1.34898, 0.]),
            ((0, 1, 0, 1), [0, 0.67449]),
            ((0, 0, 1, 1), [0, 0]),
            ((1, 0, 1, 0), [0, -0.67449]),
            ((0, 1, 1, 0), [-1.34898, 0.]),
            ((0, 1, 1, 0), [-1.34898, 0.])):
        assert_allclose(ea.dprime(resp, return_bias=True),
                        want, atol=1e-5)
    assert_allclose([np.inf, -np.inf], ea.dprime((1, 0, 2, 1), False, True))
    pytest.raises(ValueError, ea.dprime, np.ones((5, 4, 3)))
    pytest.raises(ValueError, ea.dprime, (1, 2, 3))
    # test simple larger dimensionality support
    assert ea.dprime((5, 0, 1, 0)) == ea.dprime([[[5, 0, 1, 0]]])[0][0]
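The expected values in the loops above (for instance d' of about 1.34898 for one hit, no misses, no false alarms, and one correct rejection) are consistent with the standard signal-detection formulas d' = Z(HR) - Z(FAR) and bias c = -(Z(HR) + Z(FAR)) / 2, with 0.5 added to each count and 1 to each total before taking the rates. The sketch below reproduces a few of those values with scipy; the correction shown is inferred from the expected values, not taken from expyfun's source, so treat it as an assumption.

from scipy.stats import norm


def dprime_manual(counts, correction=True):
    """d' and bias from (hits, misses, false alarms, correct rejections).

    The +0.5 / +1 correction below is an assumption inferred from the
    expected test values above, not expyfun's actual implementation.
    """
    hit, miss, fa, cr = (float(c) for c in counts)
    if correction:
        hr = (hit + 0.5) / (hit + miss + 1.0)    # corrected hit rate
        far = (fa + 0.5) / (fa + cr + 1.0)       # corrected false-alarm rate
    else:
        hr = hit / (hit + miss)
        far = fa / (fa + cr)
    d = norm.ppf(hr) - norm.ppf(far)             # d' = Z(HR) - Z(FAR)
    c = -0.5 * (norm.ppf(hr) + norm.ppf(far))    # bias c
    return d, c


print(dprime_manual((1, 0, 0, 1)))                     # ~ (1.34898, 0.0)
print(dprime_manual((1, 0, 1, 0)))                     # ~ (0.0, -0.67449)
print(dprime_manual((1, 0, 2, 1), correction=False))   # (inf, -inf)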
Example 7
    trials_correct = np.array(trials_correct)

    pc = correct_tracker / count_tracker * 100
    pm = repmod_tracker / count_tracker * 100

    # %% Consider only when targ_mod and mask_mod disagree
    pm_disagree = pm[[0, 1], [1, 0]]
    pc_disagree = (100 - pm_disagree[0] + pm_disagree[1]) / 2

    hr = pm_disagree[1]
    mr = 100 - pm_disagree[1]
    fa = pm_disagree[0]
    cr = 100 - pm_disagree[0]

    #d-prime arrays
    dp_disagree, bias = dprime(np.transpose([hr, mr, fa, cr], [1, 2, 0]),
                               return_bias=True)
    bias_diss[j][0] = bias[0, 1]  #upright target
    bias_diss[j][1] = bias[0, 0]  #upright masker
    bias_diss[j][2] = bias[1, 1]  #flipped target
    bias_diss[j][3] = bias[1, 0]  #flipped masker

    dprime_diss[j][0] = dp_disagree[0, 1]  #upright target
    dprime_diss[j][1] = dp_disagree[0, 0]  #upright masker
    dprime_diss[j][2] = dp_disagree[1, 1]  #flipped target
    dprime_diss[j][3] = dp_disagree[1, 0]  #flipped masker

    # dprime difference array for subjects
    dprime_subjects[j][0] = (dp_disagree[0, 1] - dp_disagree[0, 0]
                             )  #upright T - upright M
    dprime_subjects[j][1] = (dp_disagree[0, 1] - dp_disagree[1, 1]
                             )  #upright T - flipped T
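Example 7 stacks the four rate arrays with np.transpose([hr, mr, fa, cr], [1, 2, 0]) so that the (hits, misses, false alarms, correct rejections) quadruple lands on the last axis, which is where dprime reads it (the [[[5, 0, 1, 0]]] tests above show the same last-axis behavior). Below is a small sketch of just that reshaping step, with made-up 2x2 rate arrays standing in for the example's trackers.

import numpy as np

# hypothetical 2x2 hit / false-alarm percentages (illustrative values only)
hr = np.array([[80., 70.], [60., 90.]])
mr = 100. - hr                      # miss rate
fa = np.array([[20., 30.], [10., 40.]])
cr = 100. - fa                      # correct-rejection rate

# the (4, 2, 2) stack becomes a (2, 2, 4) array, so each trailing
# quadruple is (hit, miss, false alarm, correct rejection)
stacked = np.transpose([hr, mr, fa, cr], [1, 2, 0])
print(stacked.shape)   # (2, 2, 4)
print(stacked[0, 0])   # [80. 20. 20. 80.]

Since each pair of rates sums to 100, the percentages behave like counts out of 100 trials when passed to dprime.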
Example 8
b_prob = 0.6
f_prob = 0.2
subjs = ['a', 'b', 'c', 'd', 'e']
a_hit = np.random.binomial(targets, a_prob, len(subjs))
b_hit = np.random.binomial(targets, b_prob, len(subjs))
a_fa = np.random.binomial(foils, f_prob, len(subjs))
b_fa = np.random.binomial(foils, f_prob, len(subjs))
a_miss = targets - a_hit
b_miss = targets - b_hit
a_cr = foils - a_fa
b_cr = foils - b_fa
data = pd.DataFrame(dict(a_hit=a_hit, a_miss=a_miss, a_fa=a_fa, a_cr=a_cr,
                         b_hit=b_hit, b_miss=b_miss, b_fa=b_fa, b_cr=b_cr),
                    index=subjs)
# calculate dprimes
a_dprime = ea.dprime(data[['a_hit', 'a_miss', 'a_fa', 'a_cr']])
b_dprime = ea.dprime(data[['b_hit', 'b_miss', 'b_fa', 'b_cr']])
results = pd.DataFrame(dict(ctrl=a_dprime, test=b_dprime))
# plot
subplt, barplt = ea.barplot(results, axis=0, err_bars='sd', lines=True,
                            brackets=[(0, 1)], bracket_text=[r'$p < 10^{-9}$'])
subplt.yaxis.set_label_text('d-prime +/- 1 s.d.')
subplt.set_title('Each line represents a different subject')

# significance brackets example
trials_per_cond = 100
conds = ['ctrl', 'test']
diffs = ['easy', 'hard']
colnames = ['-'.join([x, y]) for x, y in zip(conds * 2,
            np.tile(diffs, (2, 1)).T.ravel().tolist())]
cond_prob = [0.9, 0.8]
Example 9
b_fa = np.random.binomial(foils, f_prob, len(subjs))
a_miss = targets - a_hit
b_miss = targets - b_hit
a_cr = foils - a_fa
b_cr = foils - b_fa
data = pd.DataFrame(dict(a_hit=a_hit,
                         a_miss=a_miss,
                         a_fa=a_fa,
                         a_cr=a_cr,
                         b_hit=b_hit,
                         b_miss=b_miss,
                         b_fa=b_fa,
                         b_cr=b_cr),
                    index=subjs)
# calculate dprimes
a_dprime = ea.dprime(data[['a_hit', 'a_miss', 'a_fa', 'a_cr']])
b_dprime = ea.dprime(data[['b_hit', 'b_miss', 'b_fa', 'b_cr']])
results = pd.DataFrame(dict(ctrl=a_dprime, test=b_dprime))
# plot
subplt, barplt = ea.barplot(results,
                            axis=0,
                            err_bars='sd',
                            lines=True,
                            brackets=[(0, 1)],
                            bracket_text=[r'$p < 10^{-9}$'])
subplt.yaxis.set_label_text('d-prime +/- 1 s.d.')
subplt.set_title('Each line represents a different subject')

# significance brackets example
trials_per_cond = 100
conds = ['ctrl', 'test']
Example 10
trial_aggr_dict.update(dict(catg=uniq, word=uniq, tloc=uniq, floc=uniq,
                            onset_og=uniq, word_og=uniq, catg_og=uniq))
grby = ['subj', 'trial']
by_trial = trialdata.groupby(grby)
by_trial = by_trial.aggregate(trial_aggr_dict)
by_trial['tloc'] = by_trial['tloc'].apply(literal_eval)
by_trial['floc'] = by_trial['floc'].apply(literal_eval)
by_trial['word_og'] = by_trial['word_og'].apply(literal_eval)
by_trial['catg_og'] = by_trial['catg_og'].apply(literal_eval)
by_trial['onset_og'] = by_trial['onset_og'].apply(literal_eval)
by_trial['hrate'] = by_trial['hit'].astype(float) / (by_trial['targ'])
by_trial['frate'] = by_trial['fht'].astype(float) / (by_trial['foil'])
by_trial['mrate'] = by_trial['fal'].astype(float) / (by_trial['notg'])
by_trial['srate'] = by_trial['snd'].astype(float) / (by_trial['sandwich'])
by_trial['lrate'] = by_trial['lft'].astype(float) / (by_trial['leftover'])
by_trial['dprime'] = efa.dprime(by_trial[['hit', 'miss', 'fal', 'crj']].values)
by_trial = by_trial[trial_col_order].sort(grby)
by_trial.to_csv(op.join(outdir, 'trialLevelData.tsv'), sep='\t', index=False)

# AGGREGATE BY CONDITION
grby = ['subj', 'div', 'adj', 'idn', 'num']
by_cond = trialdata.groupby(grby)
by_cond = by_cond.aggregate(aggregation_dict)
by_cond['hrate'] = by_cond['hit'].astype(float) / (by_cond['targ'])
by_cond['frate'] = by_cond['fht'].astype(float) / (by_cond['foil'])
by_cond['mrate'] = by_cond['fal'].astype(float) / (by_cond['notg'])
by_cond['srate'] = by_cond['snd'].astype(float) / (by_cond['sandwich'])
by_cond['lrate'] = by_cond['lft'].astype(float) / (by_cond['leftover'])
by_cond['dprime'] = efa.dprime(by_cond[['hit', 'miss', 'fal', 'crj']].values)
by_cond = by_cond[col_order_out].sort(grby)
#by_cond.to_csv(op.join(outdir, 'condLevelData.tsv'), sep='\t', index=False)
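Examples 8 through 10 all rely on the same vectorized pattern: gather counts into columns ordered as hits, misses, false alarms, correct rejections, then hand the whole 2-D array to dprime in a single call to get one d' per row. A self-contained sketch of that pattern follows; the simulated counts, probabilities, and column names are illustrative stand-ins, and ea is assumed to be expyfun.analyze as in the examples above.

import numpy as np
import pandas as pd
import expyfun.analyze as ea

rng = np.random.RandomState(0)
targets, foils = 20, 20
subjs = ['a', 'b', 'c']

# simulated per-subject hit and false-alarm counts (illustrative only)
hit = rng.binomial(targets, 0.8, len(subjs))
fa = rng.binomial(foils, 0.2, len(subjs))
counts = pd.DataFrame(dict(hit=hit, miss=targets - hit,
                           fa=fa, cr=foils - fa), index=subjs)

# dprime reads the (hit, miss, fa, cr) quadruple along the last axis,
# so an (n_subjects, 4) array yields one d' per subject
counts['dprime'] = ea.dprime(counts[['hit', 'miss', 'fa', 'cr']].values)
print(counts)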