Example #1
    def test_get_start_uses_linear_regression(self, mock_linregress):
        mock_linregress.return_value = (1, 2, 'other')
        ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
        start = ps.get_start(self.data[:, 0], self.data[:, 1])
        self.assertEqual(len(mock_linregress.mock_calls), 1)
        self.assertAlmostEqual(start[0], -2)
        self.assertAlmostEqual(start[1], 2)
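The test above mocks out the regression; unmocked, get_start derives starting
values for the optimizer from the data themselves. A minimal usage sketch,
borrowing the small data set from Example #7:

import numpy as np
from esdt import pmf

data = np.array([[.3, .5, 20], [.5, .6, 20], [.7, .85, 20],
                 [.9, .95, 20], [1.1, 1., 20]])
ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
start = ps.get_start(data[:, 0], data[:, 1])  # regression-based guess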
Example #2
    def test_jackknife_sem_drops_every_block(self):
        ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
        ps.ml_fit = mock.Mock()
        ps.ml_fit.side_effect = [(k + np.zeros(3, 'd'), 1) for k in range(5)]
        ps.params = np.zeros(3, 'd')  # Needed for influence
        ps.jackknife_sem(self.data)
        self.assertEqual(len(ps.ml_fit.mock_calls), self.data.shape[0])
Example #3
    def test_transform_results_in_predictions_on_transformed_values(self):
        mock_F = mock.Mock(return_value=0.5)
        mock_log = mock.Mock()
        ps = pmf.PsychometricFunction(mock_F, 0.5, mock_log)
        ps.predict('ANY_X_VALUE', ['ANY_THRES', 'ANY_WIDTH', .05])
        mock_F.assert_called_once_with(mock_log.return_value, 'ANY_THRES',
                                       'ANY_WIDTH')
        mock_log.assert_called_once_with('ANY_X_VALUE')
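With real arguments in place of the sentinel strings, the same predict call
evaluates the fitted sigmoid at (internally transformed) stimulus values. A
hedged sketch, again using the small data set from Example #7:

import numpy as np
from esdt import pmf

data = np.array([[.3, .5, 20], [.5, .6, 20], [.7, .85, 20],
                 [.9, .95, 20], [1.1, 1., 20]])
ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
ps.ml_fit(data)
x = np.linspace(.3, 1.1, 100)
y = ps.predict(x, ps.params)  # predicted fraction of correct responses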
Example #4
    def test_deviance_test_returns_right_number_of_samples_and_p_in_0_1(self):
        ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
        ps.params = np.array([1., 1., .03])
        p, D = ps.deviance_test(self.data, nsamples=20)
        self.assertSequenceEqual(D.shape, (20, ))
        self.assertIsInstance(p, float)
        self.assertGreaterEqual(p, 0)
        self.assertLessEqual(p, 1)
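Outside the test suite, deviance_test doubles as a Monte Carlo goodness-of-fit
check after fitting. A sketch (data as in Example #7; the number of samples is
an illustrative choice):

import numpy as np
from esdt import pmf

data = np.array([[.3, .5, 20], [.5, .6, 20], [.7, .85, 20],
                 [.9, .95, 20], [1.1, 1., 20]])
ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
ps.ml_fit(data)
p, D = ps.deviance_test(data, nsamples=1000)
# p is a p-value in [0, 1]; D holds the 1000 simulated deviances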
Example #5
    def test_fit_assigns_by_default(self):
        ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
        self.assertIsNone(ps.params)
        params, ll = ps.ml_fit(self.data)
        self.assertIsNotNone(ps.params)
        self.assertIsNotNone(ps.data)
        self.assertEqual(len(params), 3)
        self.assertGreater(ll, 0)
Example #6
    def test_log_likelihood(self):
        patch_psi = mock.patch('esdt.pmf.PsychometricFunction.predict')
        with patch_psi as mock_psi:
            mock_psi.return_value = .5
            ps = pmf.PsychometricFunction(lambda x, t, w: x)
            ps.data = self.data
            ps.negloglikelihood('ANY_PARAM', 1, 2, 3)

            self.assertEqual(len(mock_psi.mock_calls), 1)
            self.assertTrue(np.all(1 == mock_psi.mock_calls[0][1][0]))
            self.assertEqual(mock_psi.mock_calls[0][1][1], 'ANY_PARAM')
Example #7
import numpy as np
from esdt import pmf

# Here is some simple raw data
# The first column is stimulus intensity
# The second column is the fraction of correct responses
# The third column is the total number of responses collected
data = [[.3, .5, 20], [.5, .6, 20], [.7, .85, 20], [.9, .95, 20],
        [1.1, 1., 20]]
data = np.array(data)

# Create a psychometric function object. Here we're using a logistic sigmoid
# and we assume that we know the guessing rate (i.e. the lower asymptote) to
# be 0.5. This would, for example, be the case for a psychometric function
# from a two-alternative forced choice (2AFC) paradigm.
pf = pmf.PsychometricFunction(pmf.logistic, 0.5)

# The psychometric function now contains our model specification, but it hasn't
# made contact with data yet. The ml_fit method performs a maximum likelihood
# fit.
pf.ml_fit(data)

# A quick and dirty way to get confidence regions is the jackknife. It is
# great for checking whether your errors are in a reasonable ballpark, but it
# will generally *underestimate* the error bars. So don't use this for any
# analysis that you want to publish; use the Bayesian inference approach for
# that instead.
se, r, infl = pf.jackknife_sem(data)

# As you can see, the jackknife procedure returns three measures:
# 1. a coarse standard error for each of your parameters (se),
# 2. the jackknife replications, i.e. the fits with one block left out (r),
# 3. the influence of each block on the overall fit (infl).
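# For error estimates you intend to report, the Bayesian route mentioned
# above is preferable. What follows is only a rough sketch, not the canonical
# workflow: we assume that the posterior method accepts a (3, n) array of
# candidate parameter vectors, as in Example #12, and the grid ranges below
# are purely illustrative.
thres = np.linspace(.2, 1.2, 25)
width = np.linspace(.05, 2., 25)
lapse = np.linspace(0., .1, 10)
grid = np.array(np.meshgrid(thres, width, lapse)).reshape(3, -1)
post = pf.posterior(grid)
map_estimate = grid[:, post.argmax()]  # best grid point (MAP estimate)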
Example #8
    def test_transform_expresses_threshold_transformed(self):
        ps = pmf.PsychometricFunction(pmf.gumbel, 0.5, np.log10)
        self.data[:, 0] = [.01, .05, .1, .5]
        params, ll = ps.fit(self.data)
        self.assertGreater(params[0], -100)
        self.assertLess(params[0], np.log10(0.5))
Example #9
    def test_fit_is_deprecated(self):
        ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
        ps.ml_fit = mock.Mock()
        with self.assertWarns(DeprecationWarning):
            ps.fit('ANY_DATA', start='ANY_START')
        ps.ml_fit.assert_called_once_with('ANY_DATA', start='ANY_START')
Example #10
    def test_fit_uses_starting_values_if_specified(self, mock_fmin):
        mock_fmin.return_value = np.ones(3, 'd')
        ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
        ps.ml_fit(self.data, start='ANY_STARTING_VALUE')
        self.assertEqual(mock_fmin.mock_calls[0][1][1], 'ANY_STARTING_VALUE')
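In real use, start would be a numeric parameter vector rather than a sentinel
string. A hedged sketch with purely illustrative guesses for threshold, width
and lapse rate (data as in Example #7):

import numpy as np
from esdt import pmf

data = np.array([[.3, .5, 20], [.5, .6, 20], [.7, .85, 20],
                 [.9, .95, 20], [1.1, 1., 20]])
ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
params, ll = ps.ml_fit(data, start=np.array([.7, .4, .02]))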
Example #11
    def test_fit_does_not_assign_if_requested(self):
        ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
        self.assertIsNone(ps.params)
        ps.ml_fit(self.data, assign=False)
        self.assertIsNone(ps.params)
        self.assertIsNone(ps.data)
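Skipping the assignment is handy when you want side fits, for example on
resampled data, without overwriting the state of an already fitted object. A
minimal sketch (data as in Example #7):

import numpy as np
from esdt import pmf

data = np.array([[.3, .5, 20], [.5, .6, 20], [.7, .85, 20],
                 [.9, .95, 20], [1.1, 1., 20]])
ps = pmf.PsychometricFunction(pmf.logistic, 0.5)
ps.ml_fit(data)                                 # main fit, stored on ps
side_params, _ = ps.ml_fit(data, assign=False)  # ps.params stays untouched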
Example #12
    def test_posterior(self):
        ps = pmf.PsychometricFunction(lambda x, t, w: x, guess=0.5)
        ps.data = self.data
        p = ps.posterior(np.array([[.1, .1, .1]]).T)

        self.assertAlmostEqual(1., p.max())
Example #13
import numpy as np
from esdt import pmf

# This script demonstrates that fitting a Gumbel sigmoid to log-transformed
# stimulus values gives the same result as fitting the raw values while
# letting the model apply transform=np.log10 internally.

data = np.array([[0.02, 0.5, 20], [0.025, 0.6, 20], [0.03, 0.55, 20],
                 [0.04, 0.6, 20], [0.05, 0.65, 20], [0.065, 0.8, 20],
                 [0.08, 0.7, 20], [0.1, 0.6, 20], [0.15, 0.8, 20],
                 [0.2, 0.6, 20], [0.25, 0.75, 20], [0.275, 0.8, 20],
                 [0.325, 0.9, 20], [0.325, 0.85, 20], [0.35, 0.8, 20],
                 [0.35, 0.9, 20], [0.4, 0.8, 20], [0.4, 0.95, 20],
                 [0.45, 0.95, 20], [0.5, 0.95, 20], [0.55, 0.7, 20]])

data_log = data.copy()
data_log[:, 0] = np.log10(data[:, 0])

# ps_log expects stimulus values that have already been log-transformed,
# while ps_trans applies the log10 transform internally.
ps_log = pmf.PsychometricFunction(pmf.gumbel, 0.5)
ps_trans = pmf.PsychometricFunction(pmf.gumbel, 0.5, transform=np.log10)

# fit is deprecated in favor of ml_fit (see Example #9).
params_log, ll_log = ps_log.ml_fit(data_log)
params_trans, ll_trans = ps_trans.ml_fit(data)

print(params_log, params_trans)
print(ll_log, ll_trans)

# Evaluating the negative log-likelihood at the same parameters must give the
# same value, no matter whether the transform was applied to the data
# beforehand or is applied inside the model.
l0 = ps_log.negloglikelihood(params_log, data_log[:, 0], data_log[:, 1],
                             data_log[:, 2])

l1 = ps_trans.negloglikelihood(params_log, data[:, 0], data[:, 1], data[:, 2])

# Compare with a small tolerance rather than exact float equality.
assert abs(l0 - l1) < 1e-9
assert abs(ll_log - ll_trans) < 1e-9
assert abs(params_log - params_trans).max() < 1e-6