Example #1
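The examples below are unit tests for the mat_neuron package. They all depend on a shared module-level setup that is not shown here; the following is a minimal sketch of what that setup might look like. The import of core from mat_neuron, the nose-style assert helpers, and the value of dt are assumptions inferred from how the tests use them, not taken from the original source.

import numpy as np
from nose.tools import assert_true, assert_equal, assert_almost_equal

from mat_neuron import core  # assumed location of predict, log_likelihood, etc.

dt = 1.0  # assumed integration time step; the actual value is not shown in these examples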
def test_softplus_spiker():
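    # seeding the RNG identically should make the softplus stochastic
    # spiker fully reproducible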
    params = [10, 2, 0, 5, 10, 10, 10, 200, 5, 2]
    I = np.zeros(2000, dtype='d')
    I[500:1500] = 0.5
    core.random_seed(1)
    Y, S1 = core.predict(I, params, dt, stochastic="softplus")
    core.random_seed(1)
    Y, S2 = core.predict(I, params, dt, stochastic="softplus")
    assert_true(np.all(S1 == S2))
Example #2
def test_poisson_spiker():
    # with the same random seed, the Poisson stochastic spiker should
    # produce identical spike trains on repeated calls
    params = [10, 2, 0, 5, 10, 10, 10, 200, 5, 2]
    I = np.zeros(2000, dtype='d')
    I[500:1500] = 0.5
    core.random_seed(1)
    Y, S1 = core.predict(I, params, dt, stochastic=True)
    core.random_seed(1)
    Y, S2 = core.predict(I, params, dt, stochastic=True)
    assert_true(np.all(S1 == S2))
Example #3
def test_phasic_response():
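    # this parameter set should give a phasic response to a current step:
    # a single spike just after onset, with the voltage settling to
    # I * params[5] at steady state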
    params = np.asarray([10, 2, -0.3, 5, 10, 10, 10, 200, 5, 2])
    I = np.zeros(2000, dtype='d')
    I[200:] = 0.5
    Y, S = core.predict(I, params, dt)
    spk = S.nonzero()[0]
    assert_almost_equal(Y[-1, 0],
                        I[-1] * params[5],
                        msg="incorrect steady-state voltage")
    assert_equal(len(spk), 1)
    assert_true(spk[0] == 212)
Example #4
def test_stimulus_upsample():
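    # upsampling the stimulus by 2 should double the length of the spike
    # array, move the current onset to index 400, and shift the spike
    # indices relative to the plain step response (see test_step_response)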
    params = [10, 2, 0, 5, 10, 10, 10, 200, 5, 2]
    I = np.zeros(1000, dtype='d')
    I[200:] = 0.55
    Y2, S2 = core.predict(I, params, dt, upsample=2)
    spk = S2.nonzero()[0]

    assert_equal(S2.size, I.size * 2)
    assert_equal(Y2[:, 1].nonzero()[0][0], 400)
    T = np.asarray([224, 502, 824])
    assert_true(np.all(T + 200 == spk[:3]))
Example #5
def test_step_response():
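    # the step current should be integrated exactly: the current state ends
    # at I[-1], the voltage settles to I[-1] * params[5], and spikes fall at
    # fixed, deterministic indices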
    params = [10, 2, 0, 5, 10, 10, 10, 200, 5, 2]
    I = np.zeros(1000, dtype='d')
    I[200:] = 0.55
    Y, S = core.predict(I, params, dt)
    spk = S.nonzero()[0]

    assert_almost_equal(Y[-1, 1], I[-1], msg="incorrect current integration")
    assert_almost_equal(Y[-1, 0],
                        I[-1] * params[5],
                        msg="incorrect steady-state voltage")
    T = np.asarray([224, 502, 824])
    assert_true(np.all(T == spk))
Example #6
def test_likelihood():
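    # core.log_likelihood should agree with a manual reconstruction of the
    # Poisson log-likelihood, and the true parameters should score higher
    # than a clearly wrong guess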
    I = np.zeros(2000, dtype='d')
    I[500:1500] = 0.55

    params_true = np.asarray([10, 2, 0, 5, 10, 10, 10, 200, 5, 2])
    Y_true, spk_v = core.predict(I, params_true, dt)
    S_obs = spk_v.nonzero()[0]

    llf = core.log_likelihood(spk_v, I, params_true, dt)

    V = core.voltage(I, params_true, dt)
    H = core.adaptation(spk_v, params_true[6:8], dt)
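    # mu is the log conditional intensity (voltage minus the adaptation and
    # threshold terms); the Poisson log-likelihood is then
    # sum(mu at spikes) - dt * sum(exp(mu))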
    mu = V[:, 0] - V[:, 2] - np.dot(H, params_true[:2]) - params_true[3]
    ll = np.sum(mu[S_obs]) - dt * np.sum(np.exp(mu))
    assert_almost_equal(llf, ll)

    params_guess = np.asarray([-50, -5, -5, 0, 10, 10, 10, 200, 5, 2])
    llf_g = core.log_likelihood(spk_v, I, params_guess, dt)
    assert_true(llf > llf_g)
Example #7
def test_likelihood_upsample():
    # resampling does change the log-likelihood, so this function just tests
    # that the upsampling works correctly
    from mat_neuron._model import log_likelihood_poisson
    I = np.zeros(2000, dtype='d')
    I[500:1500] = 0.55

    params_true = np.asarray([10, 2, 0, 5, 10, 10, 10, 200, 5, 2])
    Y_true, spk_v = core.predict(I, params_true, dt)
    V = Y_true[:, 0]
    H = core.adaptation(spk_v, params_true[6:8], dt)
    ll = log_likelihood_poisson(V, H, spk_v, params_true[:2], dt)
    llVds = log_likelihood_poisson(V[::2],
                                   H,
                                   spk_v,
                                   params_true[:2],
                                   dt,
                                   upsample=2)
    llIds = core.log_likelihood(spk_v, I[::2], params_true, dt, upsample=2)
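    # as shown here, llVds and llIds are not compared against ll: resampling
    # changes the log-likelihood, so these calls mainly exercise the
    # upsample arguments on downsampled voltage and stimulus inputs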