def test_kl_zero(self):
    """Two samples drawn from the same distribution should give KL ~ 0.

    A large multinomial sample is shuffled to obtain a second, independent
    ordering of the very same draws; both KL estimators should then
    converge to zero.
    """
    n_channels = 3
    probs = np.array([0.1, 0.3, 0.05, 0.05, 0.2, 0.1, 0.1, 0.1])
    # One-hot multinomial draws, collapsed to state indices, then shuffled
    # to produce a second sample with the identical empirical distribution.
    sample = np.random.multinomial(1, probs, size=100000).argmax(1)
    shuffled = np.random.permutation(sample)

    # Histogram-based estimator.
    p = kl_tools.states2distr(sample, n_channels)
    q = kl_tools.states2distr(shuffled, n_channels)
    kl1 = kl_tools.mean_KL_estimate(p, q)
    assert_almost_equal(kl1, 0., 3)

    # Dictionary-based estimator (expects 2-D state arrays).
    p_dict = kl_tools.states2dict(sample[:, None], n_channels)
    q_dict = kl_tools.states2dict(shuffled[:, None], n_channels)
    kl2, _ = kl_tools.kl_estimation(p_dict, q_dict, 100000)
    assert_almost_equal(kl2, 0., 3)
def test_states2distr(self):
    """states2distr must histogram states over all 2**nchannels bins.

    Checks both the raw-count (``normed=False``) and the frequency
    (``normed=True``) output against a hand-built expected histogram.
    """
    n_channels = 4
    spike_patterns = np.array([[1, 0, 1, 0],
                               [1, 0, 1, 0],
                               [1, 0, 1, 0],
                               [1, 1, 1, 1],
                               [0, 0, 0, 0]])
    states = kl_tools.spikes2states(spike_patterns)

    # Expected histogram over all 16 possible states: the repeated
    # [1,0,1,0] pattern maps to state 10 (3 occurrences), the all-ones
    # pattern to the last state, and the all-zeros pattern to state 0.
    expected = np.zeros(2**n_channels)
    expected[10] = 3
    expected[-1] = 1
    expected[0] = 1

    counts = kl_tools.states2distr(states, n_channels, normed=False)
    self.assertEqual(len(counts), 2**n_channels)
    assert_array_equal(expected, counts)

    # Normalized output: counts divided by the number of patterns (5).
    freqs = kl_tools.states2distr(states, n_channels, normed=True)
    self.assertEqual(len(freqs), 2**n_channels)
    assert_array_equal(expected/5., freqs)