def test_sample_posterior(self):
	"""
	Posterior samples should be distributed like prior samples, and
	hidden states recovered by Gibbs sampling should reconstruct the data.
	"""

	isa = ISA(2, 3, num_scales=10)
	isa.A = asarray([[1., 0., 1.], [0., 1., 1.]])
	isa.initialize()

	params = isa.default_parameters()
	params['gibbs']['verbosity'] = 0
	params['gibbs']['num_iter'] = 100

	states_post = isa.sample_posterior(isa.sample(1000), params)
	states_prio = isa.sample_prior(states_post.shape[1])

	# flatten and shuffle so the two-sample test sees unordered draws
	states_post = states_post.flatten()[permutation(states_post.size)]
	states_prio = states_prio.flatten()[permutation(states_prio.size)]

	# on average, posterior samples should be distributed like prior samples
	p = ks_2samp(states_post, states_prio)[1]
	self.assertGreater(p, 0.0001)

	samples = isa.sample(100)
	states = isa.sample_posterior(samples, params)

	# reconstruction should be perfect
	self.assertLess(sum(square(dot(isa.A, states) - samples).flatten()), 1e-10)
def test_sample_prior(self):
	"""
	Prior samples should come back with one row per hidden unit and
	one column per requested sample.
	"""

	num_samples = 20
	isa = ISA(5, 10)

	hidden_states = isa.sample_prior(num_samples)

	# simple sanity checks
	self.assertEqual(hidden_states.shape[0], 10)
	self.assertEqual(hidden_states.shape[1], num_samples)
def test_sample_scales(self):
	"""
	Scales sampled for prior states should be uniformly distributed
	over the scales stored in the subspace GSMs.
	"""

	# NOTE(review): an identical definition of this test appears later in
	# the file and shadows this one, so only the later copy runs — remove one.

	isa = ISA(2, 5, num_scales=4)

	# install known scales in every subspace
	new_subspaces = isa.subspaces()
	for gsm in new_subspaces:
		gsm.scales = asarray([1., 2., 3., 4.])
	isa.set_subspaces(new_subspaces)

	samples = isa.sample_prior(100000)
	scales = isa.sample_scales(samples)

	# simple sanity checks
	self.assertEqual(scales.shape[0], isa.num_hiddens)
	self.assertEqual(scales.shape[1], samples.shape[1])

	# empirical frequency of each of the four scale values
	priors = mean(abs(scales.flatten() - asarray([[1., 2., 3., 4.]]).T) < 0.5, 1)

	# prior probabilities of scales should be equal and sum up to one
	self.assertLess(max(abs(priors - 1. / new_subspaces[0].num_scales)), 0.01)
	self.assertLess(abs(sum(priors) - 1.), 1e-10)
def test_sample(self):
	"""
	Visible samples and prior (hidden) samples should have the
	expected dimensions.
	"""

	num_samples = 100
	isa = ISA(3, 4)

	visibles = isa.sample(num_samples)
	hiddens = isa.sample_prior(num_samples)

	# simple sanity checks
	self.assertEqual(visibles.shape[0], isa.num_visibles)
	self.assertEqual(visibles.shape[1], num_samples)
	self.assertEqual(hiddens.shape[0], isa.num_hiddens)
	self.assertEqual(hiddens.shape[1], num_samples)
def test_prior_energy_gradient(self):
	"""
	The analytic gradient of the prior energy should agree with a
	numerical gradient at sampled points.
	"""

	# NOTE(review): this test is redefined (identically) later in the file;
	# the later definition shadows this one, so only one copy ever runs.

	isa = ISA(4)

	samples = isa.sample_prior(100)
	grad = isa.prior_energy_gradient(samples)

	# simple sanity checks
	self.assertEqual(grad.shape[0], samples.shape[0])
	self.assertEqual(grad.shape[1], samples.shape[1])

	f = lambda x: isa.prior_energy(x.reshape(-1, 1)).flatten()
	df = lambda x: isa.prior_energy_gradient(x.reshape(-1, 1)).flatten()

	for j in range(samples.shape[1]):
		x = samples[:, j]

		# comparison with numerical gradient, normalized by gradient magnitude
		relative_error = check_grad(f, df, x) / sqrt(sum(square(df(x))))
		self.assertLess(relative_error, 0.001)
def test_prior_energy_gradient(self):
	"""
	Check the analytic prior energy gradient against finite differences.
	"""

	# NOTE(review): this is a byte-identical duplicate of an earlier test of
	# the same name; the earlier definition is shadowed — one copy should go.

	isa = ISA(4)

	samples = isa.sample_prior(100)
	grad = isa.prior_energy_gradient(samples)

	# simple sanity checks
	self.assertEqual(grad.shape[0], samples.shape[0])
	self.assertEqual(grad.shape[1], samples.shape[1])

	energy = lambda x: isa.prior_energy(x.reshape(-1, 1)).flatten()
	energy_grad = lambda x: isa.prior_energy_gradient(x.reshape(-1, 1)).flatten()

	for i in range(samples.shape[1]):
		point = samples[:, i]
		grad_norm = sqrt(sum(square(energy_grad(point))))

		# comparison with numerical gradient
		self.assertLess(check_grad(energy, energy_grad, point) / grad_norm, 0.001)
def test_initialize(self):
	"""
	After initialization on white data, the rows of A should be roughly
	orthogonal and prior marginals roughly Laplace-distributed.
	"""

	def sqrtmi(mat):
		"""
		Compute matrix inverse square root.

		@type  mat: array_like
		@param mat: matrix for which to compute inverse square root
		"""

		# NOTE(review): `eig` on a (symmetric) covariance can return complex
		# eigenvalues with tiny imaginary parts; `eigh` would be safer — confirm.
		eigvals, eigvecs = eig(mat)

		# keep only directions with positive eigenvalues
		positive = eigvals > 0.
		eigvecs = eigvecs[:, positive]
		eigvals = eigvals[positive]

		# inverse square root
		return dot(eigvecs, dot(diag(1. / sqrt(eigvals)), eigvecs.T))

	# generate white data
	data = randn(5, 1000)
	data = dot(sqrtmi(cov(data)), data)

	isa = ISA(5, 10)
	isa.initialize(data)

	# rows of A should be roughly orthogonal
	self.assertLess(sum(square(dot(isa.A, isa.A.T) - eye(5)).flatten()), 1e-3)

	p = kstest(
		isa.sample_prior(100).flatten(),
		lambda x: laplace.cdf(x, scale=1. / sqrt(2.)))[1]

	# prior marginals should be roughly Laplace
	self.assertGreater(p, 0.0001)

	# test initialization with larger subspaces
	isa = ISA(5, 10, ssize=2)
	isa.initialize(data)
def test_sample_scales(self):
	"""
	Sampled scales should take each GSM scale value with equal probability.
	"""

	# NOTE(review): this is an identical duplicate of an earlier test of the
	# same name; this later definition shadows the earlier one — remove one.

	isa = ISA(2, 5, num_scales=4)

	# fetch subspaces, overwrite their scales, and write them back
	subspaces = isa.subspaces()
	for gsm in subspaces:
		gsm.scales = asarray([1., 2., 3., 4.])
	isa.set_subspaces(subspaces)

	samples = isa.sample_prior(100000)
	scales = isa.sample_scales(samples)

	# simple sanity checks
	self.assertEqual(scales.shape[0], isa.num_hiddens)
	self.assertEqual(scales.shape[1], samples.shape[1])

	# fraction of sampled scales close to each of the four true values
	priors = mean(abs(scales.flatten() - asarray([[1., 2., 3., 4.]]).T) < 0.5, 1)

	# prior probabilities of scales should be equal and sum up to one
	self.assertLess(max(abs(priors - 1. / subspaces[0].num_scales)), 0.01)
	self.assertLess(abs(sum(priors) - 1.), 1e-10)