def test_sample_posterior(self):
    isa = ISA(2, 3, num_scales=10)
    isa.A = asarray([[1., 0., 1.], [0., 1., 1.]])
    isa.initialize()

    params = isa.default_parameters()
    params['gibbs']['verbosity'] = 0
    params['gibbs']['num_iter'] = 100

    states_post = isa.sample_posterior(isa.sample(1000), params)
    states_prio = isa.sample_prior(states_post.shape[1])

    # pool coefficients and shuffle them
    states_post = states_post.flatten()
    states_post = states_post[permutation(states_post.size)]
    states_prio = states_prio.flatten()
    states_prio = states_prio[permutation(states_prio.size)]

    # on average, posterior samples should be distributed like prior samples
    p = ks_2samp(states_post, states_prio)[1]
    self.assertGreater(p, 0.0001)

    samples = isa.sample(100)
    states = isa.sample_posterior(samples, params)

    # reconstruction should be perfect
    self.assertLess(sum(square(dot(isa.A, states) - samples).flatten()), 1e-10)
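# Background for the test above: if x ~ p(x) and s ~ p(s | x), then marginally
# s ~ p(s), so pooled posterior coefficients should follow the same
# distribution as prior coefficients, which the two-sample Kolmogorov-Smirnov
# test checks. A hypothetical helper packaging this comparison might look as
# follows (a sketch, not part of the original suite):
def _posterior_matches_prior(isa, states_post, alpha=1e-4):
    # pool coefficients into flat 1D arrays; ks_2samp compares empirical
    # CDFs and is order-invariant, so shuffling is not strictly required
    states_prio = isa.sample_prior(states_post.shape[1]).flatten()
    # p-value of the two-sample KS test; large values indicate consistency
    return ks_2samp(states_post.flatten(), states_prio)[1] > alpha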
def test_evaluate(self):
    isa1 = ISA(2)
    isa1.A = eye(2)

    subspaces = isa1.subspaces()
    for gsm in subspaces:
        gsm.scales = ones(gsm.num_scales)
    isa1.set_subspaces(subspaces)

    # equivalent overcomplete model
    isa2 = ISA(2, 4)

    A = copy(isa2.A)
    A[:, :2] = isa1.A / sqrt(2.)
    A[:, 2:] = isa1.A / sqrt(2.)
    isa2.A = A

    subspaces = isa2.subspaces()
    for gsm in subspaces:
        gsm.scales = ones(gsm.num_scales)
    isa2.set_subspaces(subspaces)

    data = isa1.sample(100)

    # both models describe the same distribution, so the log-likelihoods
    # should agree
    ll1 = isa1.evaluate(data)
    ll2 = isa2.evaluate(data)

    self.assertLess(abs(ll1 - ll2), 1e-5)

    isa1 = ISA(2)
    isa1.initialize()

    # equivalent overcomplete model
    isa2 = ISA(2, 4)
    isa2.set_subspaces(isa1.subspaces() * 2)

    A = isa2.basis()
    A[:, :2] = isa1.basis()
    A[:, 2:] = 0.
    isa2.set_basis(A)

    data = isa1.sample(100)

    params = isa2.default_parameters()
    params['ais']['num_iter'] = 100
    params['ais']['num_samples'] = 100

    ll1 = isa1.evaluate(data)
    ll2 = isa2.evaluate(data, params)

    self.assertLess(abs(ll1 - ll2), 0.1)
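# Why the first two models above are equivalent: a GSM whose scales are all
# ones is a standard Gaussian, so isa1 with an identity basis is N(0, I). For
# the overcomplete model, x = A s with s ~ N(0, I) gives x ~ N(0, A A^T), and
# with A = [A1 / sqrt(2), A1 / sqrt(2)] one gets A A^T = A1 A1^T = I, i.e. the
# same distribution. The second pair instead pads the basis with zero columns,
# which likewise leaves the distribution over the visibles unchanged.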
def test_train_mp(self):
    isa = ISA(5, 10)

    params = isa.default_parameters()
    params['training_method'] = 'MP'
    params['mp']['num_coeff'] = 4

    samples = isa.sample(100)
    states = isa.matching_pursuit(samples, params)

    # simple sanity checks
    self.assertEqual(states.shape[1], 100)
    self.assertEqual(states.shape[0], 10)

    # no sample should use more than num_coeff nonzero coefficients
    self.assertFalse(any(sum(states != 0., 0) > 4))

    # make sure training with MP doesn't throw any errors
    isa.train(isa.sample(1011), params)
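# The num_coeff parameter bounds how many basis vectors matching pursuit may
# use to encode each sample; the resulting sparsity pattern can be inspected
# directly (a sketch, assuming the numpy names used above):
#
#     num_active = sum(states != 0., 0)  # nonzero coefficients per column
#     assert all(num_active <= params['mp']['num_coeff'])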
def test_sample(self):
    isa = ISA(3, 4)

    samples = isa.sample(100)
    samples_prior = isa.sample_prior(100)

    # simple sanity checks
    self.assertEqual(samples.shape[0], isa.num_visibles)
    self.assertEqual(samples.shape[1], 100)
    self.assertEqual(samples_prior.shape[0], isa.num_hiddens)
    self.assertEqual(samples_prior.shape[1], 100)
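# The shape checks above pin down the data layout used throughout these tests:
# samples are stored column-wise, i.e. matrices of size
# (dimensionality, num_samples), with num_visibles rows in data space and
# num_hiddens rows in source space.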
def test_sample_posterior_ais(self):
    isa = ISA(2, 3, num_scales=10)
    isa.A = asarray([[1., 0., 1.], [0., 1., 1.]])
    isa.initialize()

    params = isa.default_parameters()
    params['ais']['verbosity'] = 0
    params['ais']['num_iter'] = 100

    samples = isa.sample(100)
    states, _ = isa.sample_posterior_ais(samples, params)

    # reconstruction should be perfect
    self.assertLess(sum(square(dot(isa.A, states) - samples).flatten()), 1e-10)
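# In the overcomplete case the posterior over hidden states is supported on
# the affine subspace {s : A s = x}, so any valid posterior sample must
# reconstruct the data exactly; the squared-error check above verifies this
# up to numerical precision.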
def test_loglikelihood(self):
    isa = ISA(7, ssize=3)

    samples = isa.sample(100)

    energy = isa.prior_energy(dot(inv(isa.A), samples))
    loglik = isa.loglikelihood(samples)

    # difference between loglik and -energy should be constant
    self.assertLess(var(loglik + energy), 1e-10)

    isa = ISA(2, 3)

    samples = isa.sample(20)

    params = isa.default_parameters()
    params['ais']['num_samples'] = 5
    params['ais']['num_iter'] = 10

    loglik = isa.loglikelihood(samples, params, return_all=True)

    # simple sanity checks
    self.assertEqual(loglik.shape[0], params['ais']['num_samples'])
    self.assertEqual(loglik.shape[1], samples.shape[1])
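# The first check above follows from the change-of-variables formula for the
# complete model: with W = inv(A) and s = W x,
#
#     log p(x) = log p(s) + log |det W| = -E(s) + const,
#
# where E is the prior energy up to an additive constant. The constant is the
# same for every sample, so loglik + energy should have (numerically) zero
# variance.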
def test_train_lbfgs(self):
    isa = ISA(2)
    isa.initialize()
    isa.A = eye(2)

    samples = isa.sample(10000)

    # initialize close to the original parameters
    isa.A = asarray([[cos(0.4), sin(0.4)], [-sin(0.4), cos(0.4)]])

    params = isa.default_parameters()
    params['training_method'] = 'LBFGS'
    params['train_prior'] = False
    params['max_iter'] = 1
    params['lbfgs']['max_iter'] = 50

    isa.train(samples, params)

    # L-BFGS should be able to recover the parameters
    self.assertLess(sqrt(sum(square(isa.A.flatten() - eye(2).flatten()))), 0.1)
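# The data above are generated with A = eye(2), and the basis is then reset to
# a 0.4 rad rotation of the identity, so optimization starts close to the
# generating solution. With 'train_prior' disabled, L-BFGS only has to undo
# the rotation, which is why the Frobenius distance to eye(2) is expected to
# drop below 0.1.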
def test_merge(self):
    isa1 = ISA(5, ssize=2)
    isa2 = ISA(5)

    isa1.initialize()
    isa1.orthogonalize()

    isa2.initialize()
    isa2.A = isa1.A

    params = isa2.default_parameters()
    params['train_basis'] = False
    params['merge_subspaces'] = True
    params['merge']['verbosity'] = 0

    isa2.train(isa1.sample(10000), params)

    ssizes1 = [gsm.dim for gsm in isa1.subspaces()]
    ssizes2 = [gsm.dim for gsm in isa2.subspaces()]

    # algorithm should be able to recover the subspace sizes
    self.assertTrue(all(sort(ssizes1) == sort(ssizes2)))
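# isa2 starts from one-dimensional subspaces but shares isa1's orthogonalized
# basis; with 'merge_subspaces' enabled and basis training disabled, subspace
# merging should rediscover the two-dimensional subspace structure that
# generated the data, which is what the sorted size comparison checks.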