def test_train_mp(self):
    isa = ISA(5, 10)

    params = isa.default_parameters()
    params['training_method'] = 'MP'
    params['mp']['num_coeff'] = 4

    samples = isa.sample(100)
    states = isa.matching_pursuit(samples, params)

    # simple sanity checks
    self.assertEqual(states.shape[1], 100)
    self.assertEqual(states.shape[0], 10)
    self.assertFalse(any(sum(states > 0., 0) > 4))

    # make sure training with MP doesn't throw any errors
    isa.train(isa.sample(1011), params)
def test_callback(self):
    isa = ISA(2)

    # callback function which counts how often it gets called
    def callback(i, isa_):
        callback.count += 1
        self.assertTrue(isa == isa_)
    callback.count = 0

    # set callback function
    parameters = {
        'verbosity': 0,
        'max_iter': 7,
        'callback': callback,
        'sgd': {'max_iter': 0}
    }

    isa.train(randn(2, 1000), parameters=parameters)

    # test how often callback function was called
    self.assertEqual(callback.count, parameters['max_iter'] + 1)

    # callback function which aborts training after the fifth iteration
    def callback(i, isa_):
        if i == 5:
            return False
        callback.count += 1
    callback.count = 0

    parameters['callback'] = callback

    isa.train(randn(2, 1000), parameters=parameters)

    # test how often callback function was called
    self.assertEqual(callback.count, 5)

    # make sure reference counts stay stable
    self.assertEqual(sys.getrefcount(isa) - 1, 1)
    self.assertEqual(sys.getrefcount(callback) - 1, 2)
def test_train_lbfgs(self):
    isa = ISA(2)
    isa.initialize()
    isa.A = eye(2)

    samples = isa.sample(10000)

    # initialize close to original parameters
    isa.A = asarray([[cos(0.4), sin(0.4)], [-sin(0.4), cos(0.4)]])

    params = isa.default_parameters()
    params['training_method'] = 'LBFGS'
    params['train_prior'] = False
    params['max_iter'] = 1
    params['lbfgs']['max_iter'] = 50

    isa.train(samples, params)

    # L-BFGS should be able to recover the parameters
    self.assertLess(sqrt(sum(square(isa.A.flatten() - eye(2).flatten()))), 0.1)
def test_merge(self):
    isa1 = ISA(5, ssize=2)
    isa2 = ISA(5)

    isa1.initialize()
    isa1.orthogonalize()

    isa2.initialize()
    isa2.A = isa1.A

    params = isa2.default_parameters()
    params['train_basis'] = False
    params['merge_subspaces'] = True
    params['merge']['verbosity'] = 0

    isa2.train(isa1.sample(10000), params)

    ssizes1 = [gsm.dim for gsm in isa1.subspaces()]
    ssizes2 = [gsm.dim for gsm in isa2.subspaces()]

    # algorithm should be able to recover subspace sizes
    self.assertTrue(all(sort(ssizes1) == sort(ssizes2)))
def test_train(self):
    # make sure train() doesn't throw any errors
    isa = ISA(2)

    params = isa.default_parameters()
    params['verbosity'] = 0
    params['max_iter'] = 2
    params['training_method'] = 'SGD'
    params['sgd']['max_iter'] = 1
    params['sgd']['batch_size'] = 57

    isa.initialize(randn(2, 1000))
    isa.train(randn(2, 1000), params)

    isa = ISA(4, ssize=2)
    isa.initialize(randn(4, 1000))
    isa.train(randn(4, 1000), params)

    isa = ISA(2, 3)

    params['gibbs']['ini_iter'] = 2
    params['gibbs']['num_iter'] = 2
    params['verbosity'] = 0
    params['gibbs']['verbosity'] = 0

    isa.initialize(randn(2, 1000))
    isa.train(randn(2, 1000), params)
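# The sketch below is not a test; it strings together the calls exercised above
# into a minimal training run, assuming the same ISA interface (ISA(),
# default_parameters(), initialize(), train(), sample()) and numpy's randn.
#
#     from numpy.random import randn
#
#     isa = ISA(2)                          # model with 2 visible dimensions
#     params = isa.default_parameters()     # dictionary of training parameters
#     params['training_method'] = 'SGD'
#     params['max_iter'] = 10
#     isa.initialize(randn(2, 1000))        # data-dependent initialization
#     isa.train(randn(2, 1000), params)     # fit basis and priors to the data
#     samples = isa.sample(100)             # draw 100 samples from the fitted model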