import downhill

import util


# The class groupings and names below are reconstructed from the method
# signatures (each duplicate `test_factor` originally lived on its own
# test class); the class names are assumptions.
class TestBase:
    def test_factor(self):
        opt, train = util.build_factor('tester')
        assert isinstance(opt, Tester)
        # Run the optimizer for three iterations. Check that the u and v
        # values (being monitored) are reasonable at the start.
        for i, (tm, vm) in enumerate(opt.iterate(train)):
            assert abs(vm['u<1'] - 0.001) < 1e-5
            assert vm['u<-1'] == 0
            assert vm['v<1'] == 1
            assert vm['v<-1'] == 0
            if i == 2:
                break


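# A minimal sketch, assuming the shape of this suite's helpers: `Tester` is a
# trivial downhill Optimizer subclass that build_factor('tester') resolves by
# its lower-cased name, and build_factor sets up a matrix-factorization loss
# whose 'u<1'-style monitors are asserted above. Everything in this block
# (names, sizes, the import path downhill.base.Optimizer) is an assumption
# for illustration, not the repo's actual util module.
import numpy as np
import theano
import theano.tensor as TT


class Tester(downhill.base.Optimizer):
    '''Hypothetical stub optimizer: nudges each parameter by a constant.'''

    def _get_updates_for(self, param, grad):
        yield param, param + 0.001


def build_factor_sketch(algo):
    '''Hypothetical counterpart to util.build_factor.'''
    # Factor a 100x10 matrix x into u (100x5) and v (5x10), and monitor the
    # fraction of entries of u and v below the thresholds 1 and -1.
    x = TT.matrix('x')
    u = theano.shared(0.001 * np.ones((100, 5), 'f'), name='u')
    v = theano.shared(0.001 * np.ones((5, 10), 'f'), name='v')
    opt = downhill.build(
        algo,
        loss=TT.sqr(x - TT.dot(u, v)).mean(),
        monitors=[
            ('u<1', (u < 1).mean()), ('u<-1', (u < -1).mean()),
            ('v<1', (v < 1).mean()), ('v<-1', (v < -1).mean()),
        ])
    train = [np.random.randn(100, 10).astype('f')]
    return opt, train

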
class TestBuild:
    def test_sgd(self):
        assert isinstance(util.build_rosen('sgd')[0], downhill.SGD)
        assert isinstance(util.build_factor('sgd')[0], downhill.SGD)


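# A sketch (not part of the original suite) of the contract test_sgd checks:
# downhill.build resolves an optimizer class by name and returns an instance
# wired to the given loss. The loss here is illustrative only.
def test_build_by_name_sketch():
    x = theano.shared(np.zeros(2, 'f'), name='x')
    opt = downhill.build('sgd', loss=TT.sqr(x).sum())
    assert isinstance(opt, downhill.SGD)

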
class TestNAG:
    def test_factor(self):
        util.assert_progress(*util.build_factor('nag'), max_gradient_elem=1)


class TestSGD:
    def test_factor_nesterov(self):
        util.assert_progress(
            *util.build_factor('sgd'), max_gradient_norm=1)

    def test_factor(self):
        util.assert_progress(
            *util.build_factor('sgd'), max_gradient_elem=1, nesterov=False)


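# A minimal sketch, assuming the behavior of util.assert_progress (used by
# the tests above and below): drive the optimizer, passing any hyperparameters
# through to iterate, and require the training loss to improve.
def assert_progress_sketch(opt, train, **kwargs):
    losses = []
    for tm, _ in opt.iterate(train, **kwargs):
        losses.append(tm['loss'])
        if len(losses) == 10:
            break
    assert losses[-1] < losses[0], 'optimizer made no progress'

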
class TestESGD:
    def test_factor(self):
        util.assert_progress(*util.build_factor('esgd'), learning_rate=1e-6)


class TestAdam:
    def test_factor(self):
        util.assert_progress(*util.build_factor('adam'))


class TestRMSProp:
    def test_factor(self):
        util.assert_progress(*util.build_factor('rmsprop'))


class TestADAGRAD:
    def test_factor(self):
        util.assert_progress(*util.build_factor('adagrad'))