def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
    f = generate_random_parametrization(f)
    learner = BalancingLearner([learner_type(f, **learner_kwargs)])
    control = BalancingLearner([learner_type(f, **learner_kwargs)])

    if learner_type is Learner1D:
        for l, c in zip(learner.learners, control.learners):
            l._recompute_losses_factor = 1
            c._recompute_losses_factor = 1

    simple(learner, lambda l: l.learners[0].npoints > 100)
    folder = tempfile.mkdtemp()

    def fname(learner):
        # Use os.path.join so the file is created *inside* the temporary
        # folder; plain concatenation would place it next to the folder and
        # the cleanup in the 'finally' block would miss it.
        return os.path.join(folder, "test")

    try:
        learner.save(fname=fname)
        control.load(fname=fname)

        np.testing.assert_almost_equal(learner.loss(), control.loss())

        # Check that the control learner is still runnable.
        simple(control, lambda l: l.learners[0].npoints > 200)
    finally:
        shutil.rmtree(folder)
def test_balancing_learner_loss_cache():
    learner = Learner1D(lambda x: x, bounds=(-1, 1))
    learner.tell(-1, -1)
    learner.tell(1, 1)
    learner.tell_pending(0)

    real_loss = learner.loss(real=True)
    pending_loss = learner.loss(real=False)

    # Test if the real and pending loss are cached correctly
    bl = BalancingLearner([learner])
    assert bl.loss(real=True) == real_loss
    assert bl.loss(real=False) == pending_loss

    # Test if everything is still fine when executed in the reverse order
    bl = BalancingLearner([learner])
    assert bl.loss(real=False) == pending_loss
    assert bl.loss(real=True) == real_loss