Example #1
import os
import shutil
import tempfile

import numpy as np

from adaptive import BalancingLearner, Learner1D
from adaptive.runner import simple

# `generate_random_parametrization` is a helper from adaptive's test suite,
# defined alongside these tests.


def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
    f = generate_random_parametrization(f)
    learner = BalancingLearner([learner_type(f, **learner_kwargs)])
    control = BalancingLearner([learner_type(f, **learner_kwargs)])

    if learner_type is Learner1D:
        for l, c in zip(learner.learners, control.learners):
            l._recompute_losses_factor = 1
            c._recompute_losses_factor = 1

    simple(learner, lambda l: l.learners[0].npoints > 100)
    folder = tempfile.mkdtemp()

    def fname(learner):
        # Write inside the temporary folder so the cleanup below also
        # removes the saved file.
        return os.path.join(folder, "test")

    try:
        learner.save(fname=fname)
        control.load(fname=fname)

        np.testing.assert_almost_equal(learner.loss(), control.loss())

        # Check that the control learner can still be run
        simple(control, lambda l: l.learners[0].npoints > 200)
    finally:
        shutil.rmtree(folder)
Example #2
def test_balancing_learner(learner_type, f, learner_kwargs):
    """Test if the BalancingLearner works with the different types of learners."""
    learners = [
        learner_type(generate_random_parametrization(f), **learner_kwargs)
        for i in range(4)
    ]

    learner = BalancingLearner(learners)

    # Emulate parallel execution
    stash = []

    for i in range(100):
        n = random.randint(1, 10)
        m = random.randint(0, n)
        xs, _ = learner.ask(n, tell_pending=False)

        # Save 'm' random points out of `xs` for later
        random.shuffle(xs)
        for _ in range(m):
            stash.append(xs.pop())

        for x in xs:
            learner.tell(x, learner.function(x))

        # Evaluate and add 'm' random points from `stash`
        random.shuffle(stash)
        for _ in range(m):
            x = stash.pop()
            learner.tell(x, learner.function(x))

    assert all(l.npoints > 10 for l in learner.learners), [
        l.npoints for l in learner.learners
    ]
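The loop above emulates how a parallel runner interleaves requesting points and reporting results. A minimal single-learner sketch of the ask/tell protocol it exercises (assuming adaptive's public Learner1D API; the quadratic function is only illustrative):

from adaptive import Learner1D

learner = Learner1D(lambda x: x**2, bounds=(-1, 1))

# `ask` proposes points and marks them pending; `tell` feeds back the
# evaluated values, resolving those pending points.
xs, _ = learner.ask(5)
for x in xs:
    learner.tell(x, learner.function(x))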
Example #3
def test_balancing_learner_loss_cache():
    learner = Learner1D(lambda x: x, bounds=(-1, 1))
    learner.tell(-1, -1)
    learner.tell(1, 1)
    learner.tell_pending(0)

    real_loss = learner.loss(real=True)
    pending_loss = learner.loss(real=False)

    # Test if the real and pending loss are cached correctly
    bl = BalancingLearner([learner])
    assert bl.loss(real=True) == real_loss
    assert bl.loss(real=False) == pending_loss

    # Test if everything is still fine when executed in the reverse order
    bl = BalancingLearner([learner])
    assert bl.loss(real=False) == pending_loss
    assert bl.loss(real=True) == real_loss
Example #4
def test_distribute_first_points_over_learners(strategy):
    for initial_points in [0, 3]:
        learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for i in range(10)]
        learner = BalancingLearner(learners, strategy=strategy)

        points = learner.ask(initial_points)[0]
        learner.tell_many(points, points)

        points, _ = learner.ask(100)
        i_learner, xs = zip(*points)
        # assert that all learners appear in the suggested points
        assert len(set(i_learner)) == len(learners)
Example #5
def test_strategies(strategy, goal):
    learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for i in range(10)]
    learner = BalancingLearner(learners, strategy=strategy)
    simple(learner, goal=goal)
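A sketch of selecting a strategy explicitly; the strategy names are taken from BalancingLearner's documented options ('loss_improvements' is the default), and the goal below is only illustrative:

from adaptive import BalancingLearner, Learner1D
from adaptive.runner import simple

learners = [Learner1D(lambda x: x**2, bounds=(-1, 1)) for _ in range(3)]

# Other documented options: 'loss_improvements' (default), 'loss', 'cycle'.
learner = BalancingLearner(learners, strategy="npoints")
simple(learner, goal=lambda bl: all(l.npoints >= 20 for l in bl.learners))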
Example #6
def test_ask_0(strategy):
    learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for i in range(10)]
    learner = BalancingLearner(learners, strategy=strategy)
    points, _ = learner.ask(0)
    assert len(points) == 0
Example #7
def balancing_learner(f, learner_type, learner_kwargs):
    learner_1 = learner_type(f, **learner_kwargs)
    learner_2 = learner_type(f, **learner_kwargs)
    return BalancingLearner([learner_1, learner_2])
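This helper simply bundles two identically configured learners into one BalancingLearner. A hypothetical invocation, assuming adaptive's Learner1D and adaptive.runner.simple (neither appears in this excerpt):

from adaptive import Learner1D
from adaptive.runner import simple

# Balance two Learner1D instances over the same function and run them
# in-process until each sub-learner has at least 50 points.
bl = balancing_learner(lambda x: x**2, Learner1D, {"bounds": (-1, 1)})
simple(bl, goal=lambda l: all(lrn.npoints >= 50 for lrn in l.learners))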