Example #1
def test_distributed_executor(dask_executor):
    # `dask_executor` is a pytest fixture supplying a dask-based executor.
    learner = Learner1D(linear, (-1, 1))
    BlockingRunner(learner, trivial_goal, executor=dask_executor)
    assert learner.npoints > 0
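
These snippets come from a test suite and rely on names defined elsewhere in that module. A minimal sketch of the assumed imports and stand-in helpers that would make them self-contained (the real definitions in the test suite may differ):

import asyncio
import random
import time

import numpy as np
from adaptive import AsyncRunner, BalancingLearner, BlockingRunner, Learner1D
from adaptive.runner import simple, stop_after

def linear(x):
    # Trivial test function: the identity.
    return x

def trivial_goal(learner):
    # Hypothetical stand-in: stop once any point has been evaluated.
    return learner.npoints > 0
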
Example #2
def test_concurrent_futures_executor():
    from concurrent.futures import ProcessPoolExecutor
    BlockingRunner(Learner1D(linear, (-1, 1)),
                   trivial_goal,
                   executor=ProcessPoolExecutor(max_workers=1))
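
The runner accepts any `concurrent.futures`-style executor here; a single worker keeps the test lightweight while still exercising the process-pool code path.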
Example #3
def test_ask_does_not_return_known_points_when_returning_bounds():
    learner = Learner1D(lambda x: None, (-1, 1))
    learner.tell(0, 0)  # the learner now knows the point x=0
    points, _ = learner.ask(3)
    assert 0 not in points
Example #4
def test_tell_many():
    def f(x, offset=0.123214):
        a = 0.01
        return (np.sin(x**2) + np.sin(x**5) + a**2 / (a**2 + (x - offset)**2) +
                x**2 + 1e-5 * x**3)

    def f_vec(x, offset=0.123214):
        a = 0.01
        y = x + a**2 / (a**2 + (x - offset)**2)
        return [y, 0.5 * y, y**2]

    def assert_equal_dicts(d1, d2):
        xs1, ys1 = zip(*sorted(d1.items()))
        xs2, ys2 = zip(*sorted(d2.items()))
        ys1 = np.array(ys1, dtype=float)
        ys2 = np.array(ys2, dtype=float)
        np.testing.assert_almost_equal(xs1, xs2)
        np.testing.assert_almost_equal(ys1, ys2)

    def test_equal(l1, l2):
        assert_equal_dicts(l1.neighbors, l2.neighbors)
        assert_equal_dicts(l1.neighbors_combined, l2.neighbors_combined)
        assert_equal_dicts(l1.data, l2.data)
        assert_equal_dicts(l1.losses, l2.losses)
        assert_equal_dicts(l1.losses_combined, l2.losses_combined)
        np.testing.assert_almost_equal(sorted(l1.pending_points),
                                       sorted(l2.pending_points))
        np.testing.assert_almost_equal(l1._bbox[1], l2._bbox[1])
        assert l1._scale == l2._scale
        assert l1._bbox[0] == l2._bbox[0]

    for function in [f, f_vec]:
        learner = Learner1D(function, bounds=(-1, 1))
        learner2 = Learner1D(function, bounds=(-1, 1))
        simple(learner, goal=lambda l: l.npoints > 200)
        xs, ys = zip(*learner.data.items())

        # Make the scale huge to avoid a scale doubling
        x = 1e-6
        max_value = (1e6 if learner.vdim == 1
                     else np.array(learner.vdim * [1e6]))
        learner.tell(x, max_value)
        learner2.tell(x, max_value)

        for x in xs:
            learner2.tell_pending(x)

        learner2.tell_many(xs, ys)
        test_equal(learner, learner2)

    # Test order-independence: keep a stash of points that will be
    # evaluated later to emulate out-of-order parallel execution.
    def _random_run(learner, learner2, scale_doubling=True):
        if not scale_doubling:
            # Make the scale huge to avoid a scale doubling
            x = 1e-6
            max_value = 1e6
            learner.tell(x, max_value)
            learner2.tell(x, max_value)

        stash = []
        for i in range(10):
            xs, _ = learner.ask(10)
            for x in xs:
                learner2.tell_pending(x)

            # Save 5 random points out of `xs` for later
            random.shuffle(xs)
            for _ in range(5):
                stash.append(xs.pop())

            ys = [learner.function(x) for x in xs]

            learner.tell_many(xs, ys, force=True)
            for x, y in zip(xs, ys):
                learner2.tell(x, y)

            # Evaluate and add a random number (1 to 5) of points from `stash`
            random.shuffle(stash)
            xs = [stash.pop() for _ in range(random.randint(1, 5))]
            ys = [learner.function(x) for x in xs]

            learner.tell_many(xs, ys, force=True)
            for x, y in zip(xs, ys):
                learner2.tell(x, y)

        if scale_doubling:
            # Double the scale to trigger the loss updates
            max_value = max(learner.data.values())
            x = 1e-6
            learner.tell(x, max_value * 10)
            learner2.tell(x, max_value * 10)

    learner = Learner1D(f, bounds=(-1, 1))
    learner2 = Learner1D(f, bounds=(-1, 1))
    _random_run(learner, learner2, scale_doubling=False)
    test_equal(learner, learner2)

    learner = Learner1D(f, bounds=(-1, 1))
    learner2 = Learner1D(f, bounds=(-1, 1))
    _random_run(learner, learner2, scale_doubling=True)
    test_equal(learner, learner2)
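
For contrast with the exhaustive test above, a minimal sketch of the `tell_many` call it exercises: feed a batch of precomputed points to a fresh learner in one shot (the function and point count here are arbitrary choices, not from the test):

import numpy as np
from adaptive import Learner1D

learner = Learner1D(np.sin, bounds=(-1, 1))
xs = np.linspace(-1, 1, 11)
learner.tell_many(xs, np.sin(xs))  # batch equivalent of repeated tell()
assert learner.npoints == 11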
Example #5
def test_ipyparallel_executor(ipyparallel_executor):
    # `ipyparallel_executor` is a pytest fixture supplying an
    # ipyparallel-backed executor.
    learner = Learner1D(linear, (-1, 1))
    BlockingRunner(learner, trivial_goal, executor=ipyparallel_executor)
    assert learner.npoints > 0
Example #6
def test_stop_after_goal():
    seconds_to_wait = 0.2  # don't make this too large or the test will take ages
    start_time = time.time()
    BlockingRunner(Learner1D(linear, (-1, 1)), stop_after(seconds=seconds_to_wait))
    stop_time = time.time()
    assert stop_time - start_time > seconds_to_wait
Example #7
def test_strategies(strategy, goal):
    learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for _ in range(10)]
    learner = BalancingLearner(learners, strategy=strategy)
    simple(learner, goal=goal)
Example #8
def test_ask_0(strategy):
    learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for _ in range(10)]
    learner = BalancingLearner(learners, strategy=strategy)
    points, _ = learner.ask(0)
    assert len(points) == 0
Example #9
def test_default_executor():
    learner = Learner1D(linear, (-1, 1))
    runner = AsyncRunner(learner, goal=lambda l: l.npoints > 10)
    # Block until the runner's task, scheduled on the current event loop, is done.
    asyncio.get_event_loop().run_until_complete(runner.task)
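
Note: `AsyncRunner` schedules its work as a task on the current event loop, which is why the test blocks on `runner.task`; in a Jupyter notebook the same runner keeps working in the background while the kernel stays responsive.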