Example no. 1
def test_saving_with_datasaver(learner_type, f, learner_kwargs):
    f = generate_random_parametrization(f)
    g = lambda x: {"y": f(x), "t": random.random()}  # noqa: E731
    arg_picker = operator.itemgetter("y")
    learner = DataSaver(learner_type(g, **learner_kwargs), arg_picker)
    control = DataSaver(learner_type(g, **learner_kwargs), arg_picker)

    if learner_type is Learner1D:
        learner.learner._recompute_losses_factor = 1
        control.learner._recompute_losses_factor = 1

    simple(learner, lambda l: l.npoints > 100)
    fd, path = tempfile.mkstemp()
    os.close(fd)  # mkstemp also returns an open file descriptor; close it
    try:
        learner.save(path)
        control.load(path)

        np.testing.assert_almost_equal(learner.loss(), control.loss())

        assert learner.extra_data == control.extra_data

        # Check that the control is still runnable
        simple(control, lambda l: l.npoints > 200)
    finally:
        os.remove(path)
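A minimal, standalone sketch of the DataSaver pattern this test exercises (the function noisy and its goal are invented here, not part of the test suite): the wrapped function returns a dict, arg_picker selects the value the learner actually learns on, and every remaining key ends up in extra_data.

import operator
import random

from adaptive import DataSaver, Learner1D
from adaptive.runner import simple


def noisy(x):
    # "y" drives the learner; "t" is only stored as extra data.
    return {"y": x**2, "t": random.random()}


learner = DataSaver(Learner1D(noisy, bounds=(-1, 1)), operator.itemgetter("y"))
simple(learner, lambda l: l.npoints > 50)
print(len(learner.extra_data))  # one "t" entry per evaluated point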
Example no. 2
def test_serialization_for(learner_type, learner_kwargs, serializer, f):
    """Test serializing a learner using different serializers."""

    learner = learner_type(f, **learner_kwargs)

    simple(learner, goal_1)
    learner_bytes = serializer.dumps(learner)
    loss = learner.loss()
    asked = learner.ask(10)
    data = learner.data

    del f
    del learner

    learner_loaded = serializer.loads(learner_bytes)
    assert learner_loaded.npoints == 10
    assert loss == learner_loaded.loss()
    assert data == learner_loaded.data

    assert asked == learner_loaded.ask(10)

    # load again to undo the ask
    learner_loaded = serializer.loads(learner_bytes)

    simple(learner_loaded, goal_2)
    assert learner_loaded.npoints == 20
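For reference, a hedged sketch of the same round trip outside pytest, using the standard-library pickle as the serializer (the test is parametrized over several serializers and goals; parabola is an invented stand-in for f):

import pickle

from adaptive import Learner1D
from adaptive.runner import simple


def parabola(x):
    return x**2  # module level, so pickle can resolve it by reference


learner = Learner1D(parabola, bounds=(-1, 1))
simple(learner, lambda l: l.npoints >= 10)

blob = pickle.dumps(learner)
restored = pickle.loads(blob)
assert restored.data == learner.data
assert restored.loss() == learner.loss()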
Example no. 3
def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
    f = generate_random_parametrization(f)
    learner = BalancingLearner([learner_type(f, **learner_kwargs)])
    control = BalancingLearner([learner_type(f, **learner_kwargs)])

    if learner_type is Learner1D:
        for l, c in zip(learner.learners, control.learners):
            l._recompute_losses_factor = 1
            c._recompute_losses_factor = 1

    simple(learner, lambda l: l.learners[0].npoints > 100)
    folder = tempfile.mkdtemp()

    def fname(learner):
        return os.path.join(folder, "test")

    try:
        learner.save(fname=fname)
        control.load(fname=fname)

        np.testing.assert_almost_equal(learner.loss(), control.loss())

        # Check that the control is still runnable
        simple(control, lambda l: l.learners[0].npoints > 200)
    finally:
        shutil.rmtree(folder)
Example no. 4
def test_curvature_loss_vectors():
    def f(x):
        return np.tanh(20 * x), np.tanh(20 * (x - 0.4))

    loss = curvature_loss_function()
    assert loss.nth_neighbors == 1
    learner = Learner1D(f, (-1, 1), loss_per_interval=loss)
    simple(learner, goal=lambda l: l.npoints > 100)
    assert learner.npoints > 100
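A minimal usage sketch of the same curvature-based loss for a scalar function (the choice of np.tanh is arbitrary; the API calls match those used in the test above):

import numpy as np

from adaptive import Learner1D
from adaptive.learner.learner1D import curvature_loss_function
from adaptive.runner import simple

loss = curvature_loss_function()  # nth_neighbors == 1: one extra neighbor per interval
learner = Learner1D(np.tanh, bounds=(-1, 1), loss_per_interval=loss)
simple(learner, lambda l: l.npoints > 100)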
Example no. 5
def test_vector_return_with_a_flat_layer():
    f = generate_random_parametrization(ring_of_fire)
    g = generate_random_parametrization(ring_of_fire)
    h1 = lambda xy: np.array([f(xy), g(xy)])  # noqa: E731
    h2 = lambda xy: np.array([f(xy), 0 * g(xy)])  # noqa: E731
    h3 = lambda xy: np.array([0 * f(xy), g(xy)])  # noqa: E731
    for function in [h1, h2, h3]:
        learner = LearnerND(function, bounds=[(-1, 1), (-1, 1)])
        simple(learner, goal=lambda l: l.loss() < 0.1)
Example no. 6
def test_NaN_loss():
    # see https://github.com/python-adaptive/adaptive/issues/145
    def f(x):
        a = 0.01
        if random.random() < 0.2:
            return np.nan
        return x + a**2 / (a**2 + x**2)

    learner = Learner1D(f, bounds=(-1, 1))
    simple(learner, lambda l: l.npoints > 100)
Example no. 7
def test_interior_vs_bbox_gives_same_result():
    f = generate_random_parametrization(ring_of_fire)

    control = LearnerND(f, bounds=[(-1, 1), (-1, 1)])
    hull = scipy.spatial.ConvexHull(control._bounds_points)
    learner = LearnerND(f, bounds=hull)

    simple(control, goal=lambda l: l.loss() < 0.1)
    simple(learner, goal=lambda l: l.loss() < 0.1)

    assert learner.data == control.data
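The same idea as a standalone sketch: LearnerND accepts a scipy ConvexHull as its domain in place of a bounding box (the corner points and function below are invented):

import numpy as np
import scipy.spatial

from adaptive import LearnerND
from adaptive.runner import simple

corners = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]])
hull = scipy.spatial.ConvexHull(corners)
learner = LearnerND(lambda xy: xy[0] * xy[1], bounds=hull)
simple(learner, goal=lambda l: l.loss() < 0.1)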
Example no. 8
def test_min_npoints():
    def constant_function(seed):
        return 0.1

    for min_npoints in [1, 2, 3]:
        learner = AverageLearner(constant_function,
                                 atol=0.01,
                                 rtol=0.01,
                                 min_npoints=min_npoints)
        simple(learner, lambda l: l.loss() < 1)
        assert learner.npoints >= max(2, min_npoints)
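A hedged sketch of the AverageLearner interface this test relies on (function and tolerances invented): the callable receives an integer seed and returns one sample; the learner keeps asking for new seeds until the estimated mean is accurate enough.

import random

from adaptive import AverageLearner
from adaptive.runner import simple


def noisy_sample(seed):
    random.seed(seed)  # make the sample a deterministic function of the seed
    return 0.1 + random.gauss(0, 0.01)


learner = AverageLearner(noisy_sample, atol=0.01, rtol=0.01, min_npoints=3)
simple(learner, lambda l: l.loss() < 1)
print(learner.mean, learner.npoints)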
Example no. 9
def test_loss_at_machine_precision_interval_is_zero():
    """The loss of an interval smaller than _dx_eps
    should be set to zero."""
    def f(x):
        return 1 if x == 0 else 0

    def goal(l):
        return l.loss() < 0.01 or l.npoints >= 1000

    learner = Learner1D(f, bounds=(-1, 1))
    simple(learner, goal=goal)

    # this means loss < 0.01 was reached
    assert learner.npoints != 1000
Example no. 10
def simple_run(learner, n):
    def get_goal(learner):
        if hasattr(learner, "nsamples"):
            return lambda l: l.nsamples > n
        else:
            return lambda l: l.npoints > n

    def goal():
        if isinstance(learner, BalancingLearner):
            return get_goal(learner.learners[0])
        elif isinstance(learner, DataSaver):
            return get_goal(learner.learner)
        return get_goal(learner)

    simple(learner, goal())
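Hypothetical usage, reusing the surrounding test module's imports: the helper inspects the learner it is given and picks the matching count attribute (nsamples or npoints) for the goal.

simple_run(Learner1D(lambda x: x**2, bounds=(-1, 1)), 50)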
Example no. 11
def test_saving(learner_type, f, learner_kwargs):
    f = generate_random_parametrization(f)
    learner = learner_type(f, **learner_kwargs)
    control = learner_type(f, **learner_kwargs)
    if learner_type is Learner1D:
        learner._recompute_losses_factor = 1
        control._recompute_losses_factor = 1
    simple(learner, lambda l: l.npoints > 100)
    fd, path = tempfile.mkstemp()
    os.close(fd)  # mkstemp also returns an open file descriptor; close it
    try:
        learner.save(path)
        control.load(path)

        np.testing.assert_almost_equal(learner.loss(), control.loss())

        # Check that the control is still runnable
        simple(control, lambda l: l.npoints > 200)
    finally:
        os.remove(path)
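The same round trip as a standalone sketch (learner type, function, and goal invented here): save stores the learner's accumulated data, so a fresh learner of the same type can load it and continue.

import os
import tempfile

from adaptive import Learner1D
from adaptive.runner import simple

learner = Learner1D(lambda x: x**2, bounds=(-1, 1))
simple(learner, lambda l: l.npoints > 50)

fd, path = tempfile.mkstemp()
os.close(fd)  # mkstemp hands back an open descriptor we don't need
try:
    learner.save(path)
    fresh = Learner1D(lambda x: x**2, bounds=(-1, 1))
    fresh.load(path)
    assert fresh.npoints == learner.npoints
finally:
    os.remove(path)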
Example no. 12
def test_tell_many():
    def f(x, offset=0.123214):
        a = 0.01
        return (np.sin(x**2) + np.sin(x**5) + a**2 / (a**2 + (x - offset)**2) +
                x**2 + 1e-5 * x**3)

    def f_vec(x, offset=0.123214):
        a = 0.01
        y = x + a**2 / (a**2 + (x - offset)**2)
        return [y, 0.5 * y, y**2]

    def assert_equal_dicts(d1, d2):
        xs1, ys1 = zip(*sorted(d1.items()))
        xs2, ys2 = zip(*sorted(d2.items()))
        ys1 = np.array(ys1, dtype=float)
        ys2 = np.array(ys2, dtype=float)
        np.testing.assert_almost_equal(xs1, xs2)
        np.testing.assert_almost_equal(ys1, ys2)

    def test_equal(l1, l2):
        assert_equal_dicts(l1.neighbors, l2.neighbors)
        assert_equal_dicts(l1.neighbors_combined, l2.neighbors_combined)
        assert_equal_dicts(l1.data, l2.data)
        assert_equal_dicts(l2.losses, l1.losses)
        assert_equal_dicts(l2.losses_combined, l1.losses_combined)
        np.testing.assert_almost_equal(sorted(l1.pending_points),
                                       sorted(l2.pending_points))
        np.testing.assert_almost_equal(l1._bbox[1], l2._bbox[1])
        assert l1._scale == l2._scale
        assert l1._bbox[0] == l2._bbox[0]

    for function in [f, f_vec]:
        learner = Learner1D(function, bounds=(-1, 1))
        learner2 = Learner1D(function, bounds=(-1, 1))
        simple(learner, goal=lambda l: l.npoints > 200)
        xs, ys = zip(*learner.data.items())

        # Make the scale huge to avoid a scale doubling
        x = 1e-6
        max_value = 1e6 if learner.vdim == 1 else np.array(learner.vdim * [1e6])
        learner.tell(x, max_value)
        learner2.tell(x, max_value)

        for x in xs:
            learner2.tell_pending(x)

        learner2.tell_many(xs, ys)
        test_equal(learner, learner2)

    # Test non-determinism. We keep a list of points that will be
    # evaluated later to emulate parallel execution.
    def _random_run(learner, learner2, scale_doubling=True):
        if not scale_doubling:
            # Make the scale huge to avoid a scale doubling
            x = 1e-6
            max_value = 1e6
            learner.tell(x, max_value)
            learner2.tell(x, max_value)

        stash = []
        for i in range(10):
            xs, _ = learner.ask(10)
            for x in xs:
                learner2.tell_pending(x)

            # Save 5 random points out of `xs` for later
            random.shuffle(xs)
            for _ in range(5):
                stash.append(xs.pop())

            ys = [learner.function(x) for x in xs]

            learner.tell_many(xs, ys, force=True)
            for x, y in zip(xs, ys):
                learner2.tell(x, y)

            # Evaluate and add N random points from `stash`
            random.shuffle(stash)
            xs = [stash.pop() for _ in range(random.randint(1, 5))]
            ys = [learner.function(x) for x in xs]

            learner.tell_many(xs, ys, force=True)
            for x, y in zip(xs, ys):
                learner2.tell(x, y)

        if scale_doubling:
            # Double the scale to trigger the loss updates
            max_value = max(learner.data.values())
            x = 1e-6
            learner.tell(x, max_value * 10)
            learner2.tell(x, max_value * 10)

    learner = Learner1D(f, bounds=(-1, 1))
    learner2 = Learner1D(f, bounds=(-1, 1))
    _random_run(learner, learner2, scale_doubling=False)
    test_equal(learner, learner2)

    learner = Learner1D(f, bounds=(-1, 1))
    learner2 = Learner1D(f, bounds=(-1, 1))
    _random_run(learner, learner2, scale_doubling=True)
    test_equal(learner, learner2)
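Condensed to its core, the invariant being tested (function and point counts invented here): feeding the same points one by one with tell and in a batch with tell_many must leave two learners in the same state.

from adaptive import Learner1D


def f(x):
    return x**3


one_by_one = Learner1D(f, bounds=(-1, 1))
batched = Learner1D(f, bounds=(-1, 1))

xs, _ = one_by_one.ask(20)
ys = [f(x) for x in xs]
for x, y in zip(xs, ys):
    one_by_one.tell(x, y)

for x in xs:
    batched.tell_pending(x)  # mark as "in flight", as a runner would
batched.tell_many(xs, ys)

assert one_by_one.data == batched.data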
Example no. 13
def test_learner_accepts_lists(learner_type, bounds):
    def f(x):
        return [0, 1]

    learner = learner_type(f, bounds=bounds)
    simple(learner, goal=lambda l: l.npoints > 10)
Example no. 14
def test_strategies(strategy, goal):
    learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for _ in range(10)]
    learner = BalancingLearner(learners, strategy=strategy)
    simple(learner, goal=goal)
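For context, the strategies this test is parametrized over, per the BalancingLearner documentation (the goal below is invented): each strategy changes how the next sub-learner is picked.

from adaptive import BalancingLearner, Learner1D
from adaptive.runner import simple

for strategy in ("loss_improvements", "loss", "npoints", "cycle"):
    learners = [Learner1D(lambda x: x**2, bounds=(-1, 1)) for _ in range(3)]
    balancer = BalancingLearner(learners, strategy=strategy)
    simple(balancer, lambda l: all(lrn.npoints > 10 for lrn in l.learners))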