Example #1
def test_interior_vs_bbox_gives_same_result():
    # Learning on the convex hull spanned by the bounding-box corners
    # should yield exactly the same data as learning on the box bounds.
    f = generate_random_parametrization(ring_of_fire)

    control = LearnerND(f, bounds=[(-1, 1), (-1, 1)])
    hull = scipy.spatial.ConvexHull(control._bounds_points)
    learner = LearnerND(f, bounds=hull)

    simple(control, goal=lambda l: l.loss() < 0.1)
    simple(learner, goal=lambda l: l.loss() < 0.1)

    assert learner.data == control.data


def test_learnerND_curvature_runs_to_10_points_Blocking(execution_number):
    loss = curvature_loss_function()
    learner = LearnerND(ring_of_fire,
                        bounds=[(-1, 1), (-1, 1)],
                        loss_per_simplex=loss)
    BlockingRunner(learner, goal=lambda l: l.npoints >= 10)
    assert learner.npoints >= 10


def test_learnerND_curvature_runs_to_10_points():
    loss = curvature_loss_function()
    learner = LearnerND(ring_of_fire,
                        bounds=[(-1, 1), (-1, 1)],
                        loss_per_simplex=loss)
    SimpleRunner(learner, goal=lambda l: l.npoints >= 10)
    assert learner.npoints == 10
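
These snippets assume imports and test helpers that the listing does not show. Below is a minimal sketch of what running them could require; the import paths are assumptions based on the adaptive package layout, and the bodies of ring_of_fire, generate_random_parametrization, and SimpleRunner are illustrative guesses, not the originals.

import functools
import random

import numpy as np
import scipy.spatial
from adaptive import BlockingRunner, LearnerND
from adaptive.learner.learnerND import curvature_loss_function
from adaptive.runner import simple


def ring_of_fire(xy, d=0.75):
    # Illustrative stand-in: smooth everywhere except a sharp ring of radius d.
    a = 0.2
    x, y = xy
    return x + np.exp(-(x**2 + y**2 - d**2) ** 2 / a**4)


def generate_random_parametrization(f):
    # Illustrative stand-in: freeze the function's free parameter at a random value.
    return functools.partial(f, d=random.uniform(0.5, 1.0))


def SimpleRunner(learner, goal):
    # Assumed here to be a thin wrapper around the sequential `simple` runner.
    simple(learner, goal=goal)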
Example #4
def test_vector_return_with_a_flat_layer():
    f = generate_random_parametrization(ring_of_fire)
    g = generate_random_parametrization(ring_of_fire)
    # Vector-valued outputs; in h2 and h3 one component is identically zero (a "flat layer").
    h1 = lambda xy: np.array([f(xy), g(xy)])  # noqa: E731
    h2 = lambda xy: np.array([f(xy), 0 * g(xy)])  # noqa: E731
    h3 = lambda xy: np.array([0 * f(xy), g(xy)])  # noqa: E731
    for function in [h1, h2, h3]:
        learner = LearnerND(function, bounds=[(-1, 1), (-1, 1)])
        simple(learner, goal=lambda l: l.loss() < 0.1)
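
As h1-h3 show, LearnerND accepts functions returning NumPy arrays. A minimal sketch of driving such a vector-valued learner by hand, using a simplified stand-in function rather than the test's randomized ones:

import numpy as np
from adaptive import LearnerND


def vector_valued(xy):
    x, y = xy
    # Two output components; the second is deliberately flat (always zero).
    return np.array([np.sin(x * y), 0.0])


learner = LearnerND(vector_valued, bounds=[(-1, 1), (-1, 1)])
points, _ = learner.ask(4)          # ask() returns (points, loss_improvements)
for p in points:
    learner.tell(p, vector_valued(p))
print(learner.npoints)              # number of evaluated points so far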
Example #5
def test_faiure_case_LearnerND():
    # Replay a recorded ask/tell log that previously exposed a failure in LearnerND.
    log = [('ask', 4), ('tell', (-1, -1, -1), 1.607873907219222e-101),
           ('tell', (-1, -1, 1), 1.607873907219222e-101), ('ask', 2),
           ('tell', (-1, 1, -1), 1.607873907219222e-101),
           ('tell', (-1, 1, 1), 1.607873907219222e-101), ('ask', 2),
           ('tell', (1, -1, 1), 2.0), ('tell', (1, -1, -1), 2.0), ('ask', 2),
           ('tell', (0.0, 0.0, 0.0), 4.288304431237686e-06),
           ('tell', (1, 1, -1), 2.0)]
    learner = LearnerND(lambda *x: x, bounds=[(-1, 1), (-1, 1), (-1, 1)])
    replay_log(learner, log)


def test_learnerND_log_works():
    # A mixed sequence of ask() and tell() calls should run without errors.
    loss = curvature_loss_function()
    learner = LearnerND(ring_of_fire,
                        bounds=[(-1, 1), (-1, 1)],
                        loss_per_simplex=loss)
    learner.ask(4)
    learner.tell((-1, -1), -1.0)
    learner.ask(1)
    learner.tell((-1, 1), -1.0)
    learner.tell((1, -1), 1.0)
    learner.ask(2)


def test_learnerND_runs_to_10_points_Blocking(execution_number):
    learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)])
    BlockingRunner(learner, goal=lambda l: l.npoints >= 10)
    assert learner.npoints >= 10


def test_learnerND_runs_to_10_points():
    learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)])
    SimpleRunner(learner, goal=lambda l: l.npoints >= 10)
    assert learner.npoints == 10
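
replay_log is a test helper not shown in the listing. A minimal sketch of what it presumably does, assuming each log entry is a learner method name followed by its positional arguments:

def replay_log(learner, log):
    # Re-apply a recorded sequence of learner calls against a fresh learner,
    # e.g. ('ask', 4) -> learner.ask(4) and ('tell', point, value) -> learner.tell(point, value).
    for method, *args in log:
        getattr(learner, method)(*args)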