# Example #1
# 0
def test_pareto_hypercell_bounds_raises_for_anti_reference_with_invalid_shape(
    anti_reference: SequenceN[float],
) -> None:
    """Hypercell bounds must reject an anti-reference point of invalid shape."""
    front = Pareto(tf.constant([[-1.0, -0.6], [-0.8, -0.7], [-0.6, -1.1]]))
    reference = tf.constant([0.0, 0.0])

    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        front.hypercell_bounds(tf.constant(anti_reference), reference)
# Example #2
# 0
def test_pareto_hypercell_bounds_raises_for_front_below_anti_reference_point(
    anti_reference: list[float],
) -> None:
    """An anti-reference point lying above the front must trigger an error."""
    front = Pareto(tf.constant([[-1.0, -0.6], [-0.8, -0.7], [-0.6, -1.1]]))
    reference = tf.constant([10.0, 10.0])

    with pytest.raises(tf.errors.InvalidArgumentError):
        front.hypercell_bounds(tf.constant(anti_reference), reference)
# Example #3
# 0
def test_pareto_hypercell_bounds(
    objectives: SequenceN[float],
    anti_reference: list[float],
    reference: list[float],
    expected: SequenceN[float],
) -> None:
    """Check hypercell lower and upper bounds against expected values.

    Fix: added the missing ``-> None`` annotation (for consistency with the
    sibling tests) and compute the cell decomposition once instead of calling
    ``hypercell_bounds`` twice with identical arguments.
    """
    pareto = Pareto(tf.constant(objectives))
    # hypercell_bounds returns a (lower, upper) pair of tensors.
    lower, upper = pareto.hypercell_bounds(tf.constant(anti_reference), tf.constant(reference))
    npt.assert_allclose(lower, tf.constant(expected[0]))
    npt.assert_allclose(upper, tf.constant(expected[1]))
# Example #4
# 0
def test_expected_hypervolume_improvement(
    input_dim: int,
    num_samples_per_point: int,
    existing_observations: tf.Tensor,
    obj_num: int,
    variance_scale: float,
) -> None:
    """Compare closed-form EHVI against a Monte-Carlo estimate.

    Builds a grid of test inputs, draws samples from the model's predictive
    distribution at each input, computes the hypervolume improvement of each
    sample over the non-dominated cell decomposition of the current Pareto
    front, and checks that averaging these improvements agrees with the
    analytic ``expected_hv_improvement`` to within the given tolerances.
    """
    # Note: the test data number grows exponentially with num of obj
    data_num_seg_per_dim = 2  # test data number per input dim
    N = data_num_seg_per_dim**input_dim
    # Cartesian product of per-dimension linspaces over [-1, 1] -> grid of N inputs.
    xs = tf.convert_to_tensor(
        list(
            itertools.product(
                *[list(tf.linspace(-1, 1, data_num_seg_per_dim))] *
                input_dim)))

    # Match the dtype of the observations so the model accepts the inputs.
    xs = tf.cast(xs, dtype=existing_observations.dtype)
    model = _mo_test_model(obj_num, *[variance_scale] * obj_num)
    mean, variance = model.predict(xs)

    # Independent-normal predictive samples; shape noted in the inline comment.
    predict_samples = tfp.distributions.Normal(mean, tf.sqrt(variance)).sample(
        num_samples_per_point  # [f_samples, batch_size, obj_num]
    )
    _pareto = Pareto(existing_observations)
    ref_pt = get_reference_point(_pareto.front)
    # Decompose the dominated region with a -inf anti-reference so every cell
    # is unbounded below.
    lb_points, ub_points = _pareto.hypercell_bounds(
        tf.constant([-math.inf] * ref_pt.shape[-1]), ref_pt)

    # calc MC approx EHVI
    # A sample contributes to a cell only if it is strictly below that cell's
    # upper bound in every objective.
    splus_valid = tf.reduce_all(
        tf.tile(ub_points[tf.newaxis, :, tf.newaxis, :],
                [num_samples_per_point, 1, N, 1]) > tf.expand_dims(
                    predict_samples, axis=1),
        axis=-1,  # can predict_samples contribute to hvi in cell
    )  # [f_samples, num_cells,  B]
    # Validity as a 0/1 factor; multiplying by it zeroes out invalid cells.
    splus_idx = tf.expand_dims(tf.cast(splus_valid, dtype=ub_points.dtype), -1)
    splus_lb = tf.tile(lb_points[tf.newaxis, :, tf.newaxis, :],
                       [num_samples_per_point, 1, N, 1])
    splus_lb = tf.maximum(  # max of lower bounds and predict_samples
        splus_lb, tf.expand_dims(predict_samples, 1))
    splus_ub = tf.tile(ub_points[tf.newaxis, :, tf.newaxis, :],
                       [num_samples_per_point, 1, N, 1])
    splus = tf.concat(  # concatenate validity labels and possible improvements
        [splus_idx, splus_ub - splus_lb],
        axis=-1)

    # calculate hyper-volume improvement over the non-dominated cells
    # Product over the last axis gives each cell's (validity-gated) volume;
    # summing over cells gives the per-sample improvement.
    ehvi_approx = tf.transpose(
        tf.reduce_sum(tf.reduce_prod(splus, axis=-1), axis=1, keepdims=True))
    ehvi_approx = tf.reduce_mean(ehvi_approx,
                                 axis=-1)  # average through mc sample

    # Closed-form acquisition evaluated at the same grid (with a batch axis).
    ehvi = expected_hv_improvement(model, _pareto,
                                   ref_pt)(tf.expand_dims(xs, -2))

    npt.assert_allclose(ehvi, ehvi_approx, rtol=0.01, atol=0.01)