Example #1
from typing import Optional

import numpy as np
import tensorflow as tf

import gpflow
from gpflow.optimizers import NaturalGradient


def assert_gpr_vs_vgp(
        m1: tf.Module,
        m2: tf.Module,
        gamma: float = 1.0,
        maxiter: int = 1,
        xi_transform: Optional[gpflow.optimizers.natgrad.XiTransform] = None):
    assert maxiter >= 1

    # Before optimization the VGP bound and the exact GPR marginal
    # likelihood must differ, otherwise the test below is vacuous.
    m2_ll_before = m2.log_likelihood()
    m1_ll_before = m1.log_likelihood()

    assert m2_ll_before != m1_ll_before

    @tf.function(autograph=False)
    def loss_cb() -> tf.Tensor:
        return -m2.log_marginal_likelihood()

    # Natural gradients update the variational parameters; an optional
    # XiTransform chooses the parameterization in which the step is taken.
    params = (m2.q_mu, m2.q_sqrt)
    if xi_transform is not None:
        params += (xi_transform,)

    opt = NaturalGradient(gamma)

    @tf.function(autograph=False)
    def minimize_step():
        opt.minimize(loss_cb, var_list=[params])

    for _ in range(maxiter):
        minimize_step()

    m2_ll_after = m2.log_likelihood()
    m1_ll_after = m1.log_likelihood()

    # With a Gaussian likelihood a full natural-gradient step recovers the
    # exact posterior, so the VGP likelihood should match GPR's.
    np.testing.assert_allclose(m1_ll_after, m2_ll_after, atol=1e-4)
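
For context, a minimal usage sketch: the data X, Y and the Matern52 kernel are illustrative assumptions, not part of the original test, and it presumes the pre-release GPflow 2.0 API used above, in which models expose log_likelihood().

import numpy as np
import gpflow

X = np.random.rand(20, 1)
Y = np.sin(10 * X) + 0.1 * np.random.randn(20, 1)

# GPR gives the exact posterior; VGP approximates it variationally.
gpr = gpflow.models.GPR((X, Y), kernel=gpflow.kernels.Matern52())
vgp = gpflow.models.VGP((X, Y), kernel=gpflow.kernels.Matern52(),
                        likelihood=gpflow.likelihoods.Gaussian())

assert_gpr_vs_vgp(gpr, vgp)  # one step with gamma=1.0 should suffice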
Example #2
def assert_sgpr_vs_svgp(m1: tf.Module, m2: tf.Module):
    # Same imports as Example #1. SGPR stores its training data on the
    # model; SVGP receives the data with every call instead.
    data = m1.data

    m1_ll_before = m1.log_likelihood()
    m2_ll_before = m2.log_likelihood(data[0], data[1])

    assert m2_ll_before != m1_ll_before

    @tf.function(autograph=False)
    def loss_cb() -> tf.Tensor:
        return -m2.log_marginal_likelihood(data[0], data[1])

    # For a Gaussian likelihood, a single natural-gradient step with
    # gamma = 1.0 lands exactly on the optimal variational distribution.
    params = [(m2.q_mu, m2.q_sqrt)]
    opt = NaturalGradient(1.0)
    opt.minimize(loss_cb, var_list=params)

    m1_ll_after = m1.log_likelihood()
    m2_ll_after = m2.log_likelihood(data[0], data[1])

    # After the exact step, SVGP's bound matches the collapsed SGPR bound.
    np.testing.assert_allclose(m1_ll_after, m2_ll_after, atol=1e-4)
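
Again a hedged usage sketch; the data, the inducing points Z, and the kernel are illustrative assumptions under the same pre-release GPflow 2.0 API as above.

import numpy as np
import gpflow

X = np.random.rand(20, 1)
Y = np.sin(10 * X) + 0.1 * np.random.randn(20, 1)
Z = X[:5].copy()  # a few inducing points shared by both models

# SGPR uses the collapsed (Titsias) bound; SVGP keeps explicit q(u).
sgpr = gpflow.models.SGPR((X, Y), kernel=gpflow.kernels.Matern52(),
                          inducing_variable=Z)
svgp = gpflow.models.SVGP(gpflow.kernels.Matern52(),
                          gpflow.likelihoods.Gaussian(),
                          inducing_variable=Z)

assert_sgpr_vs_svgp(sgpr, svgp)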