Example #1
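All of the snippets below are GPflow test functions. They assume a shared import header plus fixture classes (Data, DataMixedKernel, DataMixedKernelWithEye, Datum) and helpers (make_ip, make_ips, make_kernel, make_kernels, check_equality_predictions) defined elsewhere in the test module; note that data = DataMixedKernel and the Data class used in the final assertions are distinct fixtures, so the mixed references are intentional. A plausible header under the GPflow 2.x module layout (the aliases are an assumption, not part of the snippets):

import numpy as np
import pytest
import tensorflow as tf

import gpflow
import gpflow.inducing_variables.multioutput as mf  # assumed alias
import gpflow.kernels.multioutput as mk             # assumed alias
from gpflow import set_trainable
from gpflow.conditionals import sample_conditional
from gpflow.inducing_variables import InducingPoints
from gpflow.kernels import SquaredExponential
from gpflow.likelihoods import Gaussian
from gpflow.models import SVGP
from numpy.testing import assert_allclose

# mo_kufs / mo_kuus: assumed aliases for the multioutput covariance
# dispatchers; the exact path can differ between GPflow versions.
import gpflow.covariances.multioutput.kufs as mo_kufs
import gpflow.covariances.multioutput.kuus as mo_kuus

float_type = gpflow.default_float()  # assumption: the float_type used below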
def test_multioutput_with_diag_q_sqrt():
    data = DataMixedKernel

    q_sqrt_diag = np.ones((data.M, data.L)) * 2
    q_sqrt = np.repeat(np.eye(data.M)[None, ...], data.L,
                       axis=0) * 2  # L x M x M

    kern_list = [SquaredExponential() for _ in range(data.L)]
    k1 = mk.LinearCoregionalization(kern_list, W=data.W)
    f1 = mf.SharedIndependentInducingVariables(
        InducingPoints(data.X[:data.M, ...]))
    model_1 = SVGP(
        k1,
        Gaussian(),
        inducing_variable=f1,
        q_mu=data.mu_data,
        q_sqrt=q_sqrt_diag,
        q_diag=True,
    )

    kern_list = [SquaredExponential() for _ in range(data.L)]
    k2 = mk.LinearCoregionalization(kern_list, W=data.W)
    f2 = mf.SharedIndependentInducingVariables(
        InducingPoints(data.X[:data.M, ...]))
    model_2 = SVGP(
        k2,
        Gaussian(),
        inducing_variable=f2,
        q_mu=data.mu_data,
        q_sqrt=q_sqrt,
        q_diag=False,
    )

    check_equality_predictions(Data.data, [model_1, model_2])
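With q_diag=True, SVGP stores q_sqrt as an [M, L] matrix of per-latent diagonals; with q_diag=False it stores a full [L, M, M] batch of lower-triangular factors. When those matrices are diagonal, the two parameterizations describe the same posterior, which is what the test checks. A minimal numpy-only sketch of the correspondence (toy sizes, not from the test suite):

M, L = 3, 2
q_sqrt_diag = np.ones((M, L)) * 2  # M x L, one column per latent GP
q_sqrt_full = np.repeat(np.eye(M)[None, ...], L, axis=0) * 2  # L x M x M
# column l of the diagonal form is the diagonal of slice l of the full form
for l in range(L):
    assert np.allclose(q_sqrt_diag[:, l], np.diag(q_sqrt_full[l]))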
Example #2
def test_MixedKernelSeparateMof():
    data = DataMixedKernel

    kern_list = [SquaredExponential() for _ in range(data.L)]
    inducing_variable_list = [
        InducingPoints(data.X[:data.M, ...]) for _ in range(data.L)
    ]
    k1 = mk.LinearCoregionalization(kern_list, W=data.W)
    f1 = mf.SeparateIndependentInducingVariables(inducing_variable_list)
    model_1 = SVGP(k1,
                   Gaussian(),
                   inducing_variable=f1,
                   q_mu=data.mu_data,
                   q_sqrt=data.sqrt_data)

    kern_list = [SquaredExponential() for _ in range(data.L)]
    inducing_variable_list = [
        InducingPoints(data.X[:data.M, ...]) for _ in range(data.L)
    ]
    k2 = mk.LinearCoregionalization(kern_list, W=data.W)
    f2 = mf.SeparateIndependentInducingVariables(inducing_variable_list)
    model_2 = SVGP(k2,
                   Gaussian(),
                   inducing_variable=f2,
                   q_mu=data.mu_data,
                   q_sqrt=data.sqrt_data)

    check_equality_predictions(Data.data, [model_1, model_2])
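Both models here use SeparateIndependentInducingVariables, i.e. one InducingPoints object per latent GP; because every list entry is built from the same slice of data.X, this is equivalent to the shared construction of Example #1. The two constructors side by side (a sketch assuming the GPflow 2.x API, with toy inducing inputs):

Z = np.random.randn(5, 2)  # toy inducing inputs
shared = mf.SharedIndependentInducingVariables(InducingPoints(Z))
separate = mf.SeparateIndependentInducingVariables(
    [InducingPoints(Z.copy()) for _ in range(3)]  # identical points per latent GP
)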
Example #3
def test_mixed_mok_with_Id_vs_independent_mok():
    data = DataMixedKernelWithEye
    # Independent model
    k1 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), data.L)
    f1 = InducingPoints(data.X[: data.M, ...])
    model_1 = SVGP(k1, Gaussian(), f1, q_mu=data.mu_data_full, q_sqrt=data.sqrt_data_full)
    set_trainable(model_1, False)
    set_trainable(model_1.q_sqrt, True)

    gpflow.optimizers.Scipy().minimize(
        model_1.training_loss_closure(Data.data),
        variables=model_1.trainable_variables,
        method="BFGS",
        compile=True,
    )

    # Mixed Model
    kern_list = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(data.L)]
    k2 = mk.LinearCoregionalization(kern_list, data.W)
    f2 = InducingPoints(data.X[: data.M, ...])
    model_2 = SVGP(k2, Gaussian(), f2, q_mu=data.mu_data_full, q_sqrt=data.sqrt_data_full)
    set_trainable(model_2, False)
    set_trainable(model_2.q_sqrt, True)

    gpflow.optimizers.Scipy().minimize(
        model_2.training_loss_closure(Data.data),
        variables=model_2.trainable_variables,
        method="BFGS",
        compile=True,
    )

    check_equality_predictions(Data.data, [model_1, model_2])
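Each model is frozen except for q_sqrt, so BFGS optimizes the same single variational parameter in both and the resulting predictions should coincide. The freeze-then-unfreeze pattern in isolation (a sketch; model stands for any SVGP built as above):

set_trainable(model, False)        # freeze every parameter in the model
set_trainable(model.q_sqrt, True)  # re-enable only the variational Cholesky
assert len(model.trainable_variables) == 1  # q_sqrt is all that remains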
Example #4
def test_sample_conditional_mixedkernel():
    q_mu = tf.random.uniform((Data.M, Data.L), dtype=tf.float64)  # M x L
    q_sqrt = tf.convert_to_tensor(
        [np.tril(tf.random.uniform((Data.M, Data.M), dtype=tf.float64)) for _ in range(Data.L)]
    )  # L x M x M

    Z = Data.X[: Data.M, ...]  # M x D
    N = int(1e6)  # 1e6 samples at a single repeated test point for the moment comparison
    Xs = np.ones((N, Data.D), dtype=float_type)

    # Path 1: mixed kernel: most efficient route
    W = np.random.randn(Data.P, Data.L)
    mixed_kernel = mk.LinearCoregionalization([SquaredExponential() for _ in range(Data.L)], W)
    optimal_inducing_variable = mf.SharedIndependentInducingVariables(InducingPoints(Z))

    value, mean, var = sample_conditional(
        Xs, optimal_inducing_variable, mixed_kernel, q_mu, q_sqrt=q_sqrt, white=True
    )

    # Path 2: independent kernels, mixed later
    separate_kernel = mk.SeparateIndependent([SquaredExponential() for _ in range(Data.L)])
    fallback_inducing_variable = mf.SharedIndependentInducingVariables(InducingPoints(Z))

    value2, mean2, var2 = sample_conditional(
        Xs, fallback_inducing_variable, separate_kernel, q_mu, q_sqrt=q_sqrt, white=True
    )
    value2 = np.matmul(value2, W.T)
    # check if mean and covariance of samples are similar
    np.testing.assert_array_almost_equal(np.mean(value, axis=0), np.mean(value2, axis=0), decimal=1)
    np.testing.assert_array_almost_equal(
        np.cov(value, rowvar=False), np.cov(value2, rowvar=False), decimal=1
    )
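Path 2 relies on the linear-mixing identity: if g has independent components with covariance S, then f = g @ W.T has covariance W @ S @ W.T, which is what LinearCoregionalization computes internally. A numpy-only check of the identity with unit-variance latents (toy W, not from the test):

rng = np.random.default_rng(0)
W = np.array([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]])  # P x L mixing matrix
g = rng.standard_normal((100_000, 2))  # independent unit-variance latents
f = g @ W.T  # mixed outputs, covariance W @ I @ W.T
np.testing.assert_allclose(np.cov(f, rowvar=False), W @ W.T, atol=0.03)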
Example #5
def test_mixed_shared(fun):
    inducing_variable = mf.SharedIndependentInducingVariables(make_ip())
    kernel = mk.LinearCoregionalization(make_kernels(Datum.L), Datum.W)
    if fun is mo_kuus.Kuu:
        t = tf.linalg.cholesky(fun(inducing_variable, kernel, jitter=1e-9))
    else:
        t = fun(inducing_variable, kernel, Datum.Xnew)
        print(t.shape)
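The fun argument is presumably supplied by a pytest parametrization over the two covariance dispatchers, so the same body exercises both Kuu (with a Cholesky to confirm positive definiteness) and Kuf (shape only). A plausible decorator, assumed rather than shown in the snippet:

@pytest.mark.parametrize("fun", [mo_kuus.Kuu, mo_kufs.Kuf])
def test_mixed_shared(fun):
    ...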
Example #6
def test_MixedMok_Kgg():
    data = DataMixedKernel
    kern_list = [SquaredExponential() for _ in range(data.L)]
    kernel = mk.LinearCoregionalization(kern_list, W=data.W)

    Kgg = kernel.Kgg(Data.X, Data.X)  # L x N x N
    Kff = kernel.K(Data.X, Data.X)  # N x P x N x P

    # Kff = W @ Kgg @ W^T
    Kff_inferred = np.einsum("lnm,pl,ql->npmq", Kgg, data.W, data.W)

    np.testing.assert_array_almost_equal(Kff, Kff_inferred, decimal=5)
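The einsum encodes Kff[n, p, m, q] = sum_l W[p, l] * W[q, l] * Kgg[l, n, m], i.e. W @ Kgg @ W^T applied blockwise. A standalone numpy check of that contraction against an explicit loop (toy sizes):

rng = np.random.default_rng(1)
Lk, N, P = 2, 4, 3
Kgg = rng.standard_normal((Lk, N, N))
W = rng.standard_normal((P, Lk))
Kff = np.einsum("lnm,pl,ql->npmq", Kgg, W, W)
ref = np.zeros((N, P, N, P))
for p in range(P):
    for q in range(P):
        ref[:, p, :, q] = sum(W[p, l] * W[q, l] * Kgg[l] for l in range(Lk))
np.testing.assert_allclose(Kff, ref)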
Example #7
def test_shapes_of_mok():
    data = DataMixedKernel

    kern_list = [SquaredExponential() for _ in range(data.L)]

    k1 = mk.LinearCoregionalization(kern_list, W=data.W)
    assert k1.num_latent_gps == data.L

    k2 = mk.SeparateIndependent(kern_list)
    assert k2.num_latent_gps == data.L

    dims = 5
    k3 = mk.SharedIndependent(SquaredExponential(), dims)
    assert k3.num_latent_gps == dims
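num_latent_gps is what SVGP uses to size its variational parameters, which is why these assertions matter downstream. A quick sketch of the connection (GPflow 2.x API, toy inducing inputs assumed):

Z = np.zeros((7, 1))
k = mk.SharedIndependent(SquaredExponential(), 5)
m = SVGP(k, Gaussian(), InducingPoints(Z), num_latent_gps=k.num_latent_gps)
assert m.q_mu.shape == (7, 5)  # M x num_latent_gps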
Example #8
def test_conditional_broadcasting(full_cov, white, conditional_type):
    """
    Test that `conditional` and `sample_conditional` broadcast correctly
    over the leading dimensions of Xnew: Xnew may have shape [..., N, D],
    and conditional should broadcast over the [...].
    """
    q_mu = np.random.randn(Data.M, Data.Dy)
    q_sqrt = np.tril(np.random.randn(Data.Dy, Data.M, Data.M), -1)

    if conditional_type == "Z":
        inducing_variable = Data.Z
        kernel = gpflow.kernels.Matern52(lengthscales=0.5)
    elif conditional_type == "inducing_points":
        inducing_variable = gpflow.inducing_variables.InducingPoints(Data.Z)
        kernel = gpflow.kernels.Matern52(lengthscales=0.5)
    elif conditional_type == "mixing":
        # variational params have different output dim in this case
        q_mu = np.random.randn(Data.M, Data.L)
        q_sqrt = np.tril(np.random.randn(Data.L, Data.M, Data.M), -1)
        inducing_variable = mf.SharedIndependentInducingVariables(
            gpflow.inducing_variables.InducingPoints(Data.Z)
        )
        kernel = mk.LinearCoregionalization(
            kernels=[gpflow.kernels.Matern52(lengthscales=0.5) for _ in range(Data.L)], W=Data.W,
        )
    else:
        raise NotImplementedError

    if conditional_type == "mixing" and full_cov:
        pytest.skip("combination is not implemented")

    num_samples = 5

    def sample_conditional_fn(X):
        return sample_conditional(
            X,
            inducing_variable,
            kernel,
            tf.convert_to_tensor(q_mu),
            q_sqrt=tf.convert_to_tensor(q_sqrt),
            white=white,
            full_cov=full_cov,
            num_samples=num_samples,
        )

    samples = np.array([sample_conditional_fn(X)[0] for X in Data.SX])
    means = np.array([sample_conditional_fn(X)[1] for X in Data.SX])
    variances = np.array([sample_conditional_fn(X)[2] for X in Data.SX])

    samples_S12, means_S12, vars_S12 = sample_conditional(
        Data.SX,
        inducing_variable,
        kernel,
        tf.convert_to_tensor(q_mu),
        q_sqrt=tf.convert_to_tensor(q_sqrt),
        white=white,
        full_cov=full_cov,
        num_samples=num_samples,
    )

    samples_S1_S2, means_S1_S2, vars_S1_S2 = sample_conditional(
        Data.S1_S2_X,
        inducing_variable,
        kernel,
        tf.convert_to_tensor(q_mu),
        q_sqrt=tf.convert_to_tensor(q_sqrt),
        white=white,
        full_cov=full_cov,
        num_samples=num_samples,
    )

    assert_allclose(samples_S12.shape, samples.shape)
    assert_allclose(samples_S1_S2.shape, [Data.S1, Data.S2, num_samples, Data.N, Data.Dy])
    assert_allclose(means_S12, means)
    assert_allclose(vars_S12, variances)
    assert_allclose(means_S1_S2.numpy().reshape(Data.S1 * Data.S2, Data.N, Data.Dy), means)
    if full_cov:
        vars_s1_s2 = vars_S1_S2.numpy().reshape(Data.S1 * Data.S2, Data.Dy, Data.N, Data.N)
        assert_allclose(vars_s1_s2, variances)
    else:
        vars_s1_s2 = vars_S1_S2.numpy().reshape(Data.S1 * Data.S2, Data.N, Data.Dy)
        assert_allclose(vars_s1_s2, variances)
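The final assertions compare the batched call against per-slice calls by flattening the two leading sample dimensions. The reshape equivalence they rely on, in isolation (numpy only, toy sizes):

S1, S2, N, Dy = 2, 3, 5, 4
batched = np.arange(S1 * S2 * N * Dy, dtype=float).reshape(S1, S2, N, Dy)
flat = batched.reshape(S1 * S2, N, Dy)  # what the test compares element-wise
assert np.array_equal(flat[4], batched[1, 1])  # slice (s1, s2) -> index s1 * S2 + s2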
Example #9
def test_kuf_fallback_shared_inducing_variables_shape(inducing_variable):
    kernel = mk.LinearCoregionalization(make_kernels(Datum.L), Datum.W)
    Kuf = mo_kufs.Kuf(inducing_variable, kernel, Datum.Xnew)

    assert Kuf.shape == (10, 2, 100, 3)
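The asserted shape reads as [M, L, N, P] = [10, 2, 100, 3] under the presumed Datum fixture sizes: the fallback path keeps the full cross-covariance between all inducing outputs and all test outputs instead of taking the independent-output shortcut. The inducing_variable argument is presumably parametrized over the fallback wrappers defined in Example #10; a plausible decorator (an assumption, not shown in the snippet):

@pytest.mark.parametrize("inducing_variable",
                         multioutput_fallback_inducing_variable_list)
def test_kuf_fallback_shared_inducing_variables_shape(inducing_variable):
    ...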
Example #10

multioutput_inducing_variable_list = [
    mf.SharedIndependentInducingVariables(make_ip()),
    mf.SeparateIndependentInducingVariables(make_ips(Datum.P)),
]

multioutput_fallback_inducing_variable_list = [
    mf.FallbackSharedIndependentInducingVariables(make_ip()),
    mf.FallbackSeparateIndependentInducingVariables(make_ips(Datum.P)),
]

multioutput_kernel_list = [
    mk.SharedIndependent(make_kernel(), Datum.P),
    mk.SeparateIndependent(make_kernels(Datum.L)),
    mk.LinearCoregionalization(make_kernels(Datum.L), Datum.W),
]


@pytest.mark.parametrize("inducing_variable",
                         multioutput_inducing_variable_list)
@pytest.mark.parametrize("kernel", multioutput_kernel_list)
def test_kuu_shape(inducing_variable, kernel):
    Kuu = mo_kuus.Kuu(inducing_variable, kernel, jitter=1e-9)
    t = tf.linalg.cholesky(Kuu)

    if isinstance(kernel, mk.SharedIndependent):
        if isinstance(inducing_variable,
                      mf.SeparateIndependentInducingVariables):
            assert t.shape == (3, 10, 10)
        else: