Example #1
from gpflow.kernels import RBF, Matern12
from gpflow.likelihoods import Gaussian
from gpflow.mean_functions import Zero
from gpflux.helpers import construct_basic_kernel, construct_basic_inducing_variables
from gpflux.layers import GPLayer
from gpflux.models import DeepGP


def build_deep_gp(input_dim, num_data):
    layers = [input_dim, 2, 2, 1]
    # Below are different ways to build layers

    # 1. Pass in Lists:
    kernel_list = [RBF(), Matern12()]
    num_inducing = [25, 25]
    l1_kernel = construct_basic_kernel(kernels=kernel_list)
    l1_inducing = construct_basic_inducing_variables(num_inducing=num_inducing, input_dim=layers[0])

    # 2. Pass in kernels, specify output dim (shared hyperparams/variables)
    l2_kernel = construct_basic_kernel(kernels=RBF(), output_dim=layers[2], share_hyperparams=True)
    l2_inducing = construct_basic_inducing_variables(
        num_inducing=25, input_dim=layers[1], share_variables=True
    )

    # 3. Pass in kernels, specify output dim (independent hyperparams/vars)
    # By default the constructor will make independent copies
    l3_kernel = construct_basic_kernel(kernels=RBF(), output_dim=layers[3])
    l3_inducing = construct_basic_inducing_variables(
        num_inducing=25, input_dim=layers[2], output_dim=layers[3]
    )

    # Assemble at the end
    gp_layers = [
        GPLayer(l1_kernel, l1_inducing, num_data),
        GPLayer(l2_kernel, l2_inducing, num_data),
        GPLayer(l3_kernel, l3_inducing, num_data, mean_function=Zero()),
    ]
    return DeepGP(gp_layers, Gaussian(0.1))
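A minimal training sketch for the builder above, assuming GPflux's Keras-style `as_training_model()` wrapper; the toy data, `input_dim=2` (which keeps the hidden layers' default Identity mean function shape-compatible), and the optimizer settings are illustrative assumptions, not part of the original:

import numpy as np
import tensorflow as tf

X = np.random.randn(100, 2)  # toy inputs; input_dim=2 matches the hidden widths
Y = np.random.randn(100, 1)  # toy 1-D targets

deep_gp = build_deep_gp(input_dim=2, num_data=100)
training_model = deep_gp.as_training_model()
training_model.compile(tf.optimizers.Adam(0.01))
training_model.fit({"inputs": X, "targets": Y}, epochs=10, verbose=0)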
Example #2
# `num_inducing` and `input_dim` are assumed to be module-level constants
def make_inducing_variables(num_latent_iv):
    return [
        construct_basic_inducing_variables(
            num_inducing=[num_inducing for _ in range(num_latent_iv)],
            input_dim=input_dim,
        ),
        construct_basic_inducing_variables(num_inducing=num_inducing,
                                           input_dim=input_dim,
                                           output_dim=num_latent_iv),
    ]
def build_gp_layers(layer_sizes, num_data):
    gp_layers = []
    for input_dim, output_dim in zip(layer_sizes[:-1], layer_sizes[1:]):

        kernel = construct_basic_kernel(kernels=RBF(), output_dim=output_dim)
        inducing_vars = construct_basic_inducing_variables(
            num_inducing=25, input_dim=input_dim, output_dim=output_dim)

        layer = GPLayer(kernel, inducing_vars, num_data)
        gp_layers.append(layer)

    gp_layers[-1].mean_function = Zero()

    return gp_layers
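A hedged sketch of putting `build_gp_layers` to use; the layer sizes, likelihood, and the `DeepGP` assembly below are illustrative assumptions, not part of the original test module:

import gpflow
from gpflux.models import DeepGP

layer_sizes = [5, 5, 1]  # equal hidden widths keep the inner layers'
num_data = 100           # default Identity mean function valid

gp_layers = build_gp_layers(layer_sizes, num_data)
deep_gp = DeepGP(gp_layers, gpflow.likelihoods.Gaussian(0.01))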
Example #4
def setup_gp_layer_and_data(num_inducing: int, **gp_layer_kwargs):
    input_dim = 30
    output_dim = 5
    num_data = 100
    data = make_data(input_dim, output_dim, num_data=num_data)

    kernel = construct_basic_kernel(RBF(), output_dim)
    inducing_vars = construct_basic_inducing_variables(num_inducing, input_dim,
                                                       output_dim)
    mean_function = Zero(output_dim)

    gp_layer = GPLayer(kernel,
                       inducing_vars,
                       num_data,
                       mean_function=mean_function,
                       **gp_layer_kwargs)
    return gp_layer, data
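A small usage sketch, assuming `make_data` returns an `(X, Y)` tuple and relying on `GPLayer` being a Keras/TFP layer whose call returns a distribution over function values:

gp_layer, (X, Y) = setup_gp_layer_and_data(num_inducing=10)
f_dist = gp_layer(X)  # forward pass: a distribution over f(X), shape [num_data, output_dim]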
Example #5
# `z_init` is a boolean flag (parametrized by pytest in the original test suite)
def test_construct_inducing_separate_independent_custom_list(z_init):
    num_inducing = [25, 35, 45]
    input_dim = 5

    if z_init:
        z_init = [
            xavier_initialization_numpy(m, input_dim) for m in num_inducing
        ]
    else:
        z_init = None

    moiv = construct_basic_inducing_variables(num_inducing,
                                              input_dim,
                                              z_init=z_init)

    assert isinstance(moiv, SeparateIndependentInducingVariables)
    assert isinstance(moiv, MultioutputInducingVariables)
    for i, iv in enumerate(moiv.inducing_variable_list):
        assert len(iv) == num_inducing[i]
Example #6
def test_construct_inducing_shared_independent_duplicates(z_init):
    num_inducing = 25
    input_dim = 5
    output_dim = 7

    if z_init:
        z_init = np.random.randn(num_inducing, input_dim)
    else:
        z_init = None

    moiv = construct_basic_inducing_variables(num_inducing,
                                              input_dim,
                                              output_dim=output_dim,
                                              share_variables=True,
                                              z_init=z_init)

    assert isinstance(moiv, SharedIndependentInducingVariables)
    assert isinstance(moiv, MultioutputInducingVariables)
    assert len(moiv.inducing_variable) == num_inducing
Example #7
def test_construct_inducing_separate_independent_duplicates(z_init):
    num_inducing = 25
    input_dim = 5
    output_dim = 7

    if z_init:
        z_init = np.random.randn(output_dim, num_inducing, input_dim)
    else:
        z_init = None

    moiv = construct_basic_inducing_variables(num_inducing,
                                              input_dim,
                                              output_dim=output_dim,
                                              z_init=z_init)

    assert isinstance(moiv, SeparateIndependentInducingVariables)
    assert isinstance(moiv, MultioutputInducingVariables)
    for iv in moiv.inducing_variable_list:
        assert len(iv) == num_inducing
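Examples #6 and #7 differ only in the duplication mode and, consequently, in the expected `z_init` shape. A compact side-by-side sketch (the shapes are taken from the two tests above; the `gpflux.helpers` import path is an assumption):

import numpy as np
from gpflux.helpers import construct_basic_inducing_variables

num_inducing, input_dim, output_dim = 25, 5, 7

# share_variables=True: one Z of shape [25, 5], reused for all 7 outputs
shared = construct_basic_inducing_variables(
    num_inducing, input_dim, output_dim=output_dim,
    share_variables=True,
    z_init=np.random.randn(num_inducing, input_dim))

# default (separate): independent copies per output, so z_init carries a
# leading output axis, [7, 25, 5]
separate = construct_basic_inducing_variables(
    num_inducing, input_dim, output_dim=output_dim,
    z_init=np.random.randn(output_dim, num_inducing, input_dim))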
Example #8
def test_verify_compatibility_type_errors():
    valid_inducing_variable = construct_basic_inducing_variables([35],
                                                                 input_dim=40)
    valid_kernel = construct_basic_kernel([Matern52()])
    valid_mean_function = Zero()  # all gpflow mean functions are currently valid

    # gpflow kernels must be MultioutputKernels
    with pytest.raises(GPLayerIncompatibilityException):
        verify_compatibility(Matern52(), valid_mean_function,
                             valid_inducing_variable)

    Z = valid_inducing_variable.inducing_variable_list[0].Z
    inducing_variable = InducingPoints(Z)

    # gpflow inducing_variables must be MultioutputInducingVariables
    with pytest.raises(GPLayerIncompatibilityException):
        verify_compatibility(valid_kernel, valid_mean_function,
                             inducing_variable)
Example #9
    def __init__(
        self,
        likelihood: gpflow.likelihoods.Likelihood = gpflow.likelihoods.Gaussian(0.01),
    ) -> None:
        kernel = construct_basic_kernel(gpflow.kernels.SquaredExponential(),
                                        output_dim=1,
                                        share_hyperparams=True)
        inducing_var = construct_basic_inducing_variables(
            num_inducing=5,
            input_dim=1,
            share_variables=True,
            z_init=tf.random.normal([5, 1], dtype=gpflow.default_float()),
        )

        gp_layer = GPLayer(kernel, inducing_var, 10)

        super().__init__(
            [gp_layer],  # not actually used
            likelihood,
        )
Example #10
# ## Constructing the layers
#
# Note that we give the `full_cov=True` argument to `GPLayer` so that we obtain correlated samples.
# We give the last layer a `gpflow.mean_functions.Zero` mean function (the GPflux default is an Identity mean function).

# %%
num_samples = 5

# %%
Z = X.copy()
M = Z.shape[0]

# Layer 1
inducing_var1 = construct_basic_inducing_variables(
    num_inducing=M,
    input_dim=D,
    output_dim=D,
    share_variables=True,
    z_init=Z.copy(),
)
kernel1 = construct_basic_kernel(
    gpflow.kernels.SquaredExponential(lengthscales=0.15),
    output_dim=D,
    share_hyperparams=True,
)
layer1 = GPLayer(kernel1,
                 inducing_var1,
                 num_data,
                 full_cov=True,
                 num_samples=num_samples)

# Layer 2
inducing_var2 = construct_basic_inducing_variables(
    # the original snippet is truncated at this call; the remaining
    # arguments are an assumption, mirroring layer 1
    num_inducing=M,
    input_dim=D,
    output_dim=D,
    share_variables=True,
    z_init=Z.copy(),
)
Example #11
def build_constant_input_dim_deep_gp(X: np.ndarray, num_layers: int,
                                     config: Config) -> DeepGP:
    r"""
    Build a Deep GP consisting of ``num_layers`` :class:`GPLayer`\ s.
    All the hidden layers have the same input dimension as the data, that is, ``X.shape[1]``.

    The architecture is largely based on :cite:t:`salimbeni2017doubly`, with
    the most notable difference being that we keep the hidden dimension equal
    to the input dimensionality of the data.

    .. note::
        This architecture might be slow for high-dimensional data.

    .. note::
        This architecture assumes a :class:`~gpflow.likelihoods.Gaussian` likelihood
        for regression tasks. Specify a different likelihood for performing
        other tasks such as classification.

    :param X: The training input data, used to retrieve the number of datapoints and
        the input dimension and to initialise the inducing point locations using k-means. A
        tensor of rank two with the dimensions ``[num_data, input_dim]``.
    :param num_layers: The number of layers in the Deep GP.
    :param config: The configuration for (hyper)parameters. See :class:`Config` for details.
    """
    num_data, input_dim = X.shape
    X_running = X

    gp_layers = []
    centroids, _ = kmeans2(X, k=config.num_inducing, minit="points")

    for i_layer in range(num_layers):
        is_last_layer = i_layer == num_layers - 1
        D_in = input_dim
        D_out = 1 if is_last_layer else input_dim

        # Pass in kernels, specify output dim (shared hyperparams/variables)

        inducing_var = construct_basic_inducing_variables(
            num_inducing=config.num_inducing,
            input_dim=D_in,
            share_variables=True,
            z_init=centroids)

        kernel = construct_basic_kernel(
            kernels=_construct_kernel(D_in, is_last_layer),
            output_dim=D_out,
            share_hyperparams=True,
        )

        assert config.whiten is True, "non-whitened case not implemented yet"

        if is_last_layer:
            mean_function = gpflow.mean_functions.Zero()
            q_sqrt_scaling = 1.0
        else:
            mean_function = construct_mean_function(X_running, D_in, D_out)
            X_running = mean_function(X_running)
            if tf.is_tensor(X_running):
                X_running = X_running.numpy()
            q_sqrt_scaling = config.inner_layer_qsqrt_factor

        layer = GPLayer(
            kernel,
            inducing_var,
            num_data,
            mean_function=mean_function,
            name=f"gp_{i_layer}",
        )
        layer.q_sqrt.assign(layer.q_sqrt * q_sqrt_scaling)
        gp_layers.append(layer)

    likelihood = Gaussian(config.likelihood_noise_variance)
    return DeepGP(gp_layers, LikelihoodLayer(likelihood))
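A hedged usage sketch: the `Config` fields are inferred from their uses inside the function above, and the data and sizes are illustrative.

import numpy as np

X = np.random.randn(200, 3)
config = Config(
    num_inducing=50,
    inner_layer_qsqrt_factor=1e-5,
    likelihood_noise_variance=1e-2,
    whiten=True,
)
deep_gp = build_constant_input_dim_deep_gp(X, num_layers=2, config=config)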