    def test_different_params_similar_approximation(self, initializer, scale):
        random_seed.set_random_seed(12345)
        rff_layer1 = kernel_layers.RandomFourierFeatures(
            output_dim=3000,
            kernel_initializer=initializer,
            scale=scale,
            name='rff1')
        rff_layer2 = kernel_layers.RandomFourierFeatures(
            output_dim=2000,
            kernel_initializer=initializer,
            scale=scale,
            name='rff2')
        # Two distinct inputs.
        x = constant_op.constant([[1.0, -1.0, 0.5]])
        y = constant_op.constant([[-1.0, 1.0, 1.0]])

        # Apply both layers to both inputs.
        output_x1 = math.sqrt(2.0 / 3000.0) * rff_layer1(x)
        output_y1 = math.sqrt(2.0 / 3000.0) * rff_layer1(y)
        output_x2 = math.sqrt(2.0 / 2000.0) * rff_layer2(x)
        output_y2 = math.sqrt(2.0 / 2000.0) * rff_layer2(y)

        # Compute the inner products of the outputs (on inputs x and y) for both
        # layers. For any fixed random features layer rff_layer, and inputs x, y,
        # rff_layer(x)^T * rff_layer(y) ~= K(x,y) up to a normalization factor.
        approx_kernel1 = kernelized_utils.inner_product(output_x1, output_y1)
        approx_kernel2 = kernelized_utils.inner_product(output_x2, output_y2)
        self._assert_all_close(approx_kernel1, approx_kernel2, atol=0.08)
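The comment above refers to the standard random Fourier features construction (Rahimi & Recht): for a Gaussian kernel K(x, y) = exp(-||x - y||^2 / (2 * scale^2)), the map z(x) = sqrt(2 / D) * cos(W x + b), with the rows of W drawn from N(0, I / scale^2) and b drawn uniformly from [0, 2*pi], satisfies E[z(x)^T z(y)] = K(x, y). A minimal NumPy sketch of that identity, independent of the Keras layer used in these tests (the helper names and values below are illustrative, not part of the tested API):

import numpy as np

def random_fourier_features(x, w, b):
    # z(x) = sqrt(2 / D) * cos(x @ W^T + b), one row of features per input row.
    d = w.shape[0]
    return np.sqrt(2.0 / d) * np.cos(x @ w.T + b)

def rbf_kernel(x, y, scale):
    # Exact Gaussian kernel exp(-||x - y||^2 / (2 * scale^2)), evaluated pairwise.
    sq_dists = np.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=-1)
    return np.exp(-sq_dists / (2.0 * scale ** 2))

rng = np.random.default_rng(1234)
input_dim, output_dim, scale = 3, 2000, 1.0
w = rng.normal(scale=1.0 / scale, size=(output_dim, input_dim))
b = rng.uniform(0.0, 2.0 * np.pi, size=output_dim)

x = np.array([[1.0, -1.0, 0.5]])
y = np.array([[-1.0, 1.0, 1.0]])
approx = random_fourier_features(x, w, b) @ random_fourier_features(y, w, b).T
exact = rbf_kernel(x, y, scale)
print(abs(approx[0, 0] - exact[0, 0]))  # small for large output_dim

Unlike the layer above, this sketch folds the sqrt(2 / D) normalization into the feature map, which is why the tests multiply the layer output by math.sqrt(2.0 / output_dim) explicitly.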
Example #2
  def test_different_params_similar_approximation(self, initializer, scale):
    # Layers initialized using different randomness (seed).
    rff_layer1 = kernel_layers.RandomFourierFeatures(
        output_dim=3000,
        kernel_initializer=initializer,
        scale=scale,
        name='rff1')
    rff_layer2 = kernel_layers.RandomFourierFeatures(
        output_dim=2000,
        kernel_initializer=initializer,
        scale=scale,
        name='rff2')
    # Two distinct inputs.
    x = constant_op.constant([[1.0, -1.0, 0.5]])
    y = constant_op.constant([[-1.0, 1.0, 1.0]])

    # Apply both layers to both inputs.
    output_x1 = math.sqrt(2.0 / 3000.0) * rff_layer1.apply(x)
    output_y1 = math.sqrt(2.0 / 3000.0) * rff_layer1.apply(y)
    output_x2 = math.sqrt(2.0 / 2000.0) * rff_layer2.apply(x)
    output_y2 = math.sqrt(2.0 / 2000.0) * rff_layer2.apply(y)

    # Compute the inner products of the outputs (on inputs x and y) for both
    # layers. For any fixed random features layer rff_layer, and inputs x, y,
    # rff_layer(x)^T * rff_layer(y) ~= K(x,y) up to a normalization factor.
    approx_kernel1 = kernelized_utils.inner_product(output_x1, output_y1)
    approx_kernel2 = kernelized_utils.inner_product(output_x2, output_y2)
    self._assert_all_close(approx_kernel1, approx_kernel2, atol=0.05)
Example #3
    def test_good_kernel_approximation_multiple_inputs(self, initializer,
                                                       scale, exact_kernel_fn):
        # Parameters.
        input_dim = 5
        output_dim = 2000
        x_rows = 20
        y_rows = 30

        x = constant_op.constant(np.random.uniform(size=(x_rows, input_dim)),
                                 dtype=dtypes.float32)
        y = constant_op.constant(np.random.uniform(size=(y_rows, input_dim)),
                                 dtype=dtypes.float32)

        random_seed.set_random_seed(1234)
        rff_layer = kernel_layers.RandomFourierFeatures(
            output_dim=output_dim,
            kernel_initializer=initializer,
            scale=scale,
            name='random_fourier_features')

        # The shapes of output_x and output_y are (x_rows, output_dim) and
        # (y_rows, output_dim) respectively.
        output_x = math.sqrt(2.0 / output_dim) * rff_layer(x)
        output_y = math.sqrt(2.0 / output_dim) * rff_layer(y)

        approx_kernel_matrix = kernelized_utils.inner_product(
            output_x, output_y)
        exact_kernel_matrix = exact_kernel_fn(x, y)
        self._assert_all_close(approx_kernel_matrix,
                               exact_kernel_matrix,
                               atol=0.05)
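In the parameterized versions of these tests, exact_kernel_fn is typically an exact Gaussian or Laplacian kernel, matching the layer's kernel_initializer. NumPy equivalents of those two choices are sketched below (illustrative stand-ins, not the kernelized_utils functions used above):

import numpy as np

def exact_gaussian_kernel(x, y, stddev):
    # K(x, y) = exp(-||x - y||^2 / (2 * stddev^2)), evaluated for every row pair.
    sq_dists = np.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=-1)
    return np.exp(-sq_dists / (2.0 * stddev ** 2))

def exact_laplacian_kernel(x, y, stddev):
    # K(x, y) = exp(-||x - y||_1 / stddev), evaluated for every row pair.
    l1_dists = np.sum(np.abs(x[:, None, :] - y[None, :, :]), axis=-1)
    return np.exp(-l1_dists / stddev)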
Example #4
  def test_bad_kernel_approximation(self, initializer, scale, exact_kernel_fn):
    """Approximation is bad when output dimension is small."""
    # Two distinct inputs.
    x = constant_op.constant([[1.0, -1.0, 0.5]])
    y = constant_op.constant([[-1.0, 1.0, 1.0]])

    small_output_dim = 10
    random_seed.set_random_seed(1234)
    # Initialize layer.
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=small_output_dim,
        kernel_initializer=initializer,
        scale=scale,
        name='random_fourier_features')

    # Apply layer to both inputs.
    output_x = math.sqrt(2.0 / small_output_dim) * rff_layer(x)
    output_y = math.sqrt(2.0 / small_output_dim) * rff_layer(y)

    # The inner product of the outputs (on inputs x and y) approximates the
    # real value of the RBF kernel, but poorly, since the output dimension of
    # the layer is small.
    exact_kernel_value = exact_kernel_fn(x, y)
    approx_kernel_value = kernelized_utils.inner_product(output_x, output_y)
    abs_error = math_ops.abs(exact_kernel_value - approx_kernel_value)
    if not context.executing_eagerly():
      with self.cached_session() as sess:
        keras_backend._initialize_variables(sess)
        abs_error_eval = sess.run([abs_error])
        self.assertGreater(abs_error_eval[0][0], 0.05)
        self.assertLess(abs_error_eval[0][0], 0.5)
    else:
      self.assertGreater(abs_error, 0.05)
      self.assertLess(abs_error, 0.5)
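The contrast with the good-approximation tests is the Monte Carlo rate: the error of a D-dimensional random feature approximation shrinks roughly as 1/sqrt(D), so D = 10 gives a visibly worse estimate than D = 2000. A quick check of this, reusing the random_fourier_features and rbf_kernel helpers and the rng generator sketched after Example #1 above (illustrative values, not the test's tolerances):

x = np.array([[1.0, -1.0, 0.5]])
y = np.array([[-1.0, 1.0, 1.0]])
for output_dim in (10, 2000):
    w = rng.normal(size=(output_dim, 3))
    b = rng.uniform(0.0, 2.0 * np.pi, size=output_dim)
    approx = random_fourier_features(x, w, b) @ random_fourier_features(y, w, b).T
    print(output_dim, abs(approx[0, 0] - rbf_kernel(x, y, 1.0)[0, 0]))
# The D = 10 error is usually an order of magnitude larger than the D = 2000 error.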
Example #5
    def test_good_kernel_approximation_multiple_inputs(self, initializer,
                                                       scale, exact_kernel_fn):
        # Parameters.
        input_dim = 5
        output_dim = 5000
        x_rows = 20
        y_rows = 30

        random_seed.set_random_seed(1234)
        x = random_ops.random_uniform(shape=(x_rows, input_dim), maxval=1.0)
        y = random_ops.random_uniform(shape=(y_rows, input_dim), maxval=1.0)

        rff_layer = kernel_layers.RandomFourierFeatures(
            output_dim=output_dim,
            kernel_initializer=initializer,
            scale=scale,
            name='random_fourier_features')

        # The shapes of output_x and output_y are (x_rows, output_dim) and
        # (y_rows, output_dim) respectively.
        output_x = math.sqrt(2.0 / output_dim) * rff_layer.apply(x)
        output_y = math.sqrt(2.0 / output_dim) * rff_layer.apply(y)

        approx_kernel_matrix = kernelized_utils.inner_product(
            output_x, output_y)
        exact_kernel_matrix = exact_kernel_fn(x, y)
        self._assert_all_close(approx_kernel_matrix,
                               exact_kernel_matrix,
                               atol=0.1)
def test_fourier_feature_layer_compute_covariance_of_inducing_variables(batch_size):
    """
    Ensure that the random Fourier feature map can be used to approximate the covariance matrix
    between the inducing point vectors of a sparse GP, with the condition that the number of latent
    GP models is greater than one.
    """
    n_features = 10000

    kernel = gpflow.kernels.SquaredExponential()
    fourier_features = RandomFourierFeatures(kernel, n_features, dtype=tf.float64)

    x_new = tf.ones(shape=(2 * batch_size + 1, 1), dtype=tf.float64)

    u = fourier_features(x_new)
    approx_kernel_matrix = inner_product(u, u)

    actual_kernel_matrix = kernel.K(x_new, x_new)

    np.testing.assert_allclose(approx_kernel_matrix, actual_kernel_matrix, atol=0.05)
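Both this test and the TensorFlow tests above rely on an inner_product helper that is never shown. A reasonable assumption (a sketch, not necessarily either library's actual utility) is a pairwise dot product of feature rows:

import tensorflow as tf

def inner_product(u, v):
    # result[i, j] = <u_i, v_j>, so feature maps of shape (n, D) and (m, D)
    # yield an (n, m) approximate kernel matrix.
    return tf.matmul(u, v, transpose_b=True)
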
def test_fourier_features_can_approximate_kernel_multidim(kernel_class, lengthscale, n_dims):
    n_features = 10000
    x_rows = 20
    y_rows = 30
    # ARD: one lengthscale per input dimension.
    lengthscales = np.random.rand(n_dims) * lengthscale

    kernel = kernel_class(lengthscales=lengthscales)
    fourier_features = RandomFourierFeatures(kernel, n_features, dtype=tf.float64)

    x = tf.random.uniform((x_rows, n_dims), dtype=tf.float64)
    y = tf.random.uniform((y_rows, n_dims), dtype=tf.float64)

    u = fourier_features(x)
    v = fourier_features(y)
    approx_kernel_matrix = inner_product(u, v)

    actual_kernel_matrix = kernel.K(x, y)

    np.testing.assert_allclose(approx_kernel_matrix, actual_kernel_matrix, atol=0.05)
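For the ARD case above, the per-dimension lengthscales enter the random feature map as an inverse scaling of each frequency component. A minimal NumPy sketch for an ARD squared-exponential kernel (the function name and sampling layout are illustrative, not gpflux's implementation):

import numpy as np

def ard_rbf_fourier_features(x, lengthscales, n_features, rng):
    # Frequencies for dimension d are drawn from N(0, 1 / lengthscales[d]^2),
    # so the inner product of the features approximates
    # exp(-0.5 * sum_d ((x_d - y_d) / lengthscales[d])^2).
    n_dims = x.shape[1]
    w = rng.normal(size=(n_features, n_dims)) / lengthscales
    b = rng.uniform(0.0, 2.0 * np.pi, size=n_features)
    return np.sqrt(2.0 / n_features) * np.cos(x @ w.T + b)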