Example #1
        def main(config):
            assert config is not None, ValueError
            tf.random.set_seed(config.seed)
            gpflow_config.set_default_float(config.floatx)
            gpflow_config.set_default_jitter(config.jitter)

            X = tf.random.uniform([config.num_test, config.input_dims],
                                  dtype=floatx())
            Z_shape = config.num_cond, config.input_dims
            for cls in SupportedBaseKernels:
                minval = config.rel_lengthscales_min * (config.input_dims**0.5)
                maxval = config.rel_lengthscales_max * (config.input_dims**0.5)
                lenscales = tf.random.uniform(shape=[config.input_dims],
                                              minval=minval,
                                              maxval=maxval,
                                              dtype=floatx())

                q_sqrt = tf.zeros([1] + 2 * [config.num_cond], dtype=floatx())
                kern = cls(lengthscales=lenscales,
                           variance=config.kernel_variance)
                Z = InducingPoints(tf.random.uniform(Z_shape, dtype=floatx()))

                const = tf.random.normal([1], dtype=floatx())
                model = SVGP(kernel=kern,
                             likelihood=None,
                             inducing_variable=Z,
                             mean_function=mean_functions.Constant(c=const),
                             q_sqrt=q_sqrt)

                mf, Sff = subroutine(config, model, X)
                mg, Sgg = model.predict_f(X, full_cov=True)

                tol = config.error_tol
                assert allclose(mf, mg, tol, tol)
                assert allclose(Sff, Sgg, tol, tol)
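A note on the pattern above: each snippet draws its random tensors with dtype=floatx() so everything agrees with the configured default. A minimal standalone sketch, assuming floatx is shorthand for gpflow.config.default_float (the alias itself is not shown on this page):

import numpy as np
import tensorflow as tf
from gpflow.config import set_default_float, default_float as floatx

set_default_float(np.float64)
x = tf.random.uniform([3, 2], dtype=floatx())
assert x.dtype == tf.float64  # new tensors follow the configured default float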
Example #2
def test_dense_separate(config: ConfigFourierDense = None):
    if config is None:
        config = ConfigFourierDense()

    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    allZ = []
    allK = []
    for cls in SupportedBaseKernels:
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=config.lengthscales_min,
                                      maxval=config.lengthscales_max,
                                      dtype=floatx())

        rel_variance = tf.random.uniform(shape=[],
                                         minval=0.9,
                                         maxval=1.1,
                                         dtype=floatx())

        allK.append(
            cls(lengthscales=lenscales,
                variance=config.kernel_variance * rel_variance))

        allZ.append(
            InducingPoints(
                tf.random.uniform([config.num_cond, config.input_dims],
                                  dtype=floatx())))

    kern = kernels.SeparateIndependent(allK)
    Z = SeparateIndependentInducingVariables(allZ)
    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    _test_fourier_dense_common(config, kern, X, Z)
Example #3
def test_v_prior_dtypes(model_class, args):
    with gpflow.config.as_context():
        set_default_float(np.float32)
        m = model_class(*args)
        assert m.V.prior.dtype == np.float32
        set_default_float(np.float64)
        m = model_class(*args)
        assert m.V.prior.dtype == np.float64
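Example #3 relies on gpflow.config.as_context(), which scopes configuration changes so they do not leak between cases. A minimal sketch of that behaviour (standalone illustration, not from the page):

import numpy as np
import gpflow
from gpflow.config import set_default_float, default_float

before = default_float()
with gpflow.config.as_context():
    set_default_float(np.float32)
    assert default_float() == np.float32  # override is visible inside the context
assert default_float() == before  # previous default restored on exit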
Example #4
        def main(config):
            assert config is not None, ValueError
            tf.random.set_seed(config.seed)
            gpflow_config.set_default_float(config.floatx)
            gpflow_config.set_default_jitter(config.jitter)

            X = tf.random.uniform([config.num_test, config.input_dims],
                                  dtype=floatx())
            allK = []
            allZ = []
            Z_shape = config.num_cond, config.input_dims
            for cls in SupportedBaseKernels:
                minval = config.rel_lengthscales_min * (config.input_dims**0.5)
                maxval = config.rel_lengthscales_max * (config.input_dims**0.5)
                lenscales = tf.random.uniform(shape=[config.input_dims],
                                              minval=minval,
                                              maxval=maxval,
                                              dtype=floatx())

                rel_variance = tf.random.uniform(shape=[],
                                                 minval=0.9,
                                                 maxval=1.1,
                                                 dtype=floatx())

                allK.append(
                    cls(lengthscales=lenscales,
                        variance=config.kernel_variance * rel_variance))

                allZ.append(
                    InducingPoints(tf.random.uniform(Z_shape, dtype=floatx())))

            kern = kernels.SeparateIndependent(allK)
            Z = SeparateIndependentInducingVariables(allZ)

            Kuu = covariances.Kuu(Z,
                                  kern,
                                  jitter=gpflow_config.default_jitter())
            q_sqrt = tf.linalg.cholesky(Kuu)\
                     * tf.random.uniform(shape=[kern.num_latent_gps, 1, 1],
                                         minval=0.0,
                                         maxval=0.5,
                                         dtype=floatx())

            const = tf.random.normal([len(kern.kernels)], dtype=floatx())
            model = SVGP(kernel=kern,
                         likelihood=None,
                         inducing_variable=Z,
                         mean_function=mean_functions.Constant(c=const),
                         q_sqrt=q_sqrt,
                         whiten=False,
                         num_latent_gps=len(allK))

            mf, Sff = subroutine(config, model, X)
            mg, Sgg = model.predict_f(X, full_cov=True)
            tol = config.error_tol
            assert allclose(mf, mg, tol, tol)
            assert allclose(Sff, Sgg, tol, tol)
Example #5
    def __init__(self, 
                 data: Tuple[tf.Tensor, tf.Tensor], 
                 m: int = 20, 
                 d: int = 1,
                 alpha: float = 1./np.sqrt(2.),
                 eps_sq: float = 1,
                 sigma_n_sq: float = 1,
                 sigma_f_sq: float = 1,
                 dir_weights: str = None):
                    
        if data[1].dtype == np.float64:
            K_bd.set_floatx('float64')
        else:
            set_default_float(np.float32)

        self.num_data = tf.cast(data[1].shape[0], default_float())
        self.data = (tf.cast(data[0], default_float()), tf.cast(data[1], default_float()))
        self.const = tf.cast(0.5*data[1].size*np.log(2*np.pi), default_float())
               
        self.flag_1d = d == 1
        self.alpha = tf.cast(alpha, default_float())
        self.alpha_sq = tf.square(self.alpha)
        self.m = tf.cast(m, default_float())
        self.this_range = tf.constant(np.asarray(list(product(range(1, m + 1), repeat=d))).squeeze(), dtype=default_float())
        self.this_range_1 = self.this_range - 1.
        self.this_range_1_2 = self.this_range_1 if self.flag_1d else tf.range(m, dtype=default_float())
        self.this_range_1_int = tf.cast(self.this_range_1, tf.int32)
        self.tf_range_dnn_out = tf.range(d)
        self.this_range_1_ln2 = np.log(2.)*self.this_range_1

        self.vander_range = tf.range(m+1, dtype=default_float())
        self.eye_k = tf.eye(m**d, dtype=default_float())
        self.yTy = tf.reduce_sum(tf.math.square(self.data[1])) 
        self.coeff_n_tf = tf.constant(np.load(os.path.dirname(os.path.realpath(__file__)) + '/hermite_coeff.npy')[:m, :m], dtype=default_float())
        
        eps_sq = eps_sq*np.ones(d) if d > 1 else eps_sq       
        self.eps_sq = Parameter(eps_sq, transform=positive(), dtype=default_float())
        self.sigma_f_sq = Parameter(sigma_f_sq, transform=positive(), dtype=default_float())
        self.sigma_n_sq = Parameter(sigma_n_sq, transform=positive(), dtype=default_float())
       
        model = models.Sequential()
        model.add(layers.Dense(512, activation='tanh', input_dim=data[0].shape[1]))        
        model.add(layers.Dense(256, activation='tanh'))
        model.add(layers.Dense(64, activation='tanh'))
        model.add(layers.Dense(d))      
        
        if dir_weights is not None:
            model.load_weights(dir_weights)
        self.neural_net = model
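The constructor above passes dtype=default_float() explicitly when creating each Parameter. A small sketch of why that matters, using the same calls (illustrative, not from the page):

import numpy as np
from gpflow import Parameter
from gpflow.config import set_default_float, default_float
from gpflow.utilities import positive

set_default_float(np.float64)
# Parameters built with dtype=default_float() match the configured precision.
p = Parameter(1.0, transform=positive(), dtype=default_float())
assert p.dtype == np.float64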
Example #6
        def main(config):
            assert config is not None, ValueError
            tf.random.set_seed(config.seed)
            gpflow_config.set_default_float(config.floatx)
            gpflow_config.set_default_jitter(config.jitter)

            X = tf.random.uniform([config.num_test, config.input_dims],
                                  dtype=floatx())
            Z_shape = config.num_cond, config.input_dims
            for cls in SupportedBaseKernels:
                minval = config.rel_lengthscales_min * (config.input_dims**0.5)
                maxval = config.rel_lengthscales_max * (config.input_dims**0.5)
                lenscales = tf.random.uniform(shape=[config.input_dims],
                                              minval=minval,
                                              maxval=maxval,
                                              dtype=floatx())

                base = cls(lengthscales=lenscales,
                           variance=config.kernel_variance)
                kern = kernels.SharedIndependent(base, output_dim=2)

                Z = SharedIndependentInducingVariables(
                    InducingPoints(tf.random.uniform(Z_shape, dtype=floatx())))
                Kuu = covariances.Kuu(Z,
                                      kern,
                                      jitter=gpflow_config.default_jitter())
                q_sqrt = tf.stack([
                    tf.zeros(2 * [config.num_cond], dtype=floatx()),
                    tf.linalg.cholesky(Kuu)
                ])

                const = tf.random.normal([2], dtype=floatx())
                model = SVGP(kernel=kern,
                             likelihood=None,
                             inducing_variable=Z,
                             mean_function=mean_functions.Constant(c=const),
                             q_sqrt=q_sqrt,
                             whiten=False,
                             num_latent_gps=2)

                mf, Sff = subroutine(config, model, X)
                mg, Sgg = model.predict_f(X, full_cov=True)
                tol = config.error_tol
                assert allclose(mf, mg, tol, tol)
                assert allclose(Sff, Sgg, tol, tol)
Example #7
def test_dense(config: ConfigFourierDense = None):
    if config is None:
        config = ConfigFourierDense()

    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    Z = tf.random.uniform([config.num_cond, config.input_dims], dtype=floatx())
    Z = InducingPoints(Z)
    for cls in SupportedBaseKernels:
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=config.lengthscales_min,
                                      maxval=config.lengthscales_max,
                                      dtype=floatx())

        kern = cls(lengthscales=lenscales, variance=config.kernel_variance)
        _test_fourier_dense_common(config, kern, X, Z)
Example #8
def test_depthwise_conv2d(config: ConfigConv2d = None):
    if config is None:
        config = ConfigConv2d()

    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X_shape = [config.num_test] + config.image_shape + [config.channels_in]
    X = tf.reshape(tf.range(tf.reduce_prod(X_shape), dtype=floatx()), X_shape)
    X /= tf.reduce_max(X)

    patch_len = int(tf.reduce_prod(config.patch_shape))
    for cls in SupportedBaseKernels:
        minval = config.rel_lengthscales_min * (patch_len**0.5)
        maxval = config.rel_lengthscales_max * (patch_len**0.5)
        lenscales = tf.random.uniform(shape=[config.channels_in, patch_len],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        base = cls(lengthscales=lenscales, variance=config.kernel_variance)
        kern = kernels.DepthwiseConv2d(kernel=base,
                                       image_shape=config.image_shape,
                                       patch_shape=config.patch_shape,
                                       channels_in=config.channels_in,
                                       channels_out=config.channels_out,
                                       dilations=config.dilations,
                                       padding=config.padding,
                                       strides=config.strides)

        kern._weights = tf.random.normal(kern._weights.shape, dtype=floatx())

        # Test full and shared inducing images
        Z_shape = [config.num_cond] + config.patch_shape + [config.channels_in]
        Zsrc = tf.random.normal(Z_shape, dtype=floatx())
        for Z in (DepthwiseInducingImages(Zsrc),
                  SharedDepthwiseInducingImages(Zsrc[..., :1],
                                                config.channels_in)):

            test = _Kfu_depthwise_conv2d_fallback(Z, kern, X)
            assert allclose(covariances.Kfu(Z, kern, X), test)
Example #9
        def main(config):
            assert config is not None, ValueError
            tf.random.set_seed(config.seed)
            gpflow_config.set_default_float(config.floatx)
            gpflow_config.set_default_jitter(config.jitter)

            X = tf.random.uniform([config.num_cond, config.input_dims],
                                  dtype=floatx())
            Xnew = tf.random.uniform([config.num_test, config.input_dims],
                                     dtype=floatx())
            for cls in SupportedBaseKernels:
                minval = config.rel_lengthscales_min * (config.input_dims**0.5)
                maxval = config.rel_lengthscales_max * (config.input_dims**0.5)
                lenscales = tf.random.uniform(shape=[config.input_dims],
                                              minval=minval,
                                              maxval=maxval,
                                              dtype=floatx())

                kern = cls(lengthscales=lenscales,
                           variance=config.kernel_variance)
                const = tf.random.normal([1], dtype=floatx())

                K = kern(X, full_cov=True)
                K = tf.linalg.set_diag(
                    K,
                    tf.linalg.diag_part(K) + config.noise_variance)
                L = tf.linalg.cholesky(K)
                y = L @ tf.random.normal([L.shape[-1], 1],
                                         dtype=floatx()) + const

                model = GPR(kernel=kern,
                            noise_variance=config.noise_variance,
                            data=(X, y),
                            mean_function=mean_functions.Constant(c=const))

                mf, Sff = subroutine(config, model, Xnew)
                mg, Sgg = model.predict_f(Xnew, full_cov=True)

                tol = config.error_tol
                assert allclose(mf, mg, tol, tol)
                assert allclose(Sff, Sgg, tol, tol)
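Example #9 generates its training targets by scaling standard normals with the Cholesky factor of K + noise_variance * I, which gives exact draws from the GP prior. A standalone NumPy check of that identity (illustrative, not from the page):

import numpy as np

rng = np.random.default_rng(0)
K = np.array([[1.0, 0.5], [0.5, 1.0]])  # toy prior covariance
noisy_K = K + 0.1 * np.eye(2)           # add observation noise to the diagonal
L = np.linalg.cholesky(noisy_K)
eps = rng.standard_normal((2, 200_000))
samples = L @ eps                       # cov(L @ eps) = L @ L.T = noisy_K
emp_cov = samples @ samples.T / eps.shape[1]
assert np.allclose(emp_cov, noisy_K, atol=0.02)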
Example #10
def test_conv2d(config: ConfigFourierConv2d = None):
  """
  TODO: Consider separating out the test for Conv2dTranspose since it only
  supports a subset of strides/dilations.
  """
  if config is None:
    config = ConfigFourierConv2d()

  tf.random.set_seed(config.seed)
  gpflow_config.set_default_float(config.floatx)
  gpflow_config.set_default_jitter(config.jitter)

  X_shape = [config.num_test] + config.image_shape + [config.channels_in]
  X = tf.reshape(tf.range(tf.reduce_prod(X_shape), dtype=floatx()), X_shape)
  X /= tf.reduce_max(X)

  Z_shape = [config.num_cond] + config.patch_shape + [config.channels_in]
  Zsrc = tf.random.normal(Z_shape, dtype=floatx())
  Z = inducing_variables.InducingImages(Zsrc)

  patch_len = config.channels_in * config.patch_shape[0] * config.patch_shape[1]
  for base_cls in SupportedBaseKernels:
    minval = config.rel_lengthscales_min * (patch_len ** 0.5)
    maxval = config.rel_lengthscales_max * (patch_len ** 0.5)
    lenscales = tf.random.uniform(shape=[patch_len],
                                  minval=minval,
                                  maxval=maxval,
                                  dtype=floatx())

    base = base_cls(lengthscales=lenscales, variance=config.kernel_variance)
    for cls in (kernels.Conv2d, kernels.Conv2dTranspose):
      kern = cls(kernel=base,
                 image_shape=config.image_shape,
                 patch_shape=config.patch_shape,
                 channels_in=config.channels_in,
                 channels_out=config.num_latent_gps,
                 dilations=config.dilations,
                 strides=config.strides)

      _test_fourier_conv2d_common(config, kern, X, Z)
Example #11
 def __init__(self, 
              data: Tuple[tf.Tensor, tf.Tensor],  
              m: int = 100, 
              d: int = 4,
              lengthscales = None,
              sigma_n_sq: float = 1,
              sigma_f_sq: float = 1,
              dir_weights: str = None):
                 
     if data[1].dtype == np.float64:
         K_bd.set_floatx('float64')
     else:
         set_default_float(np.float32)
         
     self.num_data = tf.cast(data[1].shape[0], default_float())
     self.data = (tf.cast(data[0], default_float()), tf.cast(data[1], default_float()))
     self.const = tf.cast(0.5*data[1].size*np.log(2*np.pi), default_float())
                    
     self.eye_2m = tf.eye(2*m, dtype=default_float())
     self.yTy = tf.reduce_sum(tf.math.square(self.data[1])) 
     self.m_float = tf.cast(m, default_float())
     self.randn = tf.random.normal(shape=[m, d], dtype=default_float())
     
     lengthscales0 = np.ones(d) if lengthscales is None else lengthscales
     self.lengthscales = Parameter(lengthscales0, transform=positive(), dtype=default_float())
     self.sigma_f_sq = Parameter(sigma_f_sq, transform=positive(), dtype=default_float())
     self.sigma_n_sq = Parameter(sigma_n_sq, transform=positive(), dtype=default_float())
    
     model = models.Sequential()
     model.add(layers.Dense(512, activation='tanh', input_dim=data[0].shape[1]))        
     model.add(layers.Dense(256, activation='tanh'))
     model.add(layers.Dense(64, activation='tanh'))
     model.add(layers.Dense(d))      
     
     if dir_weights is not None:
         model.load_weights(dir_weights)
     self.neural_net = model
Example #12
def test_depthwise_conv2d(config: ConfigFourierConv2d = None):
  if config is None:
    config = ConfigFourierConv2d()

  assert config.num_bases % config.channels_in == 0
  tf.random.set_seed(config.seed)
  gpflow_config.set_default_float(config.floatx)
  gpflow_config.set_default_jitter(config.jitter)

  X_shape = [config.num_test] + config.image_shape + [config.channels_in]
  X = tf.random.uniform(X_shape, dtype=floatx())

  img_shape = [config.num_cond] + config.patch_shape + [config.channels_in]
  Zsrc = tf.random.normal(img_shape, dtype=floatx())
  Z = inducing_variables.DepthwiseInducingImages(Zsrc)

  patch_len = config.patch_shape[0] * config.patch_shape[1]
  for base_cls in SupportedBaseKernels:
    minval = config.rel_lengthscales_min * (patch_len ** 0.5)
    maxval = config.rel_lengthscales_max * (patch_len ** 0.5)
    lenscales = tf.random.uniform(shape=[config.channels_in, patch_len],
                                  minval=minval,
                                  maxval=maxval,
                                  dtype=floatx())

    base = base_cls(lengthscales=lenscales, variance=config.kernel_variance)
    for cls in (kernels.DepthwiseConv2d,):
      kern = cls(kernel=base,
                 image_shape=config.image_shape,
                 patch_shape=config.patch_shape,
                 channels_in=config.channels_in,
                 channels_out=config.num_latent_gps,
                 dilations=config.dilations,
                 strides=config.strides)

      _test_fourier_conv2d_common(config, kern, X, Z)
Example #13
import gpflow
import numpy as np

import matplotlib.pyplot as plt
import tensorflow as tf

from gpflow.utilities import ops, print_summary
from gpflow.config import set_default_float, default_float, set_default_summary_fmt
from gpflow.ci_utils import ci_niter

set_default_float(np.float64)


def sinusoid(x, scale=3, shift=0):
    return np.cos(scale * 2 * np.pi * (x[..., 0]) + shift)


SCALE_SHARED = 0.5
SCALE_FG = 1
SHIFT_FG = 0.25 * np.pi
SHIFT_SHARED = 0


def generate_1d_contrastive_data(num_training_points,
                                 observation_noise_variance):
    """Generate noisy sinusoidal observations at a random set of points.

    Returns:
       observation_index_points, observations
    """
Example #14
        def main(config):
            assert config is not None, ValueError
            tf.random.set_seed(config.seed)
            gpflow_config.set_default_float(config.floatx)
            gpflow_config.set_default_jitter(config.jitter)

            X_shape = [config.num_test
                       ] + config.image_shape + [config.channels_in]
            X = tf.reshape(tf.range(tf.reduce_prod(X_shape), dtype=floatx()),
                           X_shape)
            X /= tf.reduce_max(X)

            patch_len = config.channels_in * int(
                tf.reduce_prod(config.patch_shape))
            for base_cls in SupportedBaseKernels:
                minval = config.rel_lengthscales_min * (patch_len**0.5)
                maxval = config.rel_lengthscales_max * (patch_len**0.5)
                lenscales = tf.random.uniform(shape=[patch_len],
                                              minval=minval,
                                              maxval=maxval,
                                              dtype=floatx())

                base = base_cls(lengthscales=lenscales,
                                variance=config.kernel_variance)
                Z_shape = [config.num_cond
                           ] + config.patch_shape + [config.channels_in]
                for cls in (kernels_ext.Conv2d, kernels_ext.Conv2dTranspose):
                    kern = cls(kernel=base,
                               image_shape=config.image_shape,
                               patch_shape=config.patch_shape,
                               channels_in=config.channels_in,
                               channels_out=config.num_latent_gps,
                               strides=config.strides,
                               padding=config.padding,
                               dilations=config.dilations)

                    Z = InducingImages(
                        tf.random.uniform(Z_shape, dtype=floatx()))
                    q_sqrt = tf.linalg.cholesky(covariances.Kuu(Z, kern))
                    q_sqrt *= tf.random.uniform([config.num_latent_gps, 1, 1],
                                                minval=0.0,
                                                maxval=0.5,
                                                dtype=floatx())

                    # TODO: GPflow's SVGP class is not setup to support outputs defined
                    #       as spatial feature maps. For now, we content ourselves with
                    #       the following hack...
                    const = tf.random.normal([config.num_latent_gps],
                                             dtype=floatx())
                    mean_function = lambda x: const

                    model = SVGP(kernel=kern,
                                 likelihood=None,
                                 mean_function=mean_function,
                                 inducing_variable=Z,
                                 q_sqrt=q_sqrt,
                                 whiten=False,
                                 num_latent_gps=config.num_latent_gps)

                    mf, Sff = subroutine(config, model, X)
                    mg, Sgg = model.predict_f(X, full_cov=True)

                    tol = config.error_tol
                    assert allclose(mf, mg, tol, tol)
                    assert allclose(Sff, Sgg, tol, tol)