def main(config):
    """Check that `subroutine` matches `SVGP.predict_f` for each supported
    base kernel when the variational covariance is zero (q_sqrt == 0).

    Args:
        config: Test configuration carrying seed, floatx, jitter, problem
            sizes, kernel settings and the error tolerance. Must not be None.

    Raises:
        ValueError: If ``config`` is None.
        AssertionError: If predicted means or covariances disagree beyond
            ``config.error_tol``.
    """
    # Explicit validation: `assert` is stripped under `python -O`, and the
    # original `assert config is not None, ValueError` only used the
    # exception class as the assert message.
    if config is None:
        raise ValueError("config must not be None")
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    Z_shape = config.num_cond, config.input_dims
    for cls in SupportedBaseKernels:
        # Lengthscale bounds scale with sqrt(input_dims), presumably to keep
        # typical pairwise distances comparable across dimensionalities.
        minval = config.rel_lengthscales_min * (config.input_dims ** 0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims ** 0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        # All-zero q_sqrt: the variational covariance term vanishes.
        q_sqrt = tf.zeros([1] + 2 * [config.num_cond], dtype=floatx())
        kern = cls(lengthscales=lenscales, variance=config.kernel_variance)
        Z = InducingPoints(tf.random.uniform(Z_shape, dtype=floatx()))

        const = tf.random.normal([1], dtype=floatx())
        model = SVGP(kernel=kern,
                     likelihood=None,
                     inducing_variable=Z,
                     mean_function=mean_functions.Constant(c=const),
                     q_sqrt=q_sqrt)

        mf, Sff = subroutine(config, model, X)
        mg, Sgg = model.predict_f(X, full_cov=True)

        tol = config.error_tol
        assert allclose(mf, mg, tol, tol)
        assert allclose(Sff, Sgg, tol, tol)
def test_dense_separate(config: ConfigFourierDense = None):
    """Run the Fourier dense test for a SeparateIndependent kernel built from
    one randomly-scaled base kernel — each with its own inducing points — per
    supported base-kernel class.
    """
    if config is None:
        config = ConfigFourierDense()
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    base_kernels = []
    inducing_sets = []
    for kernel_cls in SupportedBaseKernels:
        ls = tf.random.uniform(shape=[config.input_dims],
                               minval=config.lengthscales_min,
                               maxval=config.lengthscales_max,
                               dtype=floatx())
        # Small random scaling of the variance per latent kernel.
        var_scale = tf.random.uniform(shape=[],
                                      minval=0.9,
                                      maxval=1.1,
                                      dtype=floatx())
        base_kernels.append(
            kernel_cls(lengthscales=ls,
                       variance=config.kernel_variance * var_scale))
        inducing_sets.append(
            InducingPoints(
                tf.random.uniform([config.num_cond, config.input_dims],
                                  dtype=floatx())))

    kern = kernels.SeparateIndependent(base_kernels)
    Z = SeparateIndependentInducingVariables(inducing_sets)
    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    _test_fourier_dense_common(config, kern, X, Z)
def main(config):
    """Check that `subroutine` matches `SVGP.predict_f` for a
    SeparateIndependent kernel: one base kernel and one inducing-point set per
    supported base-kernel class, with an unwhitened q_sqrt built from a
    randomly scaled Cholesky factor of Kuu.

    Args:
        config: Test configuration carrying seed, floatx, jitter, problem
            sizes, kernel settings and the error tolerance. Must not be None.

    Raises:
        ValueError: If ``config`` is None.
        AssertionError: If predicted means or covariances disagree beyond
            ``config.error_tol``.
    """
    # Explicit validation instead of `assert`, which is stripped under -O.
    if config is None:
        raise ValueError("config must not be None")
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    allK = []
    allZ = []
    Z_shape = config.num_cond, config.input_dims
    for cls in SupportedBaseKernels:
        # Lengthscale bounds scale with sqrt(input_dims).
        minval = config.rel_lengthscales_min * (config.input_dims ** 0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims ** 0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())
        # Small random perturbation of each latent kernel's variance.
        rel_variance = tf.random.uniform(shape=[],
                                         minval=0.9,
                                         maxval=1.1,
                                         dtype=floatx())
        allK.append(cls(lengthscales=lenscales,
                        variance=config.kernel_variance * rel_variance))
        allZ.append(InducingPoints(tf.random.uniform(Z_shape, dtype=floatx())))

    kern = kernels.SeparateIndependent(allK)
    Z = SeparateIndependentInducingVariables(allZ)

    # Unwhitened q_sqrt: Cholesky of Kuu, shrunk by a random factor in [0, 0.5)
    # per latent GP.
    Kuu = covariances.Kuu(Z, kern, jitter=gpflow_config.default_jitter())
    q_sqrt = (tf.linalg.cholesky(Kuu)
              * tf.random.uniform(shape=[kern.num_latent_gps, 1, 1],
                                  minval=0.0,
                                  maxval=0.5,
                                  dtype=floatx()))

    const = tf.random.normal([len(kern.kernels)], dtype=floatx())
    model = SVGP(kernel=kern,
                 likelihood=None,
                 inducing_variable=Z,
                 mean_function=mean_functions.Constant(c=const),
                 q_sqrt=q_sqrt,
                 whiten=False,
                 num_latent_gps=len(allK))

    mf, Sff = subroutine(config, model, X)
    mg, Sgg = model.predict_f(X, full_cov=True)

    tol = config.error_tol
    assert allclose(mf, mg, tol, tol)
    assert allclose(Sff, Sgg, tol, tol)
def main(config):
    """Check that `subroutine` matches `SVGP.predict_f` for a
    SharedIndependent kernel with two outputs, where one output has zero
    variational covariance and the other uses the Cholesky of Kuu.

    Args:
        config: Test configuration carrying seed, floatx, jitter, problem
            sizes, kernel settings and the error tolerance. Must not be None.

    Raises:
        ValueError: If ``config`` is None.
        AssertionError: If predicted means or covariances disagree beyond
            ``config.error_tol``.
    """
    # Explicit validation instead of `assert`, which is stripped under -O.
    if config is None:
        raise ValueError("config must not be None")
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    Z_shape = config.num_cond, config.input_dims
    for cls in SupportedBaseKernels:
        # Lengthscale bounds scale with sqrt(input_dims).
        minval = config.rel_lengthscales_min * (config.input_dims ** 0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims ** 0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        base = cls(lengthscales=lenscales, variance=config.kernel_variance)
        kern = kernels.SharedIndependent(base, output_dim=2)
        Z = SharedIndependentInducingVariables(
            InducingPoints(tf.random.uniform(Z_shape, dtype=floatx())))

        # One latent GP with zero q_sqrt, one with chol(Kuu) — exercises both
        # degenerate and full variational covariances at once.
        Kuu = covariances.Kuu(Z, kern, jitter=gpflow_config.default_jitter())
        q_sqrt = tf.stack([
            tf.zeros(2 * [config.num_cond], dtype=floatx()),
            tf.linalg.cholesky(Kuu)
        ])

        const = tf.random.normal([2], dtype=floatx())
        model = SVGP(kernel=kern,
                     likelihood=None,
                     inducing_variable=Z,
                     mean_function=mean_functions.Constant(c=const),
                     q_sqrt=q_sqrt,
                     whiten=False,
                     num_latent_gps=2)

        mf, Sff = subroutine(config, model, X)
        mg, Sgg = model.predict_f(X, full_cov=True)

        tol = config.error_tol
        assert allclose(mf, mg, tol, tol)
        assert allclose(Sff, Sgg, tol, tol)
def test_dense(config: ConfigFourierDense = None):
    """Run the Fourier dense test for each supported base kernel, sharing a
    single set of test inputs and inducing points across kernels.
    """
    if config is None:
        config = ConfigFourierDense()
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    Z = InducingPoints(
        tf.random.uniform([config.num_cond, config.input_dims],
                          dtype=floatx()))

    for kernel_cls in SupportedBaseKernels:
        ls = tf.random.uniform(shape=[config.input_dims],
                               minval=config.lengthscales_min,
                               maxval=config.lengthscales_max,
                               dtype=floatx())
        kern = kernel_cls(lengthscales=ls, variance=config.kernel_variance)
        _test_fourier_dense_common(config, kern, X, Z)
def test_depthwise_conv2d(config: ConfigConv2d = None):
    """Check `covariances.Kfu` for DepthwiseConv2d kernels against the
    fallback implementation, for both full and shared inducing images.

    Args:
        config: Optional test configuration; a default ``ConfigConv2d`` is
            constructed when None.

    Raises:
        AssertionError: If Kfu disagrees with the fallback computation.
    """
    if config is None:
        config = ConfigConv2d()
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    # Deterministic ramp image batch, normalized to [0, 1].
    X_shape = [config.num_test] + config.image_shape + [config.channels_in]
    X = tf.reshape(tf.range(tf.reduce_prod(X_shape), dtype=floatx()), X_shape)
    X /= tf.reduce_max(X)

    patch_len = int(tf.reduce_prod(config.patch_shape))
    for cls in SupportedBaseKernels:
        # Lengthscale bounds scale with sqrt(patch_len).
        minval = config.rel_lengthscales_min * (patch_len ** 0.5)
        maxval = config.rel_lengthscales_max * (patch_len ** 0.5)
        lenscales = tf.random.uniform(shape=[config.channels_in, patch_len],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        base = cls(lengthscales=lenscales, variance=config.kernel_variance)
        kern = kernels.DepthwiseConv2d(kernel=base,
                                       image_shape=config.image_shape,
                                       patch_shape=config.patch_shape,
                                       channels_in=config.channels_in,
                                       channels_out=config.channels_out,
                                       dilations=config.dilations,
                                       padding=config.padding,
                                       strides=config.strides)
        # Randomize the (private) channel-mixing weights so the comparison is
        # not trivially passing on default values.
        kern._weights = tf.random.normal(kern._weights.shape, dtype=floatx())

        # Test full and shared inducing images
        Z_shape = [config.num_cond] + config.patch_shape + [config.channels_in]
        Zsrc = tf.random.normal(Z_shape, dtype=floatx())
        for Z in (DepthwiseInducingImages(Zsrc),
                  SharedDepthwiseInducingImages(Zsrc[..., :1],
                                                config.channels_in)):
            test = _Kfu_depthwise_conv2d_fallback(Z, kern, X)
            # Bug fix: the boolean result of `allclose` was previously
            # discarded, so this check could never fail.
            assert allclose(covariances.Kfu(Z, kern, X), test)
def main(config):
    """Check that `subroutine` matches `GPR.predict_f` for each supported
    base kernel, on data sampled from the prior plus observation noise.

    Args:
        config: Test configuration carrying seed, floatx, jitter, problem
            sizes, noise variance and the error tolerance. Must not be None.

    Raises:
        ValueError: If ``config`` is None.
        AssertionError: If predicted means or covariances disagree beyond
            ``config.error_tol``.
    """
    # Explicit validation instead of `assert`, which is stripped under -O.
    if config is None:
        raise ValueError("config must not be None")
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_cond, config.input_dims], dtype=floatx())
    Xnew = tf.random.uniform([config.num_test, config.input_dims],
                             dtype=floatx())
    for cls in SupportedBaseKernels:
        # Lengthscale bounds scale with sqrt(input_dims).
        minval = config.rel_lengthscales_min * (config.input_dims ** 0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims ** 0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        kern = cls(lengthscales=lenscales, variance=config.kernel_variance)
        const = tf.random.normal([1], dtype=floatx())

        # Draw y from N(const, K + noise_variance * I) via a Cholesky factor.
        K = kern(X, full_cov=True)
        K = tf.linalg.set_diag(K,
                               tf.linalg.diag_part(K) + config.noise_variance)
        L = tf.linalg.cholesky(K)
        y = L @ tf.random.normal([L.shape[-1], 1], dtype=floatx()) + const

        model = GPR(kernel=kern,
                    noise_variance=config.noise_variance,
                    data=(X, y),
                    mean_function=mean_functions.Constant(c=const))

        mf, Sff = subroutine(config, model, Xnew)
        mg, Sgg = model.predict_f(Xnew, full_cov=True)

        tol = config.error_tol
        assert allclose(mf, mg, tol, tol)
        assert allclose(Sff, Sgg, tol, tol)
def test_conv2d(config: ConfigFourierConv2d = None):
    """Run the Fourier conv2d test for Conv2d and Conv2dTranspose kernels,
    once per supported base kernel.

    TODO: Consider separating out the test for Conv2dTranspose since it only
    supports a subset of strides/dilations.
    """
    if config is None:
        config = ConfigFourierConv2d()
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    # Deterministic ramp image batch, normalized to [0, 1].
    image_batch = [config.num_test] + config.image_shape + [config.channels_in]
    X = tf.reshape(tf.range(tf.reduce_prod(image_batch), dtype=floatx()),
                   image_batch)
    X /= tf.reduce_max(X)

    patch_batch = [config.num_cond] + config.patch_shape + [config.channels_in]
    Z = inducing_variables.InducingImages(
        tf.random.normal(patch_batch, dtype=floatx()))

    patch_len = (config.channels_in
                 * config.patch_shape[0]
                 * config.patch_shape[1])
    lo = config.rel_lengthscales_min * (patch_len ** 0.5)
    hi = config.rel_lengthscales_max * (patch_len ** 0.5)
    for kernel_cls in SupportedBaseKernels:
        ls = tf.random.uniform(shape=[patch_len],
                               minval=lo,
                               maxval=hi,
                               dtype=floatx())
        base = kernel_cls(lengthscales=ls, variance=config.kernel_variance)
        for conv_cls in (kernels.Conv2d, kernels.Conv2dTranspose):
            kern = conv_cls(kernel=base,
                            image_shape=config.image_shape,
                            patch_shape=config.patch_shape,
                            channels_in=config.channels_in,
                            channels_out=config.num_latent_gps,
                            dilations=config.dilations,
                            strides=config.strides)
            _test_fourier_conv2d_common(config, kern, X, Z)
def test_depthwise_conv2d(config: ConfigFourierConv2d = None):
    """Run the Fourier conv2d test for DepthwiseConv2d kernels, once per
    supported base kernel.
    """
    if config is None:
        config = ConfigFourierConv2d()
    # Precondition on the config — presumably the Fourier bases are split
    # evenly across input channels; verify against the implementation.
    assert config.num_bases % config.channels_in == 0
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform(
        [config.num_test] + config.image_shape + [config.channels_in],
        dtype=floatx())
    Z = inducing_variables.DepthwiseInducingImages(
        tf.random.normal(
            [config.num_cond] + config.patch_shape + [config.channels_in],
            dtype=floatx()))

    patch_len = config.patch_shape[0] * config.patch_shape[1]
    lo = config.rel_lengthscales_min * (patch_len ** 0.5)
    hi = config.rel_lengthscales_max * (patch_len ** 0.5)
    for kernel_cls in SupportedBaseKernels:
        ls = tf.random.uniform(shape=[config.channels_in, patch_len],
                               minval=lo,
                               maxval=hi,
                               dtype=floatx())
        base = kernel_cls(lengthscales=ls, variance=config.kernel_variance)
        for conv_cls in (kernels.DepthwiseConv2d,):
            kern = conv_cls(kernel=base,
                            image_shape=config.image_shape,
                            patch_shape=config.patch_shape,
                            channels_in=config.channels_in,
                            channels_out=config.num_latent_gps,
                            dilations=config.dilations,
                            strides=config.strides)
            _test_fourier_conv2d_common(config, kern, X, Z)
def test_jitter_errorcheck():
    """Invalid jitter values are rejected with the appropriate exception."""
    for exc_type, bad_value in ((TypeError, "not a float"),
                                (ValueError, -1e-10)):
        with pytest.raises(exc_type):
            set_default_jitter(bad_value)
def test_jitter_setting():
    """Values passed to `set_default_jitter` round-trip via `default_jitter`."""
    for value in (1e-3, 1e-6):
        set_default_jitter(value)
        assert default_jitter() == value
def main(config):
    """Check that `subroutine` matches `SVGP.predict_f` for Conv2d and
    Conv2dTranspose kernels over each supported base kernel, using an
    unwhitened q_sqrt built from a randomly scaled Cholesky of Kuu.

    Args:
        config: Test configuration carrying seed, floatx, jitter, image and
            patch shapes, channel counts and the error tolerance. Must not
            be None.

    Raises:
        ValueError: If ``config`` is None.
        AssertionError: If predicted means or covariances disagree beyond
            ``config.error_tol``.
    """
    # Explicit validation instead of `assert`, which is stripped under -O.
    if config is None:
        raise ValueError("config must not be None")
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    # Deterministic ramp image batch, normalized to [0, 1].
    X_shape = [config.num_test] + config.image_shape + [config.channels_in]
    X = tf.reshape(tf.range(tf.reduce_prod(X_shape), dtype=floatx()), X_shape)
    X /= tf.reduce_max(X)

    patch_len = config.channels_in * int(tf.reduce_prod(config.patch_shape))
    for base_cls in SupportedBaseKernels:
        # Lengthscale bounds scale with sqrt(patch_len).
        minval = config.rel_lengthscales_min * (patch_len ** 0.5)
        maxval = config.rel_lengthscales_max * (patch_len ** 0.5)
        lenscales = tf.random.uniform(shape=[patch_len],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        base = base_cls(lengthscales=lenscales,
                        variance=config.kernel_variance)
        Z_shape = [config.num_cond] + config.patch_shape + [config.channels_in]
        for cls in (kernels_ext.Conv2d, kernels_ext.Conv2dTranspose):
            kern = cls(kernel=base,
                       image_shape=config.image_shape,
                       patch_shape=config.patch_shape,
                       channels_in=config.channels_in,
                       channels_out=config.num_latent_gps,
                       strides=config.strides,
                       padding=config.padding,
                       dilations=config.dilations)
            Z = InducingImages(tf.random.uniform(Z_shape, dtype=floatx()))

            # Unwhitened q_sqrt: chol(Kuu) shrunk by a random factor in
            # [0, 0.5) per latent GP.
            q_sqrt = tf.linalg.cholesky(covariances.Kuu(Z, kern))
            q_sqrt *= tf.random.uniform([config.num_latent_gps, 1, 1],
                                        minval=0.0,
                                        maxval=0.5,
                                        dtype=floatx())

            # TODO: GPflow's SVGP class is not setup to support outputs defined
            # as spatial feature maps. For now, we content ourselves with
            # the following hack...
            const = tf.random.normal([config.num_latent_gps], dtype=floatx())

            def mean_function(x, const=const):
                # Constant mean. `const` is bound as a default argument to
                # avoid the late-binding-closure pitfall inside this loop
                # (the original used `lambda x: const`).
                return const

            model = SVGP(kernel=kern,
                         likelihood=None,
                         mean_function=mean_function,
                         inducing_variable=Z,
                         q_sqrt=q_sqrt,
                         whiten=False,
                         num_latent_gps=config.num_latent_gps)

            mf, Sff = subroutine(config, model, X)
            mg, Sgg = model.predict_f(X, full_cov=True)

            tol = config.error_tol
            assert allclose(mf, mg, tol, tol)
            assert allclose(Sff, Sgg, tol, tol)