Example #1
0
        def main(config):
            """Check `subroutine` against SVGP.predict_f for every supported
            base kernel, using a single-latent model with zeroed q_sqrt and a
            constant mean function.

            Args:
                config: test configuration object; presumably a namespace with
                    seed/shape/tolerance attributes — confirmed only by usage.

            Raises:
                ValueError: if ``config`` is None.
            """
            # NOTE: `assert` is stripped under ``python -O``; validate explicitly.
            if config is None:
                raise ValueError("config must not be None")
            tf.random.set_seed(config.seed)
            gpflow_config.set_default_float(config.floatx)
            gpflow_config.set_default_jitter(config.jitter)

            X = tf.random.uniform([config.num_test, config.input_dims],
                                  dtype=floatx())
            Z_shape = config.num_cond, config.input_dims
            for cls in SupportedBaseKernels:
                # Scale lengthscale bounds by sqrt(input_dims) so typical
                # pairwise distances remain comparable across dimensionalities.
                minval = config.rel_lengthscales_min * (config.input_dims**0.5)
                maxval = config.rel_lengthscales_max * (config.input_dims**0.5)
                lenscales = tf.random.uniform(shape=[config.input_dims],
                                              minval=minval,
                                              maxval=maxval,
                                              dtype=floatx())

                # Zero q_sqrt: the variational covariance is exactly zero.
                q_sqrt = tf.zeros([1] + 2 * [config.num_cond], dtype=floatx())
                kern = cls(lengthscales=lenscales,
                           variance=config.kernel_variance)
                Z = InducingPoints(tf.random.uniform(Z_shape, dtype=floatx()))

                const = tf.random.normal([1], dtype=floatx())
                model = SVGP(kernel=kern,
                             likelihood=None,
                             inducing_variable=Z,
                             mean_function=mean_functions.Constant(c=const),
                             q_sqrt=q_sqrt)

                # Compare the implementation under test with GPflow's own
                # predictive posterior (full covariance).
                mf, Sff = subroutine(config, model, X)
                mg, Sgg = model.predict_f(X, full_cov=True)

                tol = config.error_tol
                assert allclose(mf, mg, tol, tol)
                assert allclose(Sff, Sgg, tol, tol)
Example #2
0
        def main(config):
            """Check `subroutine` against SVGP.predict_f for a
            SeparateIndependent kernel assembled from all supported base
            kernels, each with its own inducing points and a scaled-Cholesky
            q_sqrt (unwhitened parameterization).

            Args:
                config: test configuration object; presumably a namespace with
                    seed/shape/tolerance attributes — confirmed only by usage.

            Raises:
                ValueError: if ``config`` is None.
            """
            # NOTE: `assert` is stripped under ``python -O``; validate explicitly.
            if config is None:
                raise ValueError("config must not be None")
            tf.random.set_seed(config.seed)
            gpflow_config.set_default_float(config.floatx)
            gpflow_config.set_default_jitter(config.jitter)

            X = tf.random.uniform([config.num_test, config.input_dims],
                                  dtype=floatx())
            allK = []
            allZ = []
            Z_shape = config.num_cond, config.input_dims
            for cls in SupportedBaseKernels:
                # Scale lengthscale bounds by sqrt(input_dims) so typical
                # pairwise distances remain comparable across dimensionalities.
                minval = config.rel_lengthscales_min * (config.input_dims**0.5)
                maxval = config.rel_lengthscales_max * (config.input_dims**0.5)
                lenscales = tf.random.uniform(shape=[config.input_dims],
                                              minval=minval,
                                              maxval=maxval,
                                              dtype=floatx())

                # Small random perturbation so each sub-kernel's variance
                # differs slightly from the shared base value.
                rel_variance = tf.random.uniform(shape=[],
                                                 minval=0.9,
                                                 maxval=1.1,
                                                 dtype=floatx())

                allK.append(
                    cls(lengthscales=lenscales,
                        variance=config.kernel_variance * rel_variance))

                allZ.append(
                    InducingPoints(tf.random.uniform(Z_shape, dtype=floatx())))

            kern = kernels.SeparateIndependent(allK)
            Z = SeparateIndependentInducingVariables(allZ)

            # q_sqrt: prior Cholesky factors shrunk by a random factor in
            # [0, 0.5) per latent GP.
            Kuu = covariances.Kuu(Z,
                                  kern,
                                  jitter=gpflow_config.default_jitter())
            q_sqrt = tf.linalg.cholesky(Kuu)\
                     * tf.random.uniform(shape=[kern.num_latent_gps, 1, 1],
                                         minval=0.0,
                                         maxval=0.5,
                                         dtype=floatx())

            const = tf.random.normal([len(kern.kernels)], dtype=floatx())
            model = SVGP(kernel=kern,
                         likelihood=None,
                         inducing_variable=Z,
                         mean_function=mean_functions.Constant(c=const),
                         q_sqrt=q_sqrt,
                         whiten=False,
                         num_latent_gps=len(allK))

            # Compare the implementation under test with GPflow's own
            # predictive posterior (full covariance).
            mf, Sff = subroutine(config, model, X)
            mg, Sgg = model.predict_f(X, full_cov=True)
            tol = config.error_tol
            assert allclose(mf, mg, tol, tol)
            assert allclose(Sff, Sgg, tol, tol)
Example #3
0
        def main(config):
            """Check `subroutine` against SVGP.predict_f for a
            SharedIndependent kernel (two outputs sharing one base kernel)
            with shared inducing points, in the unwhitened parameterization.

            Args:
                config: test configuration object; presumably a namespace with
                    seed/shape/tolerance attributes — confirmed only by usage.

            Raises:
                ValueError: if ``config`` is None.
            """
            # NOTE: `assert` is stripped under ``python -O``; validate explicitly.
            if config is None:
                raise ValueError("config must not be None")
            tf.random.set_seed(config.seed)
            gpflow_config.set_default_float(config.floatx)
            gpflow_config.set_default_jitter(config.jitter)

            X = tf.random.uniform([config.num_test, config.input_dims],
                                  dtype=floatx())
            Z_shape = config.num_cond, config.input_dims
            for cls in SupportedBaseKernels:
                # Scale lengthscale bounds by sqrt(input_dims) so typical
                # pairwise distances remain comparable across dimensionalities.
                minval = config.rel_lengthscales_min * (config.input_dims**0.5)
                maxval = config.rel_lengthscales_max * (config.input_dims**0.5)
                lenscales = tf.random.uniform(shape=[config.input_dims],
                                              minval=minval,
                                              maxval=maxval,
                                              dtype=floatx())

                base = cls(lengthscales=lenscales,
                           variance=config.kernel_variance)
                kern = kernels.SharedIndependent(base, output_dim=2)

                Z = SharedIndependentInducingVariables(
                    InducingPoints(tf.random.uniform(Z_shape, dtype=floatx())))
                Kuu = covariances.Kuu(Z,
                                      kern,
                                      jitter=gpflow_config.default_jitter())
                # q_sqrt mixes the two extremes: latent GP 0 gets zero
                # covariance, latent GP 1 gets the full prior Cholesky.
                q_sqrt = tf.stack([
                    tf.zeros(2 * [config.num_cond], dtype=floatx()),
                    tf.linalg.cholesky(Kuu)
                ])

                const = tf.random.normal([2], dtype=floatx())
                model = SVGP(kernel=kern,
                             likelihood=None,
                             inducing_variable=Z,
                             mean_function=mean_functions.Constant(c=const),
                             q_sqrt=q_sqrt,
                             whiten=False,
                             num_latent_gps=2)

                # Compare the implementation under test with GPflow's own
                # predictive posterior (full covariance).
                mf, Sff = subroutine(config, model, X)
                mg, Sgg = model.predict_f(X, full_cov=True)
                tol = config.error_tol
                assert allclose(mf, mg, tol, tol)
                assert allclose(Sff, Sgg, tol, tol)
Example #4
0
        def main(config):
            """Check `subroutine` against SVGP.predict_f for convolutional
            kernels (Conv2d and Conv2dTranspose) over every supported base
            kernel, on a deterministic normalized image batch.

            Args:
                config: test configuration object; presumably a namespace with
                    seed/image-shape/tolerance attributes — confirmed only by
                    usage.

            Raises:
                ValueError: if ``config`` is None.
            """
            # NOTE: `assert` is stripped under ``python -O``; validate explicitly.
            if config is None:
                raise ValueError("config must not be None")
            tf.random.set_seed(config.seed)
            gpflow_config.set_default_float(config.floatx)
            gpflow_config.set_default_jitter(config.jitter)

            # Deterministic ramp image batch, normalized to [0, 1].
            X_shape = [config.num_test
                       ] + config.image_shape + [config.channels_in]
            X = tf.reshape(tf.range(tf.reduce_prod(X_shape), dtype=floatx()),
                           X_shape)
            X /= tf.reduce_max(X)

            patch_len = config.channels_in * int(
                tf.reduce_prod(config.patch_shape))
            for base_cls in SupportedBaseKernels:
                # Scale lengthscale bounds by sqrt(patch_len) so typical
                # patch distances remain comparable across patch sizes.
                minval = config.rel_lengthscales_min * (patch_len**0.5)
                maxval = config.rel_lengthscales_max * (patch_len**0.5)
                lenscales = tf.random.uniform(shape=[patch_len],
                                              minval=minval,
                                              maxval=maxval,
                                              dtype=floatx())

                base = base_cls(lengthscales=lenscales,
                                variance=config.kernel_variance)
                Z_shape = [config.num_cond
                           ] + config.patch_shape + [config.channels_in]
                for cls in (kernels_ext.Conv2d, kernels_ext.Conv2dTranspose):
                    kern = cls(kernel=base,
                               image_shape=config.image_shape,
                               patch_shape=config.patch_shape,
                               channels_in=config.channels_in,
                               channels_out=config.num_latent_gps,
                               strides=config.strides,
                               padding=config.padding,
                               dilations=config.dilations)

                    Z = InducingImages(
                        tf.random.uniform(Z_shape, dtype=floatx()))
                    # q_sqrt: prior Cholesky shrunk by a random factor in
                    # [0, 0.5) per latent GP.
                    q_sqrt = tf.linalg.cholesky(covariances.Kuu(Z, kern))
                    q_sqrt *= tf.random.uniform([config.num_latent_gps, 1, 1],
                                                minval=0.0,
                                                maxval=0.5,
                                                dtype=floatx())

                    # TODO: GPflow's SVGP class is not setup to support outputs defined
                    #       as spatial feature maps. For now, we content ourselves with
                    #       the following hack...
                    const = tf.random.normal([config.num_latent_gps],
                                             dtype=floatx())

                    def mean_function(x, c=const):
                        # Constant mean regardless of input; `c=const` binds the
                        # current iteration's value at definition time.
                        return c

                    model = SVGP(kernel=kern,
                                 likelihood=None,
                                 mean_function=mean_function,
                                 inducing_variable=Z,
                                 q_sqrt=q_sqrt,
                                 whiten=False,
                                 num_latent_gps=config.num_latent_gps)

                    # Compare the implementation under test with GPflow's own
                    # predictive posterior (full covariance).
                    mf, Sff = subroutine(config, model, X)
                    mg, Sgg = model.predict_f(X, full_cov=True)

                    tol = config.error_tol
                    assert allclose(mf, mg, tol, tol)
                    assert allclose(Sff, Sgg, tol, tol)