# Imports assumed for these tests: GPflow supplies the covariance dispatchers,
# config helpers, and inducing-variable types. fourier_basis, random_fourier,
# kernels (providing DepthwiseConv2d), DepthwiseInducingImages,
# SharedDepthwiseInducingImages, SupportedBaseKernels, ConfigConv2d,
# swap_axes, allclose, _avg_spatial_inner_product, and
# _Kfu_depthwise_conv2d_fallback are taken to come from the package under
# test and its utilities; allclose is assumed to follow numpy's
# (a, b, rtol, atol) argument convention.
from typing import Optional

import tensorflow as tf

from gpflow import covariances
from gpflow import config as gpflow_config
from gpflow.config import default_float as floatx
from gpflow.inducing_variables import (
    MultioutputInducingVariables,
    SharedIndependentInducingVariables,
)


def _test_fourier_dense_common(config, kern, X, Z):
    # Test the Fourier-feature-based kernel approximation against the
    # closed-form covariances
    Kuu = covariances.Kuu(Z, kern)
    Kfu = covariances.Kfu(Z, kern, X)
    basis = fourier_basis(kern, num_bases=config.num_bases)
    Z_opt = dict()  # options used when evaluating basis/prior at Z
    if isinstance(Z, MultioutputInducingVariables):
        Kff = kern(X, full_cov=True, full_output_cov=False)
        if not isinstance(Z, SharedIndependentInducingVariables):
            # Handling for non-shared multioutput inducing variables.
            # We need to indicate that Z's outermost axis should be
            # evaluated 1-to-1 with the L latent GPs
            Z_opt.setdefault("multioutput_axis", 0)

        feat_x = basis(X)  # [L, N, B]
        feat_z = basis(Z, **Z_opt)  # [L, M, B]
    else:
        Kff = kern(X, full_cov=True)
        feat_x = basis(X)
        feat_z = basis(Z)

    # A B-term Fourier feature approximation carries O(B**-0.5) Monte Carlo
    # error, so allow roughly a three-sigma margin.
    tol = 3 * config.num_bases**-0.5
    assert allclose(tf.matmul(feat_x, feat_x, transpose_b=True), Kff, tol, tol)
    assert allclose(tf.matmul(feat_x, feat_z, transpose_b=True), Kfu, tol, tol)
    assert allclose(tf.matmul(feat_z, feat_z, transpose_b=True), Kuu, tol, tol)
    del feat_x, feat_z

    # Test the covariance of functions drawn from the approximate prior
    fx = []
    fz = []
    count = 0
    while count < config.num_samples:
        size = min(config.shard_size, config.num_samples - count)
        funcs = random_fourier(kern,
                               basis=basis,
                               num_bases=config.num_bases,
                               sample_shape=[size])

        fx.append(funcs(X))
        fz.append(funcs(Z, **Z_opt))
        count += size

    fx = tf.transpose(tf.concat(fx, axis=0))  # [L, N, S]
    fz = tf.transpose(tf.concat(fz, axis=0))  # [L, M, S]
    # Estimating covariances from S sampled functions adds O(S**-0.5) Monte
    # Carlo error on top of the basis-approximation error above.
    tol += 3 * config.num_samples**-0.5
    frac = 1 / config.num_samples
    assert allclose(frac * tf.matmul(fx, fx, transpose_b=True), Kff, tol, tol)
    assert allclose(frac * tf.matmul(fx, fz, transpose_b=True), Kfu, tol, tol)
    assert allclose(frac * tf.matmul(fz, fz, transpose_b=True), Kuu, tol, tol)
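

# Hypothetical usage sketch for _test_fourier_dense_common. Only the three
# config attributes read above are required; the kernel and inducing points
# below are plain GPflow objects chosen for illustration and are not part of
# the original test suite.
class _DenseConfig:
    num_bases = 4096     # B: number of Fourier basis functions
    num_samples = 16384  # S: number of sampled prior functions
    shard_size = 1024    # draw the S samples in shards of this size


def _example_dense_usage():
    import gpflow

    kern = gpflow.kernels.SquaredExponential(lengthscales=0.5)
    X = tf.random.uniform([32, 2], dtype=floatx())    # [N, D] test inputs
    Zsrc = tf.random.uniform([8, 2], dtype=floatx())  # M = 8 inducing points
    Z = gpflow.inducing_variables.InducingPoints(Zsrc)
    _test_fourier_dense_common(_DenseConfig(), kern, X, Z)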


# Example #2
def test_depthwise_conv2d(config: Optional[ConfigConv2d] = None):
    if config is None:
        config = ConfigConv2d()

    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    # Deterministic batch of test images: an increasing ramp over all pixels,
    # rescaled so that values lie in [0, 1].
    X_shape = [config.num_test] + config.image_shape + [config.channels_in]
    X = tf.reshape(tf.range(tf.reduce_prod(X_shape), dtype=floatx()), X_shape)
    X /= tf.reduce_max(X)

    patch_len = int(tf.reduce_prod(config.patch_shape))
    for cls in SupportedBaseKernels:
        # Scale the relative lengthscale bounds by sqrt(patch_len), since
        # squared distances grow linearly with the input dimension.
        minval = config.rel_lengthscales_min * (patch_len**0.5)
        maxval = config.rel_lengthscales_max * (patch_len**0.5)
        lenscales = tf.random.uniform(shape=[config.channels_in, patch_len],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        base = cls(lengthscales=lenscales, variance=config.kernel_variance)
        kern = kernels.DepthwiseConv2d(kernel=base,
                                       image_shape=config.image_shape,
                                       patch_shape=config.patch_shape,
                                       channels_in=config.channels_in,
                                       channels_out=config.channels_out,
                                       dilations=config.dilations,
                                       padding=config.padding,
                                       strides=config.strides)

        # Randomize the depthwise mixing weights so the test covers more than
        # the default initialization (touches a private kernel attribute).
        kern._weights = tf.random.normal(kern._weights.shape, dtype=floatx())

        # Test full and shared inducing images
        Z_shape = [config.num_cond] + config.patch_shape + [config.channels_in]
        Zsrc = tf.random.normal(Z_shape, dtype=floatx())
        for Z in (DepthwiseInducingImages(Zsrc),
                  SharedDepthwiseInducingImages(Zsrc[..., :1],
                                                config.channels_in)):

            test = _Kfu_depthwise_conv2d_fallback(Z, kern, X)
            assert allclose(covariances.Kfu(Z, kern, X), test)
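
# Stand-alone usage sketch: test_depthwise_conv2d builds its own inputs, so
# it can be run directly; the keyword override below assumes ConfigConv2d is
# a simple dataclass-style container for the fields read above.
#
#     test_depthwise_conv2d()                       # default configuration
#     test_depthwise_conv2d(ConfigConv2d(seed=1))   # hypothetical override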


# Example #3
def _test_fourier_conv2d_common(config, kern, X, Z):
    # Use closed-form evaluations as ground truth
    Kuu = covariances.Kuu(Z, kern)
    Kfu = covariances.Kfu(Z, kern, X)
    Kff = kern(X, full_cov=True)

    # Test the Fourier-feature-based kernel approximation
    basis = fourier_basis(kern, num_bases=config.num_bases)
    feat_x = basis(X)  # [N, B] or [N, L, B]
    feat_z = basis(Z)

    # As above: O(B**-0.5) Monte Carlo error, roughly a three-sigma margin
    tol = 3 * config.num_bases**-0.5
    assert allclose(_avg_spatial_inner_product(feat_x, feat_x), Kff, tol, tol)
    assert allclose(_avg_spatial_inner_product(feat_x, feat_z), Kfu, tol, tol)
    assert allclose(_avg_spatial_inner_product(feat_z, feat_z), Kuu, tol, tol)
    del feat_x, feat_z

    # Test the covariance of functions drawn from the approximate prior
    fx = []
    fz = []
    count = 0
    while count < config.num_samples:
        size = min(config.shard_size, config.num_samples - count)
        funcs = random_fourier(kern,
                               basis=basis,
                               num_bases=config.num_bases,
                               sample_shape=[size])

        fx.append(funcs(X))
        fz.append(funcs(Z))
        count += size

    fx = swap_axes(tf.concat(fx, axis=0), 0, -1)  # [L, N, H, W, S]
    fz = swap_axes(tf.concat(fz, axis=0), 0, -1)  # [L, M, 1, 1, S]
    nb = fx.shape.ndims - 4  # num. of batch dimensions
    tol += 3 * config.num_samples**-0.5  # extra O(S**-0.5) sampling error
    frac = 1 / config.num_samples

    assert allclose(frac * _avg_spatial_inner_product(fx, fx, nb), Kff, tol, tol)
    assert allclose(frac * _avg_spatial_inner_product(fx, fz, nb), Kfu, tol, tol)
    assert allclose(frac * _avg_spatial_inner_product(fz, fz, nb), Kuu, tol, tol)
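

# The _avg_spatial_inner_product helper is defined elsewhere in the test
# suite. A minimal sketch consistent with its usage above (the shapes and the
# einsum-based implementation are assumptions) might look like:
def _avg_spatial_inner_product_sketch(a, b, num_batch_dims=0):
    """Contract the trailing feature/sample axis of `a` and `b` and average
    over both inputs' spatial axes, so that feature inner products line up
    with a patch-averaged convolutional kernel.

    a: [batch..., N, Ha, Wa, C] with b: [batch..., M, Hb, Wb, C] gives
    [batch..., N, M]; e.g. fx [L, N, H, W, S] with itself yields [L, N, N],
    comparable to Kff after scaling by 1/S.
    """
    del num_batch_dims  # unused here: einsum's ellipsis broadcasts batch axes
    prod = tf.einsum("...nhwc,...mijc->...nhwmij", a, b)
    # Average over the spatial axes (h, w) of `a` and (i, j) of `b`
    return tf.reduce_mean(prod, axis=[-5, -4, -2, -1])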