Example #1
def main(config):
    assert config is not None, "config is required"
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    Z_shape = config.num_cond, config.input_dims
    for cls in SupportedBaseKernels:
        minval = config.rel_lengthscales_min * (config.input_dims**0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims**0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        # Zero q_sqrt: the variational distribution is a point mass at q_mu
        q_sqrt = tf.zeros([1] + 2 * [config.num_cond], dtype=floatx())
        kern = cls(lengthscales=lenscales, variance=config.kernel_variance)
        Z = InducingPoints(tf.random.uniform(Z_shape, dtype=floatx()))

        const = tf.random.normal([1], dtype=floatx())
        model = SVGP(kernel=kern,
                     likelihood=None,
                     inducing_variable=Z,
                     mean_function=mean_functions.Constant(c=const),
                     q_sqrt=q_sqrt)

        mf, Sff = subroutine(config, model, X)
        mg, Sgg = model.predict_f(X, full_cov=True)

        tol = config.error_tol
        assert allclose(mf, mg, tol, tol)
        assert allclose(Sff, Sgg, tol, tol)
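Here `subroutine` stands in for one of the sample-generation routines shown further down (e.g. `_test_exact_svgp`), and its output is compared, in moments, against `model.predict_f`. A minimal sketch of such a moment estimate, with hypothetical names and assuming samples shaped [S, N, L]:

def empirical_moments(samples):
    # samples: [S, N, L] -> mean [N, L] and per-output covariance [L, N, N]
    mean = tf.reduce_mean(samples, axis=0)
    resid = samples - mean
    cov = tf.einsum('snl,sml->lnm', resid, resid) / (samples.shape[0] - 1)
    return mean, cov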
Example #2
def test_dense_separate(config: ConfigFourierDense = None):
    if config is None:
        config = ConfigFourierDense()

    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    allZ = []
    allK = []
    for cls in SupportedBaseKernels:
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=config.lengthscales_min,
                                      maxval=config.lengthscales_max,
                                      dtype=floatx())

        rel_variance = tf.random.uniform(shape=[],
                                         minval=0.9,
                                         maxval=1.1,
                                         dtype=floatx())

        allK.append(
            cls(lengthscales=lenscales,
                variance=config.kernel_variance * rel_variance))

        allZ.append(
            InducingPoints(
                tf.random.uniform([config.num_cond, config.input_dims],
                                  dtype=floatx())))

    kern = kernels.SeparateIndependent(allK)
    Z = SeparateIndependentInducingVariables(allZ)
    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    _test_fourier_dense_common(config, kern, X, Z)
Example #3
def _test_linear_svgp(config: ConfigDense, model: SVGP,
                      Xnew: tf.Tensor) -> tf.Tensor:
    """
  Sample generation subroutine common to each unit test
  """
    Z = model.inducing_variable
    count = 0
    basis = fourier_basis(model.kernel, num_bases=config.num_bases)
    L_joint = None
    samples = []
    while count < config.num_samples:
        # Sample $u ~ N(q_mu, q_sqrt q_sqrt^{T})$
        size = min(config.shard_size, config.num_samples - count)
        shape = model.num_latent_gps, config.num_cond, size
        rvs = tf.random.normal(shape=shape, dtype=floatx())
        u = tf.transpose(model.q_sqrt @ rvs)

        # Generate draws from the joint distribution $p(f(X), g(Z))$
        (f, fnew), L_joint = common.sample_joint(model.kernel,
                                                 Z,
                                                 Xnew,
                                                 num_samples=size,
                                                 L=L_joint)

        # Solve for update functions
        update_fns = linear_update(Z, u, f, basis=basis)
        samples.append(fnew + update_fns(Xnew))
        count += size

    samples = tf.concat(samples, axis=0)
    if model.mean_function is not None:
        samples += model.mean_function(Xnew)
    return samples
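`linear_update` computes the data-dependent pathwise update that conditions a prior draw on $u$. The exact, dense analogue of this update, written as a sketch under assumed 2D shapes (not the library's implementation), is:

def exact_pathwise_update(Kxz, Kzz, u, fz, diag=None):
    # Matheron-style update: K(x, Z) K(Z, Z)^{-1} (u - f(Z))
    if diag is None:
        diag = default_jitter()
    Kzz = tf.linalg.set_diag(Kzz, tf.linalg.diag_part(Kzz) + diag)
    L = tf.linalg.cholesky(Kzz)
    return Kxz @ tf.linalg.cholesky_solve(L, u - fz)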
Example #4
def _sample_joint_conv2d(kern,
                         Z,
                         Xnew,
                         num_samples: int,
                         L: TensorLike = None,
                         diag: Union[float, tf.Tensor] = None):
    """
  Sample from the joint distribution of $f(X), g(Z)$ via a
  location-scale transform.
  """
    if diag is None:
        diag = default_jitter()

    # Construct joint covariance and compute matrix square root
    if L is None:
        Zp = Z.as_patches  # [M, patch_len]
        Xp = kern.get_patches(Xnew, full_spatial=False)
        P = tf.concat([Zp, tf.reshape(Xp, [-1, Xp.shape[-1]])], axis=0)
        K = kern.kernel(P, full_cov=True)
        K = tf.linalg.set_diag(K, tf.linalg.diag_part(K) + diag)
        L = tf.linalg.cholesky(K)
        L = tf.tile(L[None], [kern.channels_out, 1, 1])  # TODO: Improve me

    # Draw samples using a location-scale transform
    spatial_in = Xnew.shape[-3:-1]
    spatial_out = kern.get_spatial_out(spatial_in)
    rvs = tf.random.normal(list(L.shape[:-1]) + [num_samples], dtype=floatx())
    draws = tf.transpose(L @ rvs)  # [S, M + P, L]
    fz, fx = tf.split(draws, [len(Z), -1], axis=1)

    # Reorganize $f(X)$ as a 3d feature map
    fx_shape = [num_samples, Xnew.shape[0]] + spatial_out + [kern.channels_out]
    fx = tf.reshape(fx, fx_shape)
    return (fz, fx), L
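The location-scale transform used throughout these helpers is the standard recipe for correlated Gaussian draws: if $z \sim N(0, I)$ and $L L^T = K$, then $L z \sim N(0, K)$. A generic sketch:

def location_scale_sample(K, num_samples, diag=None):
    # Draws num_samples correlated Gaussians from N(0, K) as x = L z
    if diag is None:
        diag = default_jitter()
    K = tf.linalg.set_diag(K, tf.linalg.diag_part(K) + diag)
    L = tf.linalg.cholesky(K)
    rvs = tf.random.normal(list(L.shape[:-1]) + [num_samples], dtype=K.dtype)
    return L @ rvs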
Example #5
def main(config):
    assert config is not None, "config is required"
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    Z_shape = config.num_cond, config.input_dims
    for cls in SupportedBaseKernels:
        minval = config.rel_lengthscales_min * (config.input_dims**0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims**0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        base = cls(lengthscales=lenscales, variance=config.kernel_variance)
        kern = kernels.SharedIndependent(base, output_dim=2)

        Z = SharedIndependentInducingVariables(
            InducingPoints(tf.random.uniform(Z_shape, dtype=floatx())))
        Kuu = covariances.Kuu(Z, kern, jitter=gpflow_config.default_jitter())

        # First latent GP: point-mass q(u); second: q(u) with prior covariance
        q_sqrt = tf.stack([
            tf.zeros(2 * [config.num_cond], dtype=floatx()),
            tf.linalg.cholesky(Kuu)
        ])

        const = tf.random.normal([2], dtype=floatx())
        model = SVGP(kernel=kern,
                     likelihood=None,
                     inducing_variable=Z,
                     mean_function=mean_functions.Constant(c=const),
                     q_sqrt=q_sqrt,
                     whiten=False,
                     num_latent_gps=2)

        mf, Sff = subroutine(config, model, X)
        mg, Sgg = model.predict_f(X, full_cov=True)
        tol = config.error_tol
        assert allclose(mf, mg, tol, tol)
        assert allclose(Sff, Sgg, tol, tol)
Example #6
def test_dense(config: ConfigFourierDense = None):
    if config is None:
        config = ConfigFourierDense()

    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    Z = tf.random.uniform([config.num_cond, config.input_dims], dtype=floatx())
    Z = InducingPoints(Z)
    for cls in SupportedBaseKernels:
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=config.lengthscales_min,
                                      maxval=config.lengthscales_max,
                                      dtype=floatx())

        kern = cls(lengthscales=lenscales, variance=config.kernel_variance)
        _test_fourier_dense_common(config, kern, X, Z)
Example #7
def test_depthwise_conv2d(config: ConfigConv2d = None):
    if config is None:
        config = ConfigConv2d()

    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X_shape = [config.num_test] + config.image_shape + [config.channels_in]
    X = tf.reshape(tf.range(tf.reduce_prod(X_shape), dtype=floatx()), X_shape)
    X /= tf.reduce_max(X)

    patch_len = int(tf.reduce_prod(config.patch_shape))
    for cls in SupportedBaseKernels:
        minval = config.rel_lengthscales_min * (patch_len**0.5)
        maxval = config.rel_lengthscales_max * (patch_len**0.5)
        lenscales = tf.random.uniform(shape=[config.channels_in, patch_len],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        base = cls(lengthscales=lenscales, variance=config.kernel_variance)
        kern = kernels.DepthwiseConv2d(kernel=base,
                                       image_shape=config.image_shape,
                                       patch_shape=config.patch_shape,
                                       channels_in=config.channels_in,
                                       channels_out=config.channels_out,
                                       dilations=config.dilations,
                                       padding=config.padding,
                                       strides=config.strides)

        kern._weights = tf.random.normal(kern._weights.shape, dtype=floatx())

        # Test full and shared inducing images
        Z_shape = [config.num_cond] + config.patch_shape + [config.channels_in]
        Zsrc = tf.random.normal(Z_shape, dtype=floatx())
        for Z in (DepthwiseInducingImages(Zsrc),
                  SharedDepthwiseInducingImages(Zsrc[..., :1],
                                                config.channels_in)):

            test = _Kfu_depthwise_conv2d_fallback(Z, kern, X)
            assert allclose(covariances.Kfu(Z, kern, X), test)
Example #8
def main(config):
    assert config is not None, "config is required"
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_cond, config.input_dims], dtype=floatx())
    Xnew = tf.random.uniform([config.num_test, config.input_dims],
                             dtype=floatx())
    for cls in SupportedBaseKernels:
        minval = config.rel_lengthscales_min * (config.input_dims**0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims**0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        kern = cls(lengthscales=lenscales, variance=config.kernel_variance)
        const = tf.random.normal([1], dtype=floatx())

        # Sample training targets from the model's own prior
        K = kern(X, full_cov=True)
        K = tf.linalg.set_diag(K,
                               tf.linalg.diag_part(K) + config.noise_variance)
        L = tf.linalg.cholesky(K)
        y = L @ tf.random.normal([L.shape[-1], 1], dtype=floatx()) + const

        model = GPR(kernel=kern,
                    noise_variance=config.noise_variance,
                    data=(X, y),
                    mean_function=mean_functions.Constant(c=const))

        mf, Sff = subroutine(config, model, Xnew)
        mg, Sgg = model.predict_f(Xnew, full_cov=True)

        tol = config.error_tol
        assert allclose(mf, mg, tol, tol)
        assert allclose(Sff, Sgg, tol, tol)
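The targets above are exact prior draws: with $K_y = K(X, X) + \sigma^2 I$ and $L = chol(K_y)$, we have $y = L \epsilon + c \sim N(c, K_y)$, so the GPR posterior being tested is exactly the model the data came from. The same draw as a standalone helper (hypothetical name):

def sample_prior_targets(kern, X, noise_variance, const):
    # y ~ N(const, K(X, X) + noise_variance * I)
    K = kern(X, full_cov=True)
    K = tf.linalg.set_diag(K, tf.linalg.diag_part(K) + noise_variance)
    L = tf.linalg.cholesky(K)
    return L @ tf.random.normal([X.shape[0], 1], dtype=floatx()) + const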
Example #9
def test_conv2d(config: ConfigFourierConv2d = None):
  """
  TODO: Consider separating out the test for Conv2dTranspose since it only
  supports a subset of strides/dilations.
  """
  if config is None:
    config = ConfigFourierConv2d()

  tf.random.set_seed(config.seed)
  gpflow_config.set_default_float(config.floatx)
  gpflow_config.set_default_jitter(config.jitter)

  X_shape = [config.num_test] + config.image_shape + [config.channels_in]
  X = tf.reshape(tf.range(tf.reduce_prod(X_shape), dtype=floatx()), X_shape)
  X /= tf.reduce_max(X)

  Z_shape = [config.num_cond] + config.patch_shape + [config.channels_in]
  Zsrc = tf.random.normal(Z_shape, dtype=floatx())
  Z = inducing_variables.InducingImages(Zsrc)

  patch_len = config.channels_in * config.patch_shape[0] * config.patch_shape[1]
  for base_cls in SupportedBaseKernels:
    minval = config.rel_lengthscales_min * (patch_len ** 0.5)
    maxval = config.rel_lengthscales_max * (patch_len ** 0.5)
    lenscales = tf.random.uniform(shape=[patch_len],
                                  minval=minval,
                                  maxval=maxval,
                                  dtype=floatx())

    base = base_cls(lengthscales=lenscales, variance=config.kernel_variance)
    for cls in (kernels.Conv2d, kernels.Conv2dTranspose):
      kern = cls(kernel=base,
                 image_shape=config.image_shape,
                 patch_shape=config.patch_shape,
                 channels_in=config.channels_in,
                 channels_out=config.num_latent_gps,
                 dilations=config.dilations,
                 strides=config.strides)

      _test_fourier_conv2d_common(config, kern, X, Z)
Example #10
def main(config):
    assert config is not None, "config is required"
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    allK = []
    allZ = []
    Z_shape = config.num_cond, config.input_dims
    for cls in SupportedBaseKernels:
        minval = config.rel_lengthscales_min * (config.input_dims**0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims**0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        rel_variance = tf.random.uniform(shape=[],
                                         minval=0.9,
                                         maxval=1.1,
                                         dtype=floatx())

        allK.append(cls(lengthscales=lenscales,
                        variance=config.kernel_variance * rel_variance))
        allZ.append(InducingPoints(tf.random.uniform(Z_shape, dtype=floatx())))

    kern = kernels.SeparateIndependent(allK)
    Z = SeparateIndependentInducingVariables(allZ)

    # Randomly scale each output's Cholesky factor to vary q(u)
    Kuu = covariances.Kuu(Z, kern, jitter=gpflow_config.default_jitter())
    q_sqrt = (tf.linalg.cholesky(Kuu)
              * tf.random.uniform(shape=[kern.num_latent_gps, 1, 1],
                                  minval=0.0,
                                  maxval=0.5,
                                  dtype=floatx()))

    const = tf.random.normal([len(kern.kernels)], dtype=floatx())
    model = SVGP(kernel=kern,
                 likelihood=None,
                 inducing_variable=Z,
                 mean_function=mean_functions.Constant(c=const),
                 q_sqrt=q_sqrt,
                 whiten=False,
                 num_latent_gps=len(allK))

    mf, Sff = subroutine(config, model, X)
    mg, Sgg = model.predict_f(X, full_cov=True)
    tol = config.error_tol
    assert allclose(mf, mg, tol, tol)
    assert allclose(Sff, Sgg, tol, tol)
Example #11
def test_depthwise_conv2d(config: ConfigFourierConv2d = None):
  if config is None:
    config = ConfigFourierConv2d()

  assert config.num_bases % config.channels_in == 0
  tf.random.set_seed(config.seed)
  gpflow_config.set_default_float(config.floatx)
  gpflow_config.set_default_jitter(config.jitter)

  X_shape = [config.num_test] + config.image_shape + [config.channels_in]
  X = tf.random.uniform(X_shape, dtype=floatx())

  img_shape = [config.num_cond] + config.patch_shape + [config.channels_in]
  Zsrc = tf.random.normal(img_shape, dtype=floatx())
  Z = inducing_variables.DepthwiseInducingImages(Zsrc)

  patch_len = config.patch_shape[0] * config.patch_shape[1]
  for base_cls in SupportedBaseKernels:
    minval = config.rel_lengthscales_min * (patch_len ** 0.5)
    maxval = config.rel_lengthscales_max * (patch_len ** 0.5)
    lenscales = tf.random.uniform(shape=[config.channels_in, patch_len],
                                  minval=minval,
                                  maxval=maxval,
                                  dtype=floatx())

    base = base_cls(lengthscales=lenscales, variance=config.kernel_variance)
    for cls in (kernels.DepthwiseConv2d,):
      kern = cls(kernel=base,
                 image_shape=config.image_shape,
                 patch_shape=config.patch_shape,
                 channels_in=config.channels_in,
                 channels_out=config.num_latent_gps,
                 dilations=config.dilations,
                 strides=config.strides)

      _test_fourier_conv2d_common(config, kern, X, Z)
Example #12
def _test_cg_svgp(config: ConfigDense,
                  model: SVGP,
                  Xnew: tf.Tensor) -> tf.Tensor:
  """
  Sample generation subroutine common to each unit test.
  """
  # Prepare preconditioner for CG
  Z = model.inducing_variable
  Kff = covariances.Kuu(Z, model.kernel, jitter=0)
  max_rank = config.num_cond // (2 if config.num_cond > 1 else 1)
  preconditioner = get_default_preconditioner(Kff,
                                              diag=default_jitter(),
                                              max_rank=max_rank)

  count = 0
  samples = []
  L_joint = None
  while count < config.num_samples:
    # Sample $u ~ N(q_mu, q_sqrt q_sqrt^{T})$
    size = min(config.shard_size, config.num_samples - count)
    shape = model.num_latent_gps, config.num_cond, size
    rvs = tf.random.normal(shape=shape, dtype=floatx())
    u = tf.transpose(model.q_sqrt @ rvs)

    # Generate draws from the joint distribution $p(f(X), g(Z))$
    (f, fnew), L_joint = common.sample_joint(model.kernel,
                                             Z,
                                             Xnew,
                                             num_samples=size,
                                             L=L_joint)

    # Solve for update functions
    update_fns = cg_update(model.kernel,
                           Z,
                           u,
                           f,
                           tol=1e-6,
                           max_iter=config.num_cond,
                           preconditioner=preconditioner)

    samples.append(fnew + update_fns(Xnew))
    count += size

  samples = tf.concat(samples, axis=0)
  if model.mean_function is not None:
    samples += model.mean_function(Xnew)
  return samples
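`cg_update` computes the same update term as the exact solver but replaces the Cholesky solve with preconditioned conjugate gradients, which only touches $K(Z, Z)$ through matrix-vector products. A minimal dense CG sketch (illustrative only, not the preconditioned implementation used above):

def conjugate_gradient(A, b, tol=1e-6, max_iter=100):
    # Solves A x = b for symmetric positive-definite A
    x = tf.zeros_like(b)
    r = b  # residual b - A x for the initial guess x = 0
    p = r
    rs = tf.reduce_sum(r * r)
    for _ in range(max_iter):
        Ap = A @ p
        alpha = rs / tf.reduce_sum(p * Ap)
        x = x + alpha * p
        r = r - alpha * Ap
        rs_new = tf.reduce_sum(r * r)
        if tf.sqrt(rs_new) < tol:
            break
        p = r + (rs_new / rs) * p
        rs = rs_new
    return x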
Example #13
def _sample_joint_inducing(kern,
                           Z,
                           Xnew,
                           num_samples: int,
                           L: TensorLike = None,
                           diag: Union[float, tf.Tensor] = None):
    """
  Sample from the joint distribution of $f(X), g(Z)$ via a
  location-scale transform.
  """
    if diag is None:
        diag = default_jitter()

    # Construct joint covariance and compute matrix square root
    has_multiple_outputs = isinstance(kern, MultioutputKernel)
    if L is None:
        if has_multiple_outputs:
            Kff = kern(Xnew, full_cov=True, full_output_cov=False)
        else:
            Kff = kern(Xnew, full_cov=True)
        Kuu = covariances.Kuu(Z, kern, jitter=0.0)
        Kuf = covariances.Kuf(Z, kern, Xnew)
        if (isinstance(kern, SharedIndependent)
                and isinstance(Z, SharedIndependentInducingVariables)):
            Kuu = tf.tile(Kuu[None], [Kff.shape[0], 1, 1])
            Kuf = tf.tile(Kuf[None], [Kff.shape[0], 1, 1])

        # Assemble the joint covariance of (g(Z), f(X)):
        #   K = [[Kuu, Kuf],
        #        [Kfu, Kff]]
        K = tf.concat([tf.concat([Kuu, Kuf], axis=-1),
                       tf.concat([tf.linalg.adjoint(Kuf), Kff], axis=-1)],
                      axis=-2)

        K = tf.linalg.set_diag(K, tf.linalg.diag_part(K) + diag)
        L = tf.linalg.cholesky(K)

    # Draw samples using a location-scale transform
    rvs = tf.random.normal(list(L.shape[:-1]) + [num_samples], dtype=floatx())
    draws = L @ rvs  # [L, M + N, S] or [M + N, S]
    if not has_multiple_outputs:
        draws = tf.expand_dims(draws, 0)

    return tf.split(tf.transpose(draws), [-1, Xnew.shape[0]], axis=-2), L
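Each of the `_sample_joint_*` helpers returns the Cholesky factor `L` alongside the draws so that repeated calls, such as the sharded while-loops above, can amortize the factorization. A hypothetical usage:

# First call factorizes the joint covariance; later calls reuse L.
(fz, fx), L_joint = _sample_joint_inducing(kern, Z, Xnew, num_samples=32)
(fz2, fx2), _ = _sample_joint_inducing(kern, Z, Xnew, num_samples=32, L=L_joint)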
Example #14
def _sample_joint_fallback(kern,
                           X,
                           Xnew,
                           num_samples: int,
                           L: TensorLike = None,
                           diag: TensorLike = None):
    """
  Sample from the joint distribution of $f(X), g(Z)$ via a
  location-scale transform.
  """
    if diag is None:
        diag = default_jitter()

    if L is None:
        K = kern(tf.concat([X, Xnew], axis=-2), full_cov=True)
        K = tf.linalg.set_diag(K, tf.linalg.diag_part(K) + diag)
        L = tf.linalg.cholesky(K)

    # Draw samples using a location-scale transform
    rvs = tf.random.normal(list(L.shape[:-1]) + [num_samples], dtype=floatx())
    draws = tf.expand_dims(L @ rvs, 0)  # [1, N + T, S]
    return tf.split(tf.transpose(draws), [-1, Xnew.shape[0]], axis=-2), L
Example #15
def _test_exact_svgp(config: Union[ConfigDense, ConfigConv2d], model: SVGP,
                     Xnew: tf.Tensor) -> tf.Tensor:
    """
  Sample generation subroutine common to each unit test
  """
    # Precompute Cholesky factor (optional)
    Z = model.inducing_variable
    Kuu = covariances.Kuu(Z, model.kernel, jitter=default_jitter())
    Luu = tf.linalg.cholesky(Kuu)

    count = 0
    L_joint = None
    samples = []
    while count < config.num_samples:
        # Sample $u ~ N(q_mu, q_sqrt q_sqrt^{T})$
        size = min(config.shard_size, config.num_samples - count)
        shape = model.num_latent_gps, config.num_cond, size
        rvs = tf.random.normal(shape=shape, dtype=floatx())
        u = tf.transpose(model.q_sqrt @ rvs)

        # Generate draws from the joint distribution $p(f(X), g(Z))$
        (f, fnew), L_joint = common.sample_joint(model.kernel,
                                                 Z,
                                                 Xnew,
                                                 num_samples=size,
                                                 L=L_joint)

        # Solve for update functions
        update_fns = exact_update(model.kernel, Z, u, f, L=Luu)
        samples.append(fnew + update_fns(Xnew))
        count += size

    samples = tf.concat(samples, axis=0)
    if model.mean_function is not None:
        samples += model.mean_function(Xnew)
    return samples
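Note the design choice here: `Kuu` is factorized once outside the sampling loop and passed to `exact_update` via `L=Luu`, so the cubic-in-M cost of the Cholesky factorization is paid once per model rather than once per shard.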
Example #16
def main(config):
    assert config is not None, "config is required"
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X_shape = [config.num_test] + config.image_shape + [config.channels_in]
    X = tf.reshape(tf.range(tf.reduce_prod(X_shape), dtype=floatx()), X_shape)
    X /= tf.reduce_max(X)

    patch_len = config.channels_in * int(tf.reduce_prod(config.patch_shape))
    for base_cls in SupportedBaseKernels:
        minval = config.rel_lengthscales_min * (patch_len**0.5)
        maxval = config.rel_lengthscales_max * (patch_len**0.5)
        lenscales = tf.random.uniform(shape=[patch_len],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        base = base_cls(lengthscales=lenscales,
                        variance=config.kernel_variance)
        Z_shape = [config.num_cond] + config.patch_shape + [config.channels_in]
        for cls in (kernels_ext.Conv2d, kernels_ext.Conv2dTranspose):
            kern = cls(kernel=base,
                       image_shape=config.image_shape,
                       patch_shape=config.patch_shape,
                       channels_in=config.channels_in,
                       channels_out=config.num_latent_gps,
                       strides=config.strides,
                       padding=config.padding,
                       dilations=config.dilations)

            Z = InducingImages(tf.random.uniform(Z_shape, dtype=floatx()))
            q_sqrt = tf.linalg.cholesky(covariances.Kuu(Z, kern))
            q_sqrt *= tf.random.uniform([config.num_latent_gps, 1, 1],
                                        minval=0.0,
                                        maxval=0.5,
                                        dtype=floatx())

            # TODO: GPflow's SVGP class is not set up to support outputs
            #       defined as spatial feature maps. For now, we content
            #       ourselves with the following hack...
            const = tf.random.normal([config.num_latent_gps], dtype=floatx())
            mean_function = lambda x: const

            model = SVGP(kernel=kern,
                         likelihood=None,
                         mean_function=mean_function,
                         inducing_variable=Z,
                         q_sqrt=q_sqrt,
                         whiten=False,
                         num_latent_gps=config.num_latent_gps)

            mf, Sff = subroutine(config, model, X)
            mg, Sgg = model.predict_f(X, full_cov=True)

            tol = config.error_tol
            assert allclose(mf, mg, tol, tol)
            assert allclose(Sff, Sgg, tol, tol)