def _test_linear_svgp(config: ConfigDense, model: SVGP, Xnew: tf.Tensor) -> tf.Tensor:
    """
    Draw pathwise posterior samples at ``Xnew`` from an SVGP using a
    Fourier-feature basis with linear (decoupled) updates.

    Samples are generated in shards of at most ``config.shard_size`` and
    concatenated along the sample axis.
    """
    Z = model.inducing_variable
    basis = fourier_basis(model.kernel, num_bases=config.num_bases)

    draws = []
    num_drawn = 0
    L_joint = None  # joint Cholesky factor is reused across shards
    while num_drawn < config.num_samples:
        shard = min(config.shard_size, config.num_samples - num_drawn)

        # Sample $u ~ N(q_mu, q_sqrt q_sqrt^{T})$
        eps = tf.random.normal(
            shape=(model.num_latent_gps, config.num_cond, shard), dtype=floatx())
        u = tf.transpose(model.q_sqrt @ eps)

        # Generate draws from the joint distribution $p(f(X), g(Z))$
        (f, fnew), L_joint = common.sample_joint(
            model.kernel, Z, Xnew, num_samples=shard, L=L_joint)

        # Solve for update functions conditioned on u and apply them
        update_fns = linear_update(Z, u, f, basis=basis)
        draws.append(fnew + update_fns(Xnew))
        num_drawn += shard

    samples = tf.concat(draws, axis=0)
    if model.mean_function is not None:
        samples += model.mean_function(Xnew)
    return samples
def _test_cg_svgp(config: ConfigDense, model: SVGP, Xnew: tf.Tensor) -> tf.Tensor:
    """
    Draw pathwise posterior samples at ``Xnew`` from an SVGP, solving the
    conditioning step with preconditioned conjugate gradients.

    Samples are generated in shards of at most ``config.shard_size`` and
    concatenated along the sample axis.
    """
    # Build the CG preconditioner from the inducing-point covariance
    Z = model.inducing_variable
    Kff = covariances.Kuu(Z, model.kernel, jitter=0)
    max_rank = config.num_cond // (2 if config.num_cond > 1 else 1)
    preconditioner = get_default_preconditioner(
        Kff, diag=default_jitter(), max_rank=max_rank)

    draws = []
    num_drawn = 0
    L_joint = None  # joint Cholesky factor is reused across shards
    while num_drawn < config.num_samples:
        shard = min(config.shard_size, config.num_samples - num_drawn)

        # Sample $u ~ N(q_mu, q_sqrt q_sqrt^{T})$
        eps = tf.random.normal(
            shape=(model.num_latent_gps, config.num_cond, shard), dtype=floatx())
        u = tf.transpose(model.q_sqrt @ eps)

        # Generate draws from the joint distribution $p(f(X), g(Z))$
        (f, fnew), L_joint = common.sample_joint(
            model.kernel, Z, Xnew, num_samples=shard, L=L_joint)

        # Solve for update functions via CG and apply them
        update_fns = cg_update(
            model.kernel, Z, u, f,
            tol=1e-6,
            max_iter=config.num_cond,
            preconditioner=preconditioner)
        draws.append(fnew + update_fns(Xnew))
        num_drawn += shard

    samples = tf.concat(draws, axis=0)
    if model.mean_function is not None:
        samples += model.mean_function(Xnew)
    return samples
def _test_exact_svgp(config: Union[ConfigDense, ConfigConv2d], model: SVGP, Xnew: tf.Tensor) -> tf.Tensor:
    """
    Draw pathwise posterior samples at ``Xnew`` from an SVGP using exact
    (Cholesky-based) conditioning.

    Samples are generated in shards of at most ``config.shard_size`` and
    concatenated along the sample axis.
    """
    # Precompute the Cholesky factor of Kuu once, shared by all shards
    Z = model.inducing_variable
    Kuu = covariances.Kuu(Z, model.kernel, jitter=default_jitter())
    Luu = tf.linalg.cholesky(Kuu)

    draws = []
    num_drawn = 0
    L_joint = None  # joint Cholesky factor is reused across shards
    while num_drawn < config.num_samples:
        shard = min(config.shard_size, config.num_samples - num_drawn)

        # Sample $u ~ N(q_mu, q_sqrt q_sqrt^{T})$
        eps = tf.random.normal(
            shape=(model.num_latent_gps, config.num_cond, shard), dtype=floatx())
        u = tf.transpose(model.q_sqrt @ eps)

        # Generate draws from the joint distribution $p(f(X), g(Z))$
        (f, fnew), L_joint = common.sample_joint(
            model.kernel, Z, Xnew, num_samples=shard, L=L_joint)

        # Solve for update functions with the precomputed factor and apply them
        update_fns = exact_update(model.kernel, Z, u, f, L=Luu)
        draws.append(fnew + update_fns(Xnew))
        num_drawn += shard

    samples = tf.concat(draws, axis=0)
    if model.mean_function is not None:
        samples += model.mean_function(Xnew)
    return samples