def test_check_needless():
    complete_prior = {
        "lengthscale": tfd.Gamma(1.0, 1.0),
        "variance": tfd.Gamma(2.0, 2.0),
        "obs_noise": tfd.Gamma(3.0, 3.0),
        "latent": tfd.Normal(loc=0.0, scale=1.0),
    }
    posterior = Prior(kernel=RBF()) * Bernoulli()
    priors = prior_checks(posterior, complete_prior)
    assert priors == complete_prior
def prior_checks(gp: NonConjugatePosterior, priors: dict) -> dict:
    if "latent" in priors:
        latent_prior = priors["latent"]
        if latent_prior.name != "Normal":
            warnings.warn(
                f"A {latent_prior.name} distribution prior has been placed on"
                " the latent function. It is strongly advised that a"
                " unit-Gaussian prior is used."
            )
        return priors
    else:
        # No latent prior supplied: default to a unit Gaussian.
        priors["latent"] = tfd.Normal(loc=0.0, scale=1.0)
        return priors
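# Usage sketch for `prior_checks` (hypothetical, mirroring the test above):
# when no "latent" entry is supplied, a unit-Gaussian prior is filled in.
#
#     priors = prior_checks(posterior, {})
#     assert priors["latent"].name == "Normal"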
def test_transformed_distributions():
    from numpy.testing import assert_allclose
    import numpyro.distributions as dist
    from jax import random
    from tensorflow_probability.substrates.jax import (
        bijectors as tfb,
        distributions as tfd,
    )

    # The same log-normal, built once with NumPyro and once with TFP-on-JAX.
    d = dist.TransformedDistribution(dist.Normal(0, 1), dist.transforms.ExpTransform())
    d1 = tfd.TransformedDistribution(tfd.Normal(0, 1), tfb.Exp())
    x = random.normal(random.PRNGKey(0), (1000,))
    d_x = d.log_prob(x).sum()
    d1_x = d1.log_prob(x).sum()
    assert_allclose(d_x, d1_x)
def test_sample_unwrapped_mixture_same_family():
    import numpyro
    from jax import random
    from tensorflow_probability.substrates.jax import distributions as tfd

    # Test that no error is raised when sampling an unwrapped TFP mixture.
    with numpyro.handlers.seed(rng_seed=random.PRNGKey(0)):
        numpyro.sample(
            "sample",
            tfd.MixtureSameFamily(
                mixture_distribution=tfd.Categorical(probs=[0.3, 0.7]),
                components_distribution=tfd.Normal(
                    loc=[-1.0, 1.0], scale=[0.1, 0.5]  # one for each component
                ),
            ),
        )
def mll(
    params: dict,
    x: Array,
    y: Array,
    priors: dict = {"latent": tfd.Normal(loc=0.0, scale=1.0)},
):
    # `gp`, `transform`, `negative`, and `jitter` are free variables bound
    # by an enclosing scope (see the factory sketch after the next variant).
    params = transform(params)
    n = x.shape[0]
    link = link_function(gp.likelihood)
    gram_matrix = gram(gp.prior.kernel, x, params)
    gram_matrix += I(n) * jitter  # jitter for numerical stability
    L = jnp.linalg.cholesky(gram_matrix)
    # Whitened parameterisation: latent function values are F = L @ v.
    F = jnp.matmul(L, params["latent"])
    rv = link(F)
    ll = jnp.sum(rv.log_prob(y))
    priors = prior_checks(gp, priors)
    log_prior_density = evaluate_prior(params, priors)
    constant = jnp.array(-1.0) if negative else jnp.array(1.0)
    return constant * (ll + log_prior_density)
def mll(
    params: dict,
    training: Dataset,
    priors: dict = {"latent": tfd.Normal(loc=0.0, scale=1.0)},
    static_params: dict = None,
):
    x, y = training.X, training.y
    n = training.n
    params = transform(params)
    # Fold any fixed (non-trainable) parameters back into the full set.
    if static_params:
        params = concat_dictionaries(params, transform(static_params))
    link = link_function(gp.likelihood)
    gram_matrix = gram(gp.prior.kernel, x, params)
    gram_matrix += I(n) * jitter
    L = jnp.linalg.cholesky(gram_matrix)
    F = jnp.matmul(L, params["latent"])
    rv = link(F)
    ll = jnp.sum(rv.log_prob(y))
    priors = prior_checks(gp, priors)
    log_prior_density = evaluate_prior(params, priors)
    constant = jnp.array(-1.0) if negative else jnp.array(1.0)
    return constant * (ll + log_prior_density)
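# Both `mll` variants above close over `gp`, `transform`, `negative`, and
# `jitter` from an enclosing scope. A minimal sketch of such a factory;
# the name `marginal_ll` and this exact wiring are assumptions for
# illustration, not the library's confirmed API.
def marginal_ll(gp, transform, negative: bool = False, jitter: float = 1e-6):
    def mll(params, training, priors=None, static_params=None):
        ...  # body as in the second variant above
    return mll

# Usage sketch: `neg_mll = marginal_ll(posterior, transform, negative=True)`
# yields an objective suitable for gradient-based minimisation.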
def test_lpd(x):
    val = jnp.array(x)
    dist = tfd.Normal(loc=0.0, scale=1.0)
    lpd = log_density(val, dist)
    assert lpd is not None
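# `log_density` is not defined in this section. A minimal sketch inferred
# from the call in `test_lpd`; the None-handling branch is an assumption.
def log_density(param, density):
    if density is None:
        return jnp.array(0.0)  # no prior placed on this parameter
    return jnp.sum(density.log_prob(param))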
def model(labels):
    # `dim` and `data` are assumed to be defined at module scope.
    coefs = numpyro.sample("coefs", tfd.Normal(jnp.zeros(dim), jnp.ones(dim)))
    logits = numpyro.deterministic("logits", jnp.sum(coefs * data, axis=-1))
    return numpyro.sample("obs", tfd.Bernoulli(logits=logits), obs=labels)
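# A hedged usage sketch for `model`: logistic regression fitted with NUTS.
# The toy `dim`, `data`, and `labels` below are illustrative stand-ins for
# the module-level names the model expects.
import jax.numpy as jnp
from jax import random
from numpyro.infer import MCMC, NUTS

dim = 3
data = random.normal(random.PRNGKey(1), (100, dim))
labels = (data.sum(-1) > 0).astype(jnp.float32)

mcmc = MCMC(NUTS(model), num_warmup=500, num_samples=500)
mcmc.run(random.PRNGKey(0), labels)
coef_samples = mcmc.get_samples()["coefs"]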
def f(x):
    # Trace a single sample site and return the distribution recorded there.
    with numpyro.handlers.seed(rng_seed=0), numpyro.handlers.trace() as tr:
        numpyro.sample("x", tfd.Normal(x, 1))
    return tr["x"]["fn"]
def spectral_density(kernel: SpectralRBF) -> tfd.Distribution:
    # By Bochner's theorem, the spectral density of an RBF kernel is
    # Gaussian; a standard normal is returned here.
    return tfd.Normal(loc=jnp.array(0.0), scale=jnp.array(1.0))
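# Usage sketch: drawing random Fourier frequencies from the kernel's
# spectral density, as in a random-Fourier-feature approximation.
# `kernel`, `num_basis`, and `input_dim` are hypothetical placeholders,
# not names defined in this section.
#
#     density = spectral_density(kernel)
#     omega = density.sample(seed=random.PRNGKey(0),
#                            sample_shape=(num_basis, input_dim))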