Example #1
 def guide():
     loc = numpyro.param("loc", np.zeros(3))
     cov = numpyro.param("cov",
                         np.eye(3),
                         constraint=constraints.positive_definite)
     x = numpyro.sample("x", dist.MultivariateNormal(loc, cov))
     with numpyro.plate("plate", len(data)):
         with handlers.mask(mask=np.invert(mask)):
             numpyro.sample("y_unobserved",
                            dist.MultivariateNormal(x, np.eye(3)))
Example #2
 def model():
     x = numpyro.sample("x",
                        dist.MultivariateNormal(np.zeros(3), np.eye(3)))
     with numpyro.plate("plate", len(data)):
         y = numpyro.sample("y",
                            dist.MultivariateNormal(x, np.eye(3)),
                            obs=data,
                            obs_mask=mask)
         if not_jax_tracer(y):
             assert ((y == data).all(-1) == mask).all()
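Examples #1 and #2 form a guide/model pair for partially observed data. A minimal SVI driver is sketched below; the concrete `data` and `mask` values and the optimizer settings are assumptions introduced here for illustration.

import numpy as np
import jax.random as random
import numpyro
from numpyro.infer import SVI, Trace_ELBO

data = np.random.randn(10, 3)      # assumed observations
mask = np.random.rand(10) < 0.5    # assumed: True where y is observed

optimizer = numpyro.optim.Adam(step_size=0.01)
svi = SVI(model, guide, optimizer, Trace_ELBO())
svi_result = svi.run(random.PRNGKey(0), 2000)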
Example #3
 def model():
     x0 = numpyro.sample(
         "x0", dist.MultivariateNormal(loc=jnp.zeros(D), covariance_matrix=cov00)
     )
     posterior_loc1 = jnp.matmul(cov_10_cov00_inv, x0)
     numpyro.sample(
         "x1",
         dist.MultivariateNormal(
             loc=posterior_loc1, covariance_matrix=posterior_cov1
         ),
     )
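The conditioning terms above (`cov00`, `cov_10_cov00_inv`, `posterior_cov1`) are assumed to be precomputed. A sketch of that precomputation from a joint covariance; the concrete matrix below is a hypothetical example, any symmetric positive-definite matrix works.

import jax.numpy as jnp

D = 2
# Hypothetical joint covariance of the stacked vector (x0, x1).
joint_cov = jnp.array([[2.0, 0.5, 0.3, 0.1],
                       [0.5, 1.5, 0.2, 0.4],
                       [0.3, 0.2, 1.8, 0.6],
                       [0.1, 0.4, 0.6, 1.2]])
cov00, cov01 = joint_cov[:D, :D], joint_cov[:D, D:]
cov10, cov11 = joint_cov[D:, :D], joint_cov[D:, D:]
cov_10_cov00_inv = cov10 @ jnp.linalg.inv(cov00)   # Sigma_10 Sigma_00^{-1}
posterior_cov1 = cov11 - cov_10_cov00_inv @ cov01  # Schur complement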
Example #4
 def kernel(x, y):
     if self.precond_mode == 'const':
         wxs = jnp.array([1.])
         wys = jnp.array([1.])
     else:
         wxs = jax.nn.softmax(
             jax.vmap(lambda z, q_inv: dist.MultivariateNormal(z, posdef(q_inv)).log_prob(x))(particles, qs_inv))
         wys = jax.nn.softmax(
             jax.vmap(lambda z, q_inv: dist.MultivariateNormal(z, posdef(q_inv)).log_prob(y))(particles, qs_inv))
     return jnp.sum(
         jax.vmap(lambda qs, qis, wx, wy: wx * wy * (qis @ inner_kernel(qs @ x, qs @ y) @ qis.transpose()))(
             qs_sqrt, qs_inv_sqrt, wxs, wys), axis=0)
Example #5
 def model(cov):
     w = numpyro.sample("w", dist.Normal(0, 1000).expand([2]).to_event(1))
     x = numpyro.sample("x", dist.Normal(0, 1000).expand([1]).to_event(1))
     y = numpyro.sample("y", dist.Normal(0, 1000).expand([1]).to_event(1))
     z = numpyro.sample("z", dist.Normal(0, 1000).expand([1]).to_event(1))
     wxyz = jnp.concatenate([w, x, y, z])
     numpyro.sample("obs", dist.MultivariateNormal(jnp.zeros(5), cov), obs=wxyz)
Example #6
def twoh_c_kf(T=None, T_forecast=15, obs=None):
    """Define Kalman Filter with two hidden variates."""
    T = len(obs) if T is None else T
    
    # Define priors over beta, tau, sigma, z_1 (keep the shapes in mind)
    #W = numpyro.sample(name="W", fn=dist.Normal(loc=jnp.zeros((2,4)), scale=jnp.ones((2,4))))
    beta = numpyro.sample(name="beta", fn=dist.Normal(loc=jnp.array([0.,0.]), scale=jnp.ones(2)))
    tau = numpyro.sample(name="tau", fn=dist.HalfCauchy(scale=jnp.ones(2)))
    sigma = numpyro.sample(name="sigma", fn=dist.HalfCauchy(scale=.1))
    z_prev = numpyro.sample(name="z_1", fn=dist.Normal(loc=jnp.zeros(2), scale=jnp.ones(2)))
    # Define LKJ prior
    L_Omega = numpyro.sample("L_Omega", dist.LKJCholesky(2, 10.))
    Sigma_lower = jnp.matmul(jnp.diag(jnp.sqrt(tau)), L_Omega) # lower cholesky factor of the covariance matrix
    noises = numpyro.sample("noises", fn=dist.MultivariateNormal(loc=jnp.zeros(2), scale_tril=Sigma_lower), sample_shape=(T+T_forecast,))
    # Propagate the dynamics forward using jax.lax.scan
    carry = (beta, z_prev, tau)
    z_collection = [z_prev]
    carry, zs_exp = lax.scan(f, carry, noises, T+T_forecast)
    z_collection = jnp.concatenate((jnp.array(z_collection), zs_exp), axis=0)

    c = numpyro.sample(name="c", fn=dist.Normal(loc=jnp.array([[0.], [0.]]), scale=jnp.ones((2,1))))
    obs_mean = jnp.dot(z_collection[:T,:], c).squeeze()
    pred_mean = jnp.dot(z_collection[T:,:], c).squeeze()

    # Sample the observed y (y_obs)
    numpyro.sample(name="y_obs", fn=dist.Normal(loc=obs_mean, scale=sigma), obs=obs)
    numpyro.sample(name="y_pred", fn=dist.Normal(loc=pred_mean, scale=sigma), obs=None)
Example #7
 def get_posterior(self, params):
     """
     Returns a multivariate Normal posterior distribution.
     """
     transform = self.get_transform(params)
     return dist.MultivariateNormal(transform.loc,
                                    scale_tril=transform.scale_tril)
Example #8
    def sample_kernel(sa_state, model_args=(), model_kwargs=None):
        pe_fn = potential_fn
        if potential_fn_gen:
            pe_fn = potential_fn_gen(*model_args, **model_kwargs)
        zs, pes, loc, scale = sa_state.adapt_state
        # we recompute loc/scale after each iteration to avoid precision loss
        # XXX: consider to expose a setting to do this job periodically
        # to save some computations
        loc = jnp.mean(zs, 0)
        if scale.ndim == 2:
            cov = jnp.cov(zs, rowvar=False, bias=True)
            if cov.shape == ():  # JAX returns scalar for 1D input
                cov = cov.reshape((1, 1))
            cholesky = jnp.linalg.cholesky(cov)
            scale = jnp.where(jnp.any(jnp.isnan(cholesky)), scale, cholesky)
        else:
            scale = jnp.std(zs, 0)

        rng_key, rng_key_z, rng_key_reject, rng_key_accept = random.split(sa_state.rng_key, 4)
        _, unravel_fn = ravel_pytree(sa_state.z)

        z = loc + _sample_proposal(scale, rng_key_z)
        pe = pe_fn(unravel_fn(z))
        pe = jnp.where(jnp.isnan(pe), jnp.inf, pe)
        diverging = (pe - sa_state.potential_energy) > max_delta_energy

        # NB: all terms having the pattern *s will have shape N x ...
        # and all terms having the pattern *s_ will have shape (N + 1) x ...
        locs, scales = _get_proposal_loc_and_scale(zs, loc, scale, z)
        zs_ = jnp.concatenate([zs, z[None, :]])
        pes_ = jnp.concatenate([pes, pe[None]])
        locs_ = jnp.concatenate([locs, loc[None, :]])
        scales_ = jnp.concatenate([scales, scale[None, ...]])
        if scale.ndim == 2:  # dense_mass
            log_weights_ = dist.MultivariateNormal(locs_, scale_tril=scales_).log_prob(zs_) + pes_
        else:
            log_weights_ = dist.Normal(locs_, scales_).log_prob(zs_).sum(-1) + pes_
        # mask invalid values (nan, +inf) by -inf
        log_weights_ = jnp.where(jnp.isfinite(log_weights_), log_weights_, -jnp.inf)
        # choose which point to reject (drop) from the augmented set
        j = random.categorical(rng_key_reject, log_weights_)
        zs = _numpy_delete(zs_, j)
        pes = _numpy_delete(pes_, j)
        loc = locs_[j]
        scale = scales_[j]
        adapt_state = SAAdaptState(zs, pes, loc, scale)

        # NB: weights[-1] / sum(weights) is the probability of rejecting the new sample `z`.
        accept_prob = 1 - jnp.exp(log_weights_[-1] - logsumexp(log_weights_))
        itr = sa_state.i + 1
        n = jnp.where(sa_state.i < wa_steps, itr, itr - wa_steps)
        mean_accept_prob = sa_state.mean_accept_prob + (accept_prob - sa_state.mean_accept_prob) / n

        # XXX: we make a modification of SA sampler in [1]
        # in [1], each MCMC state contains N points `zs`
        # here we do resampling to pick randomly a point from those N points
        k = random.categorical(rng_key_accept, jnp.zeros(zs.shape[0]))
        z = unravel_fn(zs[k])
        pe = pes[k]
        return SAState(itr, z, pe, accept_prob, mean_accept_prob, diverging, adapt_state, rng_key)
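Example #8 is the inner update of NumPyro's SA (sample-adaptive) kernel; user code normally reaches it through the MCMC API. A minimal sketch with a toy model:

import jax.random as random
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, SA

def toy_model():
    numpyro.sample("x", dist.Normal(0.0, 1.0))

mcmc = MCMC(SA(toy_model), num_warmup=1000, num_samples=1000)
mcmc.run(random.PRNGKey(0))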
Example #9
def GP(X, y):

    X = numpyro.deterministic("X", X)

    # Set informative priors on kernel hyperparameters.
    η = numpyro.sample("variance", dist.HalfCauchy(scale=5.0))
    ℓ = numpyro.sample("length_scale", dist.Gamma(2.0, 1.0))
    σ = numpyro.sample("obs_noise", dist.HalfCauchy(scale=5.0))

    # Compute kernel
    K = rbf_kernel(X, X, η, ℓ)
    K = add_to_diagonal(K, σ)
    K = add_to_diagonal(K, wandb.config.jitter)
    # cholesky decomposition
    Lff = numpyro.deterministic("Lff", cholesky(K, lower=True))

    # Sample y according to the standard gaussian process formula
    return numpyro.sample(
        "y",
        dist.MultivariateNormal(loc=jnp.zeros(X.shape[0]),
                                scale_tril=Lff).expand_by(
                                    y.shape[:-1])  # for multioutput scenarios
        .to_event(y.ndim - 1),
        obs=y,
    )
Example #10
    def likelihood(self, X, Y):
        """
        The likelihood model for a Gaussian Process.
        :param X: Features
        :param Y: Targets
        :return: Samples from a Gaussian Process model.
        """
        params_samples = {}
        for param in self.params_priors:
            if "distribution" in str(type(self.params_priors[param])):
                params_samples[param] = numpyro.sample(param, self.params_priors[param])
            if type(self.params_priors[param]) == float:
                params_samples[param] = numpyro.param(param, self.params_priors[param])

        noise = params_samples.get("noise", 0)
        k = self.kernel(X, X, params_samples) + (noise + self.jitter) * np.eye(
            X.shape[0]
        )

        # sample Y according to the standard gaussian process formula
        numpyro.sample(
            "Y",
            dist.MultivariateNormal(loc=np.zeros(X.shape[0]), covariance_matrix=k),
            obs=Y,
        )
Example #11
    def obyo(y, tag, nusdx, nus, mdbCO, mdbH2O, cdbH2H2, cdbH2He):
        #CO
        SijM_CO, ngammaLM_CO, nsigmaDl_CO = exomol(mdbCO, Tarr, Parr, R_CO,
                                                   molmassCO)
        xsm_CO = xsmatrix(cnu_CO, indexnu_CO, R_CO, pmarray_CO, nsigmaDl_CO,
                          ngammaLM_CO, SijM_CO, nus, dgm_ngammaL_CO)
        dtaumCO = dtauM(dParr, jnp.abs(xsm_CO), MMR_CO * ONEARR, molmassCO, g)

        #H2O
        SijM_H2O, ngammaLM_H2O, nsigmaDl_H2O = exomol(mdbH2O, Tarr, Parr,
                                                      R_H2O, molmassH2O)
        xsm_H2O = xsmatrix(cnu_H2O, indexnu_H2O, R_H2O, pmarray_H2O,
                           nsigmaDl_H2O, ngammaLM_H2O, SijM_H2O, nus,
                           dgm_ngammaL_H2O)
        dtaumH2O = dtauM(dParr, jnp.abs(xsm_H2O), MMR_H2O * ONEARR, molmassH2O,
                         g)

        #CIA
        dtaucH2H2 = dtauCIA(nus, Tarr, Parr, dParr, vmrH2, vmrH2,
                            mmw, g, cdbH2H2.nucia, cdbH2H2.tcia, cdbH2H2.logac)
        dtaucH2He = dtauCIA(nus, Tarr, Parr, dParr, vmrH2, vmrHe,
                            mmw, g, cdbH2He.nucia, cdbH2He.tcia, cdbH2He.logac)

        dtau = dtaumCO + dtaumH2O + dtaucH2H2 + dtaucH2He
        sourcef = planck.piBarr(Tarr, nus)

        Ftoa = Fref / Rp**2
        F0 = rtrun(dtau, sourcef) / baseline / Ftoa

        Frot = response.rigidrot(nus, F0, vsini, u1, u2)
        mu = response.ipgauss_sampling(nusdx, nus, Frot, beta, RV)
        cov = gpkernel_RBF(nu1, tau, a, e1)
        sample(tag,
               dist.MultivariateNormal(loc=mu, covariance_matrix=cov),
               obs=y)
Example #12
def model(X, Y, hypers):
    S, P, N = hypers['expected_sparsity'], X.shape[1], X.shape[0]

    sigma = numpyro.sample("sigma", dist.HalfNormal(hypers['alpha3']))
    phi = sigma * (S / np.sqrt(N)) / (P - S)
    eta1 = numpyro.sample("eta1", dist.HalfCauchy(phi))

    msq = numpyro.sample("msq",
                         dist.InverseGamma(hypers['alpha1'], hypers['beta1']))
    xisq = numpyro.sample("xisq",
                          dist.InverseGamma(hypers['alpha2'], hypers['beta2']))

    eta2 = np.square(eta1) * np.sqrt(xisq) / msq

    lam = numpyro.sample("lambda", dist.HalfCauchy(np.ones(P)))
    kappa = np.sqrt(msq) * lam / np.sqrt(msq + np.square(eta1 * lam))

    # sample observation noise
    var_obs = numpyro.sample(
        "var_obs", dist.InverseGamma(hypers['alpha_obs'], hypers['beta_obs']))

    # compute kernel
    kX = kappa * X
    k = kernel(kX, kX, eta1, eta2, hypers['c']) + var_obs * np.eye(N)
    assert k.shape == (N, N)

    # sample Y according to the standard gaussian process formula
    numpyro.sample("Y",
                   dist.MultivariateNormal(loc=np.zeros(X.shape[0]),
                                           covariance_matrix=k),
                   obs=Y)
Example #13
    def model(self, home_team, away_team):

        sigma_a = pyro.sample("sigma_a", dist.HalfNormal(1.0))
        sigma_b = pyro.sample("sigma_b", dist.HalfNormal(1.0))
        mu_b = pyro.sample("mu_b", dist.Normal(0.0, 1.0))
        rho_raw = pyro.sample("rho_raw", dist.Beta(2, 2))
        rho = 2.0 * rho_raw - 1.0

        log_gamma = pyro.sample("log_gamma", dist.Normal(0, 1))

        with pyro.plate("teams", self.n_teams):
            abilities = pyro.sample(
                "abilities",
                dist.MultivariateNormal(
                    np.array([0.0, mu_b]),
                    covariance_matrix=np.array([
                        [sigma_a**2.0, rho * sigma_a * sigma_b],
                        [rho * sigma_a * sigma_b, sigma_b**2.0],
                    ]),
                ),
            )

        log_a = abilities[:, 0]
        log_b = abilities[:, 1]
        home_inds = np.array([self.team_to_index[team] for team in home_team])
        away_inds = np.array([self.team_to_index[team] for team in away_team])
        home_rate = np.exp(log_a[home_inds] + log_b[away_inds] + log_gamma)
        away_rate = np.exp(log_a[away_inds] + log_b[home_inds])

        pyro.sample("home_goals", dist.Poisson(home_rate).to_event(1))
        pyro.sample("away_goals", dist.Poisson(away_rate).to_event(1))
Example #14
 def sample_posterior(self, rng_key, params, sample_shape=()):
     transform = self._get_transform(params)
     loc, scale_tril = transform.loc, transform.scale_tril
     latent_sample = dist.MultivariateNormal(loc,
                                             scale_tril=scale_tril).sample(
                                                 rng_key, sample_shape)
     return self._unpack_and_constrain(latent_sample, params)
Example #15
def rethinking_model(B, M, K):
    # priors
    a = numpyro.sample("a", dist.Normal(0, 0.5))
    muB = numpyro.sample("muB", dist.Normal(0, 0.5))
    muM = numpyro.sample("muM", dist.Normal(0, 0.5))
    bB = numpyro.sample("bB", dist.Normal(0, 0.5))
    bM = numpyro.sample("bM", dist.Normal(0, 0.5))
    sigma = numpyro.sample("sigma", dist.Exponential(1))
    Rho_BM = numpyro.sample("Rho_BM", dist.LKJ(2, 2))
    Sigma_BM = numpyro.sample("Sigma_BM", dist.Exponential(1).expand([2]))

    # define B_merge as mix of observed and imputed values
    B_impute = numpyro.sample(
        "B_impute",
        dist.Normal(0, 1).expand([int(np.isnan(B).sum())]).mask(False))
    B_merge = ops.index_update(B, np.nonzero(np.isnan(B))[0], B_impute)
    # NB: jax.ops.index_update was removed in newer JAX releases;
    # B.at[np.nonzero(np.isnan(B))[0]].set(B_impute) is the modern equivalent.

    # M and B correlation
    MB = jnp.stack([M, B_merge], axis=1)
    cov = jnp.outer(Sigma_BM, Sigma_BM) * Rho_BM
    numpyro.sample("MB",
                   dist.MultivariateNormal(jnp.stack([muM, muB]), cov),
                   obs=MB)

    # K as function of B and M
    mu = a + bB * B_merge + bM * M
    numpyro.sample("K", dist.Normal(mu, sigma), obs=K)
Example #16
def model(X, Y, hypers):
    S, P, N = hypers["expected_sparsity"], X.shape[1], X.shape[0]

    sigma = numpyro.sample("sigma", dist.HalfNormal(hypers["alpha3"]))
    phi = sigma * (S / jnp.sqrt(N)) / (P - S)
    eta1 = numpyro.sample("eta1", dist.HalfCauchy(phi))

    msq = numpyro.sample("msq",
                         dist.InverseGamma(hypers["alpha1"], hypers["beta1"]))
    xisq = numpyro.sample("xisq",
                          dist.InverseGamma(hypers["alpha2"], hypers["beta2"]))

    eta2 = jnp.square(eta1) * jnp.sqrt(xisq) / msq

    lam = numpyro.sample("lambda", dist.HalfCauchy(jnp.ones(P)))
    kappa = jnp.sqrt(msq) * lam / jnp.sqrt(msq + jnp.square(eta1 * lam))

    # compute kernel
    kX = kappa * X
    k = kernel(kX, kX, eta1, eta2, hypers["c"]) + sigma**2 * jnp.eye(N)
    assert k.shape == (N, N)

    # sample Y according to the standard gaussian process formula
    numpyro.sample(
        "Y",
        dist.MultivariateNormal(loc=jnp.zeros(X.shape[0]),
                                covariance_matrix=k),
        obs=Y,
    )
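The `kernel` helper in Examples #12 and #16 is not shown. The sketch below matches the call signature `kernel(kX, kX, eta1, eta2, hypers["c"])` and follows the degree-two polynomial form used in NumPyro's sparse-regression example, but the exact expression here should be treated as an assumption.

import jax.numpy as jnp

def kernel(X, Z, eta1, eta2, c, jitter=1.0e-4):
    # Degree-two polynomial kernel over rows of X and Z, both of shape (N, P).
    XZ = X @ Z.T
    k = 0.5 * eta2**2 * jnp.square(1.0 + XZ)
    k = k - 0.5 * eta2**2 * (jnp.square(X) @ jnp.square(Z).T)
    k = k + (eta1**2 - eta2**2) * XZ
    k = k + c**2 - 0.5 * eta2**2
    if X.shape == Z.shape:
        k = k + jitter * jnp.eye(X.shape[0])  # numerical stabilizer on the diagonal
    return k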
Example #17
    def __call__(self):
        assignment = numpyro.sample("assignment",
                                    dist.Categorical(self.weights))

        loc = self.loc[assignment]
        cov = self.cov[assignment]

        nu_max = numpyro.sample("nu_max", self.nu_max)
        log_nu_max = jnp.log10(nu_max)

        teff = numpyro.sample("teff", self.teff)

        loc0101 = loc[0:2]
        cov0101 = jnp.array([[cov[0, 0], cov[0, 1]], [cov[1, 0], cov[1, 1]]])

        # Solve Sigma_11^{-1} (x1 - mu1) via the Cholesky factor of the observed block
        L = jax.scipy.linalg.cho_factor(cov0101, lower=True)
        A = jax.scipy.linalg.cho_solve(L,
                                       jnp.array([log_nu_max, teff]) - loc0101)

        loc2323 = loc[2:]
        cov2323 = jnp.array([[cov[2, 2], cov[2, 3]], [cov[3, 2], cov[3, 3]]])

        cov0123 = jnp.array([[cov[0, 2], cov[1, 2]], [cov[0, 3], cov[1, 3]]])
        v = jax.scipy.linalg.cho_solve(L, cov0123.T)

        cond_loc = loc2323 + jnp.dot(cov0123, A)
        cond_cov = (
            cov2323 - jnp.dot(cov0123, v) +
            self.noise * jnp.eye(2)  # Add white noise
        )
        numpyro.sample("log_tau", dist.MultivariateNormal(cond_loc, cond_cov))
Example #18
def parametric_draws(subposteriors, num_draws, diagonal=False, rng_key=None):
    """
    Merges subposteriors following the (embarrassingly parallel) parametric Monte Carlo algorithm.

    **References:**

    1. *Asymptotically Exact, Embarrassingly Parallel MCMC*,
       Willie Neiswanger, Chong Wang, Eric Xing

    :param list subposteriors: a list in which each element is a collection of samples.
    :param int num_draws: number of draws from the merged posterior.
    :param bool diagonal: whether to compute weights using variance or covariance, defaults to
        `False` (using covariance).
    :param jax.random.PRNGKey rng_key: source of the randomness, defaults to `jax.random.PRNGKey(0)`.
    :return: a collection of `num_draws` samples with the same data structure as each subposterior.
    """
    rng_key = random.PRNGKey(0) if rng_key is None else rng_key
    if diagonal:
        mean, var = parametric(subposteriors, diagonal=True)
        samples_flat = dist.Normal(mean, np.sqrt(var)).sample(
            rng_key, (num_draws, ))
    else:
        mean, cov = parametric(subposteriors, diagonal=False)
        samples_flat = dist.MultivariateNormal(mean, cov).sample(
            rng_key, (num_draws, ))

    _, unravel_fn = ravel_pytree(tree_map(lambda x: x[0], subposteriors[0]))
    return vmap(lambda x: unravel_fn(x))(samples_flat)
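Hypothetical usage of `parametric_draws`, assuming each subposterior is a dict of per-site sample arrays with matching shapes:

import jax.numpy as jnp

sub1 = {"theta": jnp.zeros((100, 3))}   # assumed samples from worker 1
sub2 = {"theta": jnp.ones((100, 3))}    # assumed samples from worker 2
merged = parametric_draws([sub1, sub2], num_draws=50)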
Example #19
    def mll(ds: Dataset):

        x, y = ds.X, ds.y

        params = {}

        for iname, iparam in numpyro_params.items():
            if iparam["param_type"] == "prior":
                params[iname] = numpyro.sample(name=iname, fn=iparam["prior"])
            else:
                params[iname] = numpyro.param(
                    name=iname,
                    init_value=iparam["init_value"],
                    constraint=iparam["constraint"],
                )
        # get mean function
        mu = gp.prior.mean_function(x)

        # covariance function
        gram_matrix = gram(gp.prior.kernel, x, params)
        gram_matrix += params["obs_noise"] * I(x.shape[0])

        # scale triangular matrix
        L = cholesky(gram_matrix, lower=True)
        return numpyro.sample(
            "y",
            dist.MultivariateNormal(loc=mu, scale_tril=L),
            obs=y.squeeze(),
        )
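The helpers `I` and `gram` used in Example #19 are not shown; minimal sketches consistent with their call sites (both definitions are assumptions):

import jax
import jax.numpy as jnp

def I(n):
    # Identity matrix of order n.
    return jnp.eye(n)

def gram(kernel_fn, x, params):
    # Dense pairwise kernel matrix, assuming kernel_fn(x1, x2, params) -> scalar.
    return jax.vmap(lambda a: jax.vmap(lambda b: kernel_fn(a, b, params))(x))(x)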
Example #20
 def _get_posterior(self):
     loc = numpyro.param('{}_loc'.format(self.prefix), self._init_latent)
     scale_tril = numpyro.param('{}_scale_tril'.format(self.prefix),
                                jnp.identity(self.latent_dim) *
                                self._init_scale,
                                constraint=constraints.lower_cholesky)
     return dist.MultivariateNormal(loc, scale_tril=scale_tril)
Example #21
 def gaussian_gibbs_fn(rng_key, hmc_sites, gibbs_sites):
     x1 = hmc_sites['x1']
     posterior_loc0 = jnp.matmul(cov_01_cov11_inv, x1)
     x0_proposal = dist.MultivariateNormal(
         loc=posterior_loc0,
         covariance_matrix=posterior_cov0).sample(rng_key)
     return {'x0': x0_proposal}
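A hypothetical wiring of the Gibbs update in Example #21 into NumPyro's HMCGibbs kernel, which alternates the closed-form `x0` draw with HMC moves on the remaining sites (assuming `model` is the two-block Gaussian model of Example #3):

import jax.random as random
from numpyro.infer import MCMC, NUTS, HMCGibbs

kernel = HMCGibbs(NUTS(model), gibbs_fn=gaussian_gibbs_fn, gibbs_sites=["x0"])
mcmc = MCMC(kernel, num_warmup=500, num_samples=1000)
mcmc.run(random.PRNGKey(0))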
Example #22
    def model(self, *views: np.ndarray):
        n = views[0].shape[0]
        p = [view.shape[1] for view in views]
        # mean of the columns in each view of the data, shape (p_i,)
        mu = [
            numpyro.sample("mu_" + str(i),
                           dist.MultivariateNormal(0., 10 * jnp.eye(p_)))
            for i, p_ in enumerate(p)
        ]
        """
        Generates cholesky factors of correlation matrices using an LKJ prior.

        The expected use is to combine it with a vector of variances and pass it
        to the scale_tril parameter of a multivariate distribution such as MultivariateNormal.

        E.g., if theta is a (positive) vector of covariances with the same dimensionality
        as this distribution, and Omega is sampled from this distribution,
        scale_tril=torch.mm(torch.diag(sqrt(theta)), Omega)
        """
        psi = [
            numpyro.sample("psi_" + str(i), dist.LKJCholesky(p_))
            for i, p_ in enumerate(p)
        ]
        # sample weights to get from latent to data space (k,p)
        with numpyro.plate("plate_views", self.latent_dims):
            self.weights_list = [
                numpyro.sample(
                    "W_" + str(i),
                    dist.MultivariateNormal(0., jnp.diag(jnp.ones(p_))))
                for i, p_ in enumerate(p)
            ]
        with numpyro.plate("plate_i", n):
            # sample from latent z - normally distributed (n,k)
            z = numpyro.sample(
                "z",
                dist.MultivariateNormal(0.,
                                        jnp.diag(jnp.ones(self.latent_dims))))
            # sample from multivariate normal and observe data
            [
                numpyro.sample("obs" + str(i),
                               dist.MultivariateNormal((z @ W_) + mu_,
                                                       scale_tril=psi_),
                               obs=X_)
                for i, (
                    X_, psi_, mu_,
                    W_) in enumerate(zip(views, psi, mu, self.weights_list))
            ]
Example #23
    def marginal_likelihood(self, X, y, noise, is_observed=True, **kwargs):
        R"""
        Returns the marginal likelihood distribution, given the input
        locations `X` and the data `y`.
        This is the integral over the product of the GP prior and a normal likelihood.
        .. math::
           y \mid X,\theta \sim \int p(y \mid f,\, X,\, \theta) \, p(f \mid X,\, \theta) \, df
        Parameters
        ----------
        name: string
            Name of the random variable
        X: array-like
            Function input values.  If one-dimensional, must be a column
            vector with shape `(n, 1)`.
        y: array-like
            Data that is the sum of the function with the GP prior and Gaussian
            noise.  Must have shape `(n, )`.
        noise: scalar, Variable, or Covariance
            Standard deviation of the Gaussian noise.  Can also be a Covariance for
            non-white noise.
        is_observed: bool
            Whether to set `y` as an `observed` variable in the `model`.
            Default is `True`.
        **kwargs
            Extra keyword arguments that are passed to `MvNormal` distribution
            constructor.
        """

        if not isinstance(noise, Covariance):
            self.noise = WhiteNoise(noise)
        else:
            self.noise = noise

        mu, cov = self._build_marginal_likelihood(X)
        _ = npy.deterministic(f"{self.name}_y", y)

        if is_observed:
            return npy.sample(
                f"{self.name}",
                dist.MultivariateNormal(loc=mu, covariance_matrix=cov),
                obs=y,
            )
        else:
            shape = infer_shape(X, kwargs.pop("shape", None))
            return npy.sample(
                f"{self.name}",
                dist.MultivariateNormal(loc=mu, covariance_matrix=cov))
Example #24
 def _get_posterior(self):
     loc = numpyro.param("{}_loc".format(self.prefix), self._init_latent)
     scale_tril = numpyro.param(
         "{}_scale_tril".format(self.prefix),
         jnp.identity(self.latent_dim) * self._init_scale,
         constraint=self.scale_tril_constraint,
     )
     return dist.MultivariateNormal(loc, scale_tril=scale_tril)
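Examples #20 and #24 are near-identical internals of NumPyro's AutoMultivariateNormal autoguide; user code typically obtains the same posterior indirectly:

from numpyro.infer.autoguide import AutoMultivariateNormal

guide = AutoMultivariateNormal(model)  # `model` is any NumPyro model
# After fitting with SVI, guide.get_posterior(params) returns the
# dist.MultivariateNormal constructed in Examples #20/#24.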
Example #25
 def proposal_dist(z, g):
     g = -self._preconditioner.flatten(g)
     dim = jnp.size(g)
     rho2 = jnp.clip(jnp.dot(g, g), a_min=1.0)
     covar = (self._mu2 * jnp.eye(dim) + self._lam2_minus_mu2 *
              jnp.outer(g, g) / jnp.dot(g, g)) / rho2
     return dist.MultivariateNormal(loc=self._preconditioner.flatten(z),
                                    covariance_matrix=covar)
Example #26
def multivariate_kf(T=None, T_forecast=15, obs=None):
    """Define Kalman Filter in a multivariate fashion.

    The "time-series" are correlated. To define these relationships in
    a efficient manner, the covarianze matrix of h_t (or, equivalently, the
    noises) is drown from a Cholesky decomposed matrix.

    Parameters
    ----------
    T:  int
    T_forecast: int
    obs: np.array
       observed variable (infected, deaths...)

    """
    T = len(obs) if T is None else T
    beta = numpyro.sample(
        name="beta", fn=dist.Normal(loc=jnp.zeros(2), scale=jnp.ones(2))
    )
    tau = numpyro.sample(name="tau", fn=dist.HalfCauchy(scale=jnp.ones(2)))
    sigma = numpyro.sample(name="sigma", fn=dist.HalfCauchy(scale=0.1))
    z_prev = numpyro.sample(
        name="z_1", fn=dist.Normal(loc=jnp.zeros(2), scale=jnp.ones(2))
    )
    # Define LKJ prior
    L_Omega = numpyro.sample("L_Omega", dist.LKJCholesky(2, 10.0))
    Sigma_lower = jnp.matmul(
        jnp.diag(jnp.sqrt(tau)), L_Omega
    )  # lower cholesky factor of the covariance matrix
    noises = numpyro.sample(
        "noises",
        fn=dist.MultivariateNormal(loc=jnp.zeros(2), scale_tril=Sigma_lower),
        sample_shape=(T + T_forecast - 1,),
    )

    # Propagate the dynamics forward using jax.lax.scan
    carry = (beta, z_prev, tau)
    z_collection = [z_prev]
    carry, zs_exp = lax.scan(f, carry, noises, T + T_forecast - 1)
    z_collection = jnp.concatenate((jnp.array(z_collection), zs_exp), axis=0)

    # Sample the observed y (y_obs) and missing y (y_mis)
    numpyro.sample(
        name="y_obs1",
        fn=dist.Normal(loc=z_collection[:T, 0], scale=sigma),
        obs=obs[:, 0],
    )
    numpyro.sample(
        name="y_pred1", fn=dist.Normal(loc=z_collection[T:, 0], scale=sigma), obs=None
    )
    numpyro.sample(
        name="y_obs2",
        fn=dist.Normal(loc=z_collection[:T, 1], scale=sigma),
        obs=obs[:, 1],
    )
    numpyro.sample(
        name="y_pred2", fn=dist.Normal(loc=z_collection[T:, 1], scale=sigma), obs=None
    )
Example #27
 def model():
     a = numpyro.sample("a", dist.Normal(0, 1))
     b = numpyro.sample("b", dist.Normal(a[..., None], jnp.ones(3)).to_event(1))
     c = numpyro.sample(
         "c", dist.MultivariateNormal(jnp.zeros(3) + a[..., None], jnp.eye(3))
     )
     with numpyro.plate("i", 2):
         d = numpyro.sample("d", dist.Dirichlet(jnp.exp(b + c)))
         numpyro.sample("e", dist.Categorical(logits=d), obs=jnp.array([0, 0]))
     return a, b, c, d
Example #28
def model_c(nu1, y1):
    Rp = numpyro.sample('Rp', dist.Uniform(0.4, 1.2))
    RV = numpyro.sample('RV', dist.Uniform(5.0, 15.0))
    MMR_CO = numpyro.sample('MMR_CO', dist.Uniform(0.0, 0.015))
    vsini = numpyro.sample('vsini', dist.Uniform(15.0, 25.0))
    g = 2478.57730044555 * Mp / Rp**2  # gravity
    u1 = 0.0
    u2 = 0.0

    # Layer-by-layer T-P model
    lnsT = 6.0
    #    lnsT = numpyro.sample('lnsT', dist.Uniform(3.0,5.0))
    sT = 10**lnsT
    lntaup = 0.5
    #    lntaup =  numpyro.sample('lntaup', dist.Uniform(0,1))
    taup = 10**lntaup
    cov = modelcov(lnParr, taup, sT)

    #    T0=numpyro.sample('T0', dist.Uniform(1000.0,1100.0))
    T0 = numpyro.sample('T0', dist.Uniform(1000, 2000))
    Tarr = numpyro.sample(
        'Tarr', dist.MultivariateNormal(loc=ONEARR,
                                        covariance_matrix=cov)) + T0
    # line computation CO
    qt_CO = vmap(mdbCO.qr_interp)(Tarr)

    def obyo(y, tag, nusd, nus, numatrix_CO, mdbCO, cdbH2H2):
        # CO
        SijM_CO = jit(vmap(SijT,
                           (0, None, None, None, 0)))(Tarr, mdbCO.logsij0,
                                                      mdbCO.dev_nu_lines,
                                                      mdbCO.elower, qt_CO)
        gammaLMP_CO = jit(vmap(gamma_exomol,
                               (0, 0, None, None)))(Parr, Tarr, mdbCO.n_Texp,
                                                    mdbCO.alpha_ref)
        gammaLMN_CO = gamma_natural(mdbCO.A)
        gammaLM_CO = gammaLMP_CO + gammaLMN_CO[None, :]

        sigmaDM_CO = jit(vmap(doppler_sigma,
                              (None, 0, None)))(mdbCO.dev_nu_lines, Tarr,
                                                molmassCO)
        xsm_CO = xsmatrix(numatrix_CO, sigmaDM_CO, gammaLM_CO, SijM_CO)
        dtaumCO = dtauM(dParr, xsm_CO, MMR_CO * ONEARR, molmassCO, g)
        # CIA
        dtaucH2H2 = dtauCIA(nus, Tarr, Parr, dParr, vmrH2, vmrH2, mmw, g,
                            cdbH2H2.nucia, cdbH2H2.tcia, cdbH2H2.logac)
        dtau = dtaumCO + dtaucH2H2
        sourcef = planck.piBarr(Tarr, nus)
        F0 = rtrun(dtau, sourcef) / norm

        Frot = response.rigidrot(nus, F0, vsini, u1, u2)
        mu = response.ipgauss_sampling(nusd, nus, Frot, beta, RV)
        numpyro.sample(tag, dist.Normal(mu, sigmain), obs=y)

    obyo(y1, 'y1', nu1, nus, numatrix_CO, mdbCO, cdbH2H2)
Example #29
def model_c(nu1, y1, e1):
    Rp = sample('Rp', dist.Uniform(0.5, 1.5))
    Mp = sample('Mp', dist.Normal(33.5, 0.3))
    RV = sample('RV', dist.Uniform(26.0, 30.0))
    MMR_CO = sample('MMR_CO', dist.Uniform(0.0, maxMMR_CO))
    MMR_H2O = sample('MMR_H2O', dist.Uniform(0.0, maxMMR_H2O))
    T0 = sample('T0', dist.Uniform(1000.0, 1700.0))
    alpha = sample('alpha', dist.Uniform(0.05, 0.15))
    vsini = sample('vsini', dist.Uniform(10.0, 20.0))

    # Kipping Limb Darkening Prior
    q1 = sample('q1', dist.Uniform(0.0, 1.0))
    q2 = sample('q2', dist.Uniform(0.0, 1.0))
    u1, u2 = ld_kipping(q1, q2)

    #GP
    logtau = sample('logtau', dist.Uniform(-1.5, 0.5))  #tau=1 <=> 5A
    tau = 10**(logtau)
    loga = sample('loga', dist.Uniform(-4.0, -2.0))
    a = 10**(loga)

    #gravity
    g = getjov_gravity(Rp, Mp)

    # T-P model
    Tarr = T0 * (Parr / Pref)**alpha

    #CO
    SijM_CO, gammaLM_CO, sigmaDM_CO = exomol(mdbCO, Tarr, Parr, molmassCO)
    xsm_CO = xsmatrix(numatrix_CO, sigmaDM_CO, gammaLM_CO, SijM_CO)
    dtaumCO = dtauM(dParr, xsm_CO, MMR_CO * ONEARR, molmassCO, g)

    #H2O
    SijM_H2O, gammaLM_H2O, sigmaDM_H2O = exomol(mdbH2O, Tarr, Parr, molmassH2O)
    xsm_H2O = xsmatrix(numatrix_H2O, sigmaDM_H2O, gammaLM_H2O, SijM_H2O)
    dtaumH2O = dtauM(dParr, xsm_H2O, MMR_H2O * ONEARR, molmassH2O, g)

    #CIA
    dtaucH2H2 = dtauCIA(nus, Tarr, Parr, dParr, vmrH2, vmrH2,
                        mmw, g, cdbH2H2.nucia, cdbH2H2.tcia, cdbH2H2.logac)
    dtaucH2He = dtauCIA(nus, Tarr, Parr, dParr, vmrH2, vmrHe,
                        mmw, g, cdbH2He.nucia, cdbH2He.tcia, cdbH2He.logac)

    dtau = dtaumCO + dtaumH2O + dtaucH2H2 + dtaucH2He
    sourcef = planck.piBarr(Tarr, nus)
    Ftoa = Fref / Rp**2
    F0 = rtrun(dtau, sourcef) / baseline / Ftoa

    Frot = response.rigidrot(nus, F0, vsini, u1, u2)
    mu = response.ipgauss_sampling(nu1, nus, Frot, beta, RV)
    cov = gpkernel_RBF(nu1, tau, a, e1)
    sample("y1",
           dist.MultivariateNormal(loc=mu, covariance_matrix=cov),
           obs=y1)
Example #30
def model(X, Y):
    # set uninformative log-normal priors on our three kernel hyperparameters
    var = numpyro.sample("kernel_var", dist.LogNormal(0.0, 10.0))
    noise = numpyro.sample("kernel_noise", dist.LogNormal(0.0, 10.0))
    length = numpyro.sample("kernel_length", dist.LogNormal(0.0, 10.0))

    # compute kernel
    k = kernel(X, X, var, length, noise)

    # sample Y according to the standard gaussian process formula
    numpyro.sample("Y", dist.MultivariateNormal(loc=jnp.zeros(X.shape[0]), covariance_matrix=k),
                   obs=Y)
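The `kernel` helper in Example #30 is not shown; the RBF-plus-noise sketch below matches the call `kernel(X, X, var, length, noise)` and follows the form used in NumPyro's GP example, though the exact definition here is an assumption.

import jax.numpy as jnp

def kernel(X, Z, var, length, noise, jitter=1.0e-6, include_noise=True):
    # Squared-exponential kernel on 1-D inputs with optional observation noise.
    deltaXsq = jnp.power((X[:, None] - Z) / length, 2.0)
    k = var * jnp.exp(-0.5 * deltaXsq)
    if include_noise:
        k += (noise + jitter) * jnp.eye(X.shape[0])
    return k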