Example #1
def model(
    covariates: jnp.ndarray,
    x: Optional[jnp.ndarray] = None,
    x_dim: int = 1,
) -> None:

    if x is not None:
        x_dim = x.shape[-1]

    seq_len, batch, c_dim = covariates.shape
    weight_var = numpyro.sample(
        "weight_var",
        dist.LogNormal(-5 * jnp.ones((c_dim, x_dim)), 5 * jnp.ones(
            (c_dim, x_dim))))
    sigma = numpyro.sample(
        "sigma", dist.LogNormal(-20 * jnp.ones(x_dim), 20 * jnp.ones(x_dim)))

    def transition_fn(
        carry: Tuple[jnp.ndarray, jnp.ndarray],
        t: jnp.ndarray,
    ) -> Tuple[Tuple[jnp.ndarray, jnp.ndarray], jnp.ndarray]:

        z_prev, w_prev = carry
        z = numpyro.sample("z", dist.Normal(z_prev, 1))
        weight = numpyro.sample("weight", dist.Normal(w_prev, weight_var))
        numpyro.sample(
            "x", dist.Normal(z + jnp.matmul(covariates[t], weight), sigma))
        return (z, weight), None

    z_init = jnp.zeros((batch, x_dim))
    w_init = jnp.zeros((c_dim, x_dim))
    with numpyro.handlers.condition(data={"x": x}):
        scan(transition_fn, (z_init, w_init), jnp.arange(seq_len))
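A minimal sketch of how a scan-based model like this is typically fitted with NUTS; the imports, toy shapes, and PRNG keys below are illustrative assumptions, not part of the original example:

from jax import random
from numpyro.infer import MCMC, NUTS

# toy data: 50 time steps, batch of 1, 2 covariates, 1 observed dimension
covariates = random.normal(random.PRNGKey(0), (50, 1, 2))
x = random.normal(random.PRNGKey(1), (50, 1, 1))

mcmc = MCMC(NUTS(model), num_warmup=500, num_samples=500)
mcmc.run(random.PRNGKey(2), covariates, x=x)
mcmc.print_summary()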
Example #2
 def model(self, batch):
     XL, XH = batch['XL'], batch['XH']
     y = batch['y']
     NL, NH = XL.shape[0], XH.shape[0]
     D = XH.shape[1]
     # set uninformative log-normal priors for low-fidelity kernel
     var_L = sample('kernel_var_L', dist.LogNormal(0.0, 1.0), sample_shape = (1,))
     length_L = sample('kernel_length_L', dist.LogNormal(0.0, 1.0), sample_shape = (D,))
     theta_L = np.concatenate([var_L, length_L])
     # set uninformative log-normal priors for high-fidelity kernel
     var_H = sample('kernel_var_H', dist.LogNormal(0.0, 1.0), sample_shape = (1,))
     length_H = sample('kernel_length_H', dist.LogNormal(0.0, 1.0), sample_shape = (D,))
     theta_H = np.concatenate([var_H, length_H])
     # prior for rho
     rho = sample('rho', dist.Normal(0.0, 10.0), sample_shape = (1,))
     # Compute kernels
     K_LL = self.kernel(XL, XL, theta_L) + np.eye(NL)*1e-8
     K_LH = rho*self.kernel(XL, XH, theta_L)
     K_HH = rho**2 * self.kernel(XH, XH, theta_L) + \
                     self.kernel(XH, XH, theta_H) + np.eye(NH)*1e-8
     K = np.vstack((np.hstack((K_LL,K_LH)),
                    np.hstack((K_LH.T,K_HH))))
     L = cholesky(K, lower=True)
     # Generate latent function
     beta_L = sample('beta_L', dist.Normal(0.0, 1.0))
     beta_H = sample('beta_H', dist.Normal(0.0, 1.0))
     eta_L = sample('eta_L', dist.Normal(0.0, 1.0), sample_shape=(NL,))
     eta_H = sample('eta_H', dist.Normal(0.0, 1.0), sample_shape=(NH,))
     beta = np.concatenate([beta_L*np.ones(NL), beta_H*np.ones(NH)])
     eta = np.concatenate([eta_L, eta_H])
     f = np.matmul(L, eta) + beta
     # Bernoulli likelihood
     sample('y', dist.Bernoulli(logits=f), obs=y)
Example #3
def model(N, y=None):
    """
    :param int N: number of measurement times
    :param numpy.ndarray y: measured populations with shape (N, 2)
    """
    # initial population
    z_init = numpyro.sample("z_init",
                            dist.LogNormal(jnp.log(10), 1).expand([2]))
    # measurement times
    ts = jnp.arange(float(N))
    # parameters alpha, beta, gamma, delta of dz_dt
    theta = numpyro.sample(
        "theta",
        dist.TruncatedNormal(
            low=0.0,
            loc=jnp.array([1.0, 0.05, 1.0, 0.05]),
            scale=jnp.array([0.5, 0.05, 0.5, 0.05]),
        ),
    )
    # integrate dz/dt, the result will have shape N x 2
    z = odeint(dz_dt, z_init, ts, theta, rtol=1e-6, atol=1e-5, mxstep=1000)
    # measurement errors
    sigma = numpyro.sample("sigma", dist.LogNormal(-1, 1).expand([2]))
    # measured populations
    numpyro.sample("y", dist.LogNormal(jnp.log(z), sigma), obs=y)
Example #4
def model(
    covariates: jnp.ndarray,
    x: Optional[jnp.ndarray] = None,
    x_dim: int = 1,
    z_dim: int = 1,
    seasonality: int = 7,
) -> None:

    seq_len, batch, c_dim = covariates.shape
    if x is not None:
        x_dim = x.shape[-1]

    # .at[0].add(-1) replaces the removed jax.ops.index_add API
    season_trans = jnp.eye(seasonality - 1, k=-1).at[0].add(-1)
    season_var = numpyro.sample("season_var", dist.LogNormal(-5, 5))

    trend_trans = jnp.array([[1, 1], [0, 1]])
    trend_var = numpyro.sample(
        "trend_var", dist.LogNormal(jnp.array([-5, -5]), jnp.array([5, 5])))

    weight_var = numpyro.sample(
        "weight_var",
        dist.LogNormal(-5 * jnp.ones((c_dim, x_dim)), 5 * jnp.ones(
            (c_dim, x_dim))))
    sigma = numpyro.sample(
        "sigma", dist.LogNormal(-5 * jnp.ones(x_dim), 5 * jnp.ones(x_dim)))

    def transition_fn(
        carry: Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray,
                     jnp.ndarray], t: jnp.ndarray
    ) -> Tuple[Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray],
               jnp.ndarray]:

        z_prev, s_prev, t_prev, w_prev = carry

        z = numpyro.sample("z", dist.Normal(z_prev, jnp.ones(z_dim)))

        s = jnp.matmul(s_prev, season_trans.T)
        s0 = numpyro.sample("s0", dist.Normal(s[:, 0], season_var))
        s = s.at[:, 0].set(s0)  # replaces the removed jax.ops.index_update

        trend_mu = jnp.matmul(t_prev, trend_trans.T)
        trend = numpyro.sample("trend", dist.Normal(trend_mu, trend_var))

        weight = numpyro.sample("weight", dist.Normal(w_prev, weight_var))
        exogenous = jnp.matmul(covariates[t], weight)

        numpyro.sample(
            "x", dist.Normal(z.sum(-1) + s0 + trend[:, 0] + exogenous, sigma))

        return (z, s, trend, weight), None

    z_init = jnp.zeros((batch, z_dim))
    s_init = jnp.zeros((batch, seasonality - 1))
    t_init = jnp.zeros((batch, 2))
    w_init = jnp.zeros((c_dim, x_dim))
    with numpyro.handlers.condition(data={"x": x}):
        scan(transition_fn, (z_init, s_init, t_init, w_init),
             jnp.arange(seq_len))
Example #5
def model(data):
    xc = numpyro.sample('xc', dist.Normal(0, 10))
    yc = numpyro.sample('yc', dist.Normal(0, 10))
    w = numpyro.sample('w', dist.LogNormal(0, 10))
    h = numpyro.sample('h', dist.LogNormal(0, 10))
    phi = numpyro.sample('phi', dist.Uniform(0, np.pi))
    px, py = forward(xc, yc, w, h, phi)
    sigma = numpyro.sample('sigma', dist.Exponential(1.))
    return numpyro.sample('obs', dist.Normal(np.vstack([px, py]), sigma), obs=data)
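The forward function is not shown. One plausible definition, assuming (xc, yc) is an ellipse center, w and h are semi-axes, and phi is a rotation angle (an illustrative guess, not the original helper):

def forward(xc, yc, w, h, phi, n=100):
    # trace out a rotated ellipse, parameterized by angle t
    t = np.linspace(0, 2 * np.pi, n)
    px = xc + w * np.cos(t) * np.cos(phi) - h * np.sin(t) * np.sin(phi)
    py = yc + w * np.cos(t) * np.sin(phi) + h * np.sin(t) * np.cos(phi)
    return px, py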
Example #6
File: gp.py Project: xidulu/numpyro
def model(X, Y):
    # set uninformative log-normal priors on our three kernel hyperparameters
    var = numpyro.sample("kernel_var", dist.LogNormal(0.0, 10.0))
    noise = numpyro.sample("kernel_noise", dist.LogNormal(0.0, 10.0))
    length = numpyro.sample("kernel_length", dist.LogNormal(0.0, 10.0))

    # compute kernel
    k = kernel(X, X, var, length, noise)

    # sample Y according to the standard gaussian process formula
    numpyro.sample("Y", dist.MultivariateNormal(loc=jnp.zeros(X.shape[0]), covariance_matrix=k),
                   obs=Y)
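The kernel helper is not defined in this snippet. A common squared-exponential form matching the call kernel(X, X, var, length, noise), assuming 1-D inputs, is:

def kernel(X, Z, var, length, noise, jitter=1.0e-6, include_noise=True):
    # RBF kernel with observation noise added to the diagonal
    deltaXsq = jnp.power((X[:, None] - Z) / length, 2.0)
    k = var * jnp.exp(-0.5 * deltaXsq)
    if include_noise:
        k += (noise + jitter) * jnp.eye(X.shape[0])
    return k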
Example #7
 def model(self, batch):
     X = batch['X']
     y = batch['y']
     N, D = X.shape
     # set uninformative log-normal priors
     var = sample('kernel_var', dist.LogNormal(0.0, 10.0))
     length = sample('kernel_length', dist.LogNormal(np.zeros(D), 10.0*np.ones(D)))
     noise = sample('noise_var', dist.LogNormal(0.0, 10.0))
     theta = np.concatenate([np.array([var]), np.array(length)])
     # compute kernel
     K = self.kernel(X, X, theta) + np.eye(N)*(noise + 1e-8)
     # sample Y according to the standard gaussian process formula
     sample("y", dist.MultivariateNormal(loc=np.zeros(N), covariance_matrix=K), obs=y)
Example #8
def GPC(X, y):
    N = y.shape[0]

    # Priors.
    alpha = numpyro.sample('alpha', dist.LogNormal(0, 1))
    rho = numpyro.sample('rho', dist.LogNormal(0, 1))
    beta = numpyro.sample('beta', dist.Normal(0, 1))
    eta = numpyro.sample('eta', dist.Normal(np.zeros(N), 1))

    # Latent function.
    f = compute_f(alpha, rho, beta, eta, X, 1e-3)

    # Likelihood.
    numpyro.sample('obs', dist.Bernoulli(logits=f), obs=y)
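compute_f is not shown. A sketch consistent with the whitened parameterization used in the other GP classifier examples here (f = L @ eta + beta), assuming 1-D inputs and an RBF kernel with amplitude alpha and lengthscale rho:

def compute_f(alpha, rho, beta, eta, X, jitter):
    # assumed RBF kernel; the jitter term stabilizes the Cholesky factorization
    K = alpha**2 * np.exp(-0.5 * ((X[:, None] - X) / rho) ** 2)
    K = K + jitter * np.eye(X.shape[0])
    L = np.linalg.cholesky(K)
    return np.matmul(L, eta) + beta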
Example #9
def GP(X, y):
    # Set informative log-normal priors on kernel hyperparameters.
    variance = numpyro.sample("kernel_var", dist.LogNormal(0.0, 0.1))
    lengthscale = numpyro.sample("kernel_length", dist.LogNormal(0.0, 1.0))
    sigma = numpyro.sample("sigma", dist.LogNormal(0.0, 1.0))

    # Compute kernel
    K = squared_exp_cov_1D(X, variance, lengthscale)
    K += np.eye(X.shape[0]) * np.power(sigma, 2)

    # Sample y according to the standard gaussian process formula
    numpyro.sample("y",
                   dist.MultivariateNormal(loc=np.zeros(X.shape[0]),
                                           covariance_matrix=K),
                   obs=y)
Example #10
def sample_y(dist_y, theta, y, sigma_obs=None):
    if sigma_obs is None:
        if dist_y == 'gamma':
            sigma_obs = numpyro.sample('sigma_obs', dist.Exponential(1))
        else:
            sigma_obs = numpyro.sample('sigma_obs', dist.HalfNormal(1))

    if dist_y == 'student':
        nu_y = numpyro.sample('nu_y', dist.Gamma(1, .1))
        numpyro.sample('y', dist.StudentT(nu_y, theta, sigma_obs), obs=y)
    elif dist_y == 'normal':
        numpyro.sample('y', dist.Normal(theta, sigma_obs), obs=y)
    elif dist_y == 'lognormal':
        numpyro.sample('y', dist.LogNormal(theta, sigma_obs), obs=y)
    elif dist_y == 'gamma':
        numpyro.sample('y', dist.Gamma(jnp.exp(theta), sigma_obs), obs=y)
    elif dist_y == 'gamma_raw':
        numpyro.sample('y', dist.Gamma(theta, sigma_obs), obs=y)
    elif dist_y == 'poisson':
        numpyro.sample('y', dist.Poisson(theta), obs=y)
    elif dist_y == 'exponential':
        numpyro.sample('y', dist.Exponential(jnp.exp(theta)), obs=y)
    elif dist_y == 'exponential_raw':
        numpyro.sample('y', dist.Exponential(theta), obs=y)
    elif dist_y == 'uniform':
        numpyro.sample('y', dist.Uniform(0, 1), obs=y)
    else:
        raise NotImplementedError
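A minimal sketch of how sample_y plugs into a larger model; the regression model and its priors are illustrative, not part of the original snippet:

def regression_model(X, y=None, dist_y='normal'):
    # linear predictor theta; the likelihood itself is delegated to sample_y
    w = numpyro.sample('w', dist.Normal(jnp.zeros(X.shape[1]), 1.0))
    b = numpyro.sample('b', dist.Normal(0.0, 1.0))
    theta = jnp.dot(X, w) + b
    sample_y(dist_y, theta, y)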
Example #11
def test_numpyro_dict_priors_defaults_numpyro():

    demo_priors = {
        "lengthscale": dist.LogNormal(loc=0.0, scale=1.0),
        "variance": dist.LogNormal(loc=0.0, scale=1.0),
        "obs_noise": dist.LogNormal(loc=0.0, scale=1.0),
    }

    numpyro_params = numpyro_dict_params(demo_priors)

    assert set(numpyro_params) == set(demo_priors.keys())
    for ikey, iparam in demo_priors.items():
        # check keys exist for param
        assert set(numpyro_params[ikey].keys()) == set(("prior", "param_type"))
        # check the stored prior matches the supplied prior
        chex.assert_equal(numpyro_params[ikey]["prior"], iparam)
Example #12
def test_model_with_transformed_distribution():
    x_prior = dist.HalfNormal(2)
    y_prior = dist.LogNormal(scale=3.)  # transformed distribution

    def model():
        numpyro.sample('x', x_prior)
        numpyro.sample('y', y_prior)

    params = {'x': jnp.array(-5.), 'y': jnp.array(7.)}
    model = handlers.seed(model, random.PRNGKey(0))
    inv_transforms = {
        'x': biject_to(x_prior.support),
        'y': biject_to(y_prior.support)
    }
    expected_samples = partial(transform_fn, inv_transforms)(params)
    expected_potential_energy = (-x_prior.log_prob(expected_samples['x']) -
                                 y_prior.log_prob(expected_samples['y']) -
                                 inv_transforms['x'].log_abs_det_jacobian(
                                     params['x'], expected_samples['x']) -
                                 inv_transforms['y'].log_abs_det_jacobian(
                                     params['y'], expected_samples['y']))

    reparam_model = handlers.reparam(model, {'y': TransformReparam()})
    base_params = {'x': params['x'], 'y_base': params['y']}
    actual_samples = constrain_fn(handlers.seed(reparam_model,
                                                random.PRNGKey(0)), (), {},
                                  base_params,
                                  return_deterministic=True)
    actual_potential_energy = potential_energy(reparam_model, (), {},
                                               base_params)

    assert_allclose(expected_samples['x'], actual_samples['x'])
    assert_allclose(expected_samples['y'], actual_samples['y'])
    assert_allclose(actual_potential_energy, expected_potential_energy)
Example #13
def test_lift():
    def model():
        loc1 = numpyro.param("loc1", 0.0)
        scale1 = numpyro.param("scale1", 1.0, constraint=constraints.positive)
        numpyro.sample("latent1", dist.Normal(loc1, scale1))

        loc2 = numpyro.param("loc2", 1.0)
        scale2 = numpyro.param("scale2", 2.0, constraint=constraints.positive)
        latent2 = numpyro.sample("latent2", dist.Normal(loc2, scale2))
        return latent2

    loc1_prior = dist.Normal()
    scale1_prior = dist.LogNormal()
    prior = {"loc1": loc1_prior, "scale1": scale1_prior}

    with handlers.trace() as tr:
        with handlers.seed(rng_seed=1):
            model()

    with handlers.trace() as lifted_tr:
        with handlers.seed(rng_seed=2):
            with handlers.lift(prior=prior):
                model()

    for name in tr.keys():
        assert name in lifted_tr
        if name in prior:
            assert lifted_tr[name]["fn"] is prior[name]
            assert lifted_tr[name]["type"] == "sample"
            assert lifted_tr[name]["value"] not in (0.0, 1.0)
        elif name in ("loc2", "scale2"):
            assert lifted_tr[name]["type"] == "param"
Example #14
 def model(T, q=1, r=1, phi=0., beta=0.):
     x = 0.
     mu = 0.
     for i in range(T):
         x = numpyro.sample(f'x_{i}', dist.LogNormal(phi * x, q))
         mu = beta * mu + x
         numpyro.sample(f'y_{i}', dist.Normal(mu, r))
Example #15
def model(
    covariates: jnp.ndarray,
    x: Optional[jnp.ndarray] = None,
    x_dim: int = 1,
    z_dim: int = 1,
) -> None:

    if x is not None:
        x_dim = x.shape[-1]

    seq_len, batch, c_dim = covariates.shape
    weight = numpyro.sample(
        "weight",
        dist.Normal(np.zeros((c_dim, x_dim)),
                    np.ones((c_dim, x_dim)) * 0.1))
    bias = numpyro.sample("bias",
                          dist.Normal(np.zeros(x_dim),
                                      np.ones(x_dim) * 10))
    sigma = numpyro.sample(
        "sigma", dist.LogNormal(-5 * np.ones(x_dim), 5 * np.ones(x_dim)))

    def transition_fn(
            carry: Tuple[jnp.ndarray],
            t: jnp.ndarray) -> Tuple[Tuple[jnp.ndarray], jnp.ndarray]:

        z_prev, *_ = carry
        z = numpyro.sample("z", dist.Normal(z_prev, jnp.ones(z_dim)))
        numpyro.sample(
            "x",
            dist.Cauchy(z + jnp.matmul(covariates[t], weight) + bias, sigma))
        return (z, ), None

    with numpyro.handlers.condition(data={"x": x}):
        scan(transition_fn, (jnp.zeros((batch, z_dim)), ), jnp.arange(seq_len))
Example #16
def test_model_with_transformed_distribution():
    x_prior = dist.HalfNormal(2)
    y_prior = dist.LogNormal(scale=3.)  # transformed distribution

    def model():
        sample('x', x_prior)
        sample('y', y_prior)

    params = {'x': np.array(-5.), 'y': np.array(7.)}
    model = seed(model, random.PRNGKey(0))
    inv_transforms = {
        'x': biject_to(x_prior.support),
        'y': biject_to(y_prior.support)
    }
    expected_samples = partial(transform_fn, inv_transforms)(params)
    expected_potential_energy = (-x_prior.log_prob(expected_samples['x']) -
                                 y_prior.log_prob(expected_samples['y']) -
                                 inv_transforms['x'].log_abs_det_jacobian(
                                     params['x'], expected_samples['x']) -
                                 inv_transforms['y'].log_abs_det_jacobian(
                                     params['y'], expected_samples['y']))

    base_inv_transforms = {
        'x': biject_to(x_prior.support),
        'y': biject_to(y_prior.base_dist.support)
    }
    actual_samples = constrain_fn(seed(model, random.PRNGKey(0)), (), {},
                                  base_inv_transforms, params)
    actual_potential_energy = potential_energy(model, (), {},
                                               base_inv_transforms, params)

    assert_allclose(expected_samples['x'], actual_samples['x'])
    assert_allclose(expected_samples['y'], actual_samples['y'])
    assert_allclose(actual_potential_energy, expected_potential_energy)
Example #17
def test_numpyro_marginal_ll_numpyro_priors_type(n_samples, n_features, n_latents, dtype):

    # create sample data
    ds = _gen_training_data(n_samples, n_features, n_latents)

    # cast every array to the requested dtype
    ds = jax.tree_util.tree_map(lambda x: x.astype(dtype), ds)

    # initialize parameters
    params, posterior = _get_conjugate_posterior_params()

    # convert to numpyro-style params
    numpyro_params = numpyro_dict_params(params)

    # convert to priors
    numpyro_params = add_priors(numpyro_params, dist.LogNormal(0.0, 10.0))

    # initialize numpyro-style GP model
    npy_model = numpyro_marginal_ll(posterior, numpyro_params)

    # do one forward pass with context
    with numpyro.handlers.seed(rng_seed=KEY):
        pred = npy_model(ds)

        chex.assert_equal(pred.dtype, ds.y.dtype)
Example #20
def create_model(yT, yC, num_components):
    # Constants
    nC = yC.shape[0]
    nT = yT.shape[0]
    zC = jnp.isinf(yC).sum().item()
    zT = jnp.isinf(yT).sum().item()
    yT_finite = yT[~jnp.isinf(yT)]
    yC_finite = yC[~jnp.isinf(yC)]
    K = num_components
    
    p = numpyro.sample('p', dist.Beta(.5, .5))
    gammaC = numpyro.sample('gammaC', dist.Beta(1, 1))
    gammaT = numpyro.sample('gammaT', dist.Beta(1, 1))

    etaC = numpyro.sample('etaC', dist.Dirichlet(jnp.ones(K) / K))
    etaT = numpyro.sample('etaT', dist.Dirichlet(jnp.ones(K) / K))
    
    with numpyro.plate('mixture_components', K):
        nu = numpyro.sample('nu', dist.LogNormal(3.5, 0.5))
        mu = numpyro.sample('mu', dist.Normal(0, 3))
        sigma = numpyro.sample('sigma', dist.LogNormal(0, .5))
        phi = numpyro.sample('phi', dist.Normal(0, 3))


    gammaT_star = simulate_data.compute_gamma_T_star(gammaC, gammaT, p)
    etaT_star = simulate_data.compute_eta_T_star(etaC, etaT, p, gammaC, gammaT,
                                                 gammaT_star)

    with numpyro.plate('y_C', nC - zC):
        numpyro.sample('finite_obs_C',
                       Mix(nu[None, :],
                           mu[None, :],
                           sigma[None, :],
                           phi[None, :],
                           etaC[None, :]), obs=yC_finite[:, None])

    with numpyro.plate('y_T', nT - zT):
        numpyro.sample('finite_obs_T',
                       Mix(nu[None, :],
                           mu[None, :],
                           sigma[None, :],
                           phi[None, :],
                           etaT_star[None, :]), obs=yT_finite[:, None])

    numpyro.sample('N_C', dist.Binomial(nC, gammaC), obs=zC)
    numpyro.sample('N_T', dist.Binomial(nT, gammaT_star), obs=zT)
Example #21
def LogNormalFromInterval(low, high):
    """This assumes a centered 90% confidence interval, i.e. the left endpoint
    marks 0.05% on the CDF, the right 0.95%."""
    loghigh = math.log(high)
    loglow = math.log(low)
    mean = (loghigh + loglow) / 2
    stdev = (loghigh - loglow) / (2 * 1.645)
    return dist.LogNormal(mean, stdev)
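A quick sanity check of the construction: 1.645 is (approximately) the 0.95 quantile of the standard normal, so exp(mean ± 1.645 * stdev) should recover the interval endpoints. NumPyro's LogNormal exposes the underlying normal parameters as .loc and .scale:

d = LogNormalFromInterval(2.0, 8.0)
assert abs(math.exp(d.loc - 1.645 * d.scale) - 2.0) < 1e-9
assert abs(math.exp(d.loc + 1.645 * d.scale) - 8.0) < 1e-9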
Example #22
    def model(self, t, X):        

        noise = sample('noise', dist.LogNormal(0.0, 1.0), sample_shape=(self.D,))
        hyp = sample('hyp', dist.Gamma(1.0, 0.5), sample_shape=(self.D,))
        W = sample('W', dist.LogNormal(0.0, 1.0), sample_shape=(self.D,))
        
        J0 = sample('J0', dist.Uniform(1.0, 10.0)) # 2.5
        k1 = sample('k1', dist.Uniform(80., 120.0)) # 100.
        k2 = sample('k2', dist.Uniform(1., 10.0)) # 6.
        k3 = sample('k3', dist.Uniform(2., 20.0)) # 16.
        k4 = sample('k4', dist.Uniform(80., 120.0)) # 100.
        k5 = sample('k5', dist.Uniform(0.1, 2.0)) # 1.28
        k6 = sample('k6', dist.Uniform(2., 20.0)) # 12.
        k = sample('k', dist.Uniform(0.1, 2.0)) # 1.8
        ka = sample('ka', dist.Uniform(2., 20.0)) # 13.
        q = sample('q', dist.Uniform(1., 10.0)) # 4.
        KI = sample('KI', dist.Uniform(0.1, 2.0)) # 0.52
        phi = sample('phi', dist.Uniform(0.05, 1.0)) # 0.1
        Np = sample('Np', dist.Uniform(0.1, 2.0)) # 1.
        A = sample('A', dist.Uniform(1., 10.0)) #4.
        
        IC = sample('IC', dist.Uniform(0, 1))
        
        # compute kernel
        K_11 = W[0]*self.RBF(self.t_t[0], self.t_t[0], hyp[0]) + np.eye(self.N[0])*(noise[0] + self.jitter)
        K_22 = W[1]*self.RBF(self.t_t[1], self.t_t[1], hyp[1]) + np.eye(self.N[1])*(noise[1] + self.jitter)
        K_33 = W[2]*self.RBF(self.t_t[2], self.t_t[2], hyp[2]) + np.eye(self.N[2])*(noise[2] + self.jitter)
        K = np.concatenate([np.concatenate([K_11, np.zeros((self.N[0], self.N[1])), np.zeros((self.N[0], self.N[2]))], axis = 1),
                            np.concatenate([np.zeros((self.N[1], self.N[0])), K_22, np.zeros((self.N[1], self.N[2]))], axis = 1),
                            np.concatenate([np.zeros((self.N[2], self.N[0])), np.zeros((self.N[2], self.N[1])), K_33], axis = 1)], axis = 0)
        
        # compute mean
        x0 = np.array([0.5, 1.9, 0.18, 0.15, IC, 0.1, 0.064])
        mut = odeint(self.dxdt, x0, self.t.flatten(), J0, k1, k2, k3, k4, k5, k6, k, ka, q, KI, phi, Np, A)
        mu1 = mut[self.i_t[0],ind[0]] / self.max_X[0]
        mu2 = mut[self.i_t[1],ind[1]] / self.max_X[1]
        mu3 = mut[self.i_t[2],ind[2]] / self.max_X[2]
        mu = np.concatenate((mu1,mu2,mu3),axis=0)
        
        # Concat data
        mu = mu.flatten('F')
        X = np.concatenate((self.X[0],self.X[1],self.X[2]),axis=0)
        X = X.flatten('F')

        # sample X according to the standard gaussian process formula
        sample("X", dist.MultivariateNormal(loc=mu, covariance_matrix=K), obs=X)
Example #23
 def model(self, batch):
     X = batch['X']
     y = batch['y']
     N, D = X.shape
     # set uninformative log-normal priors
     var = sample('kernel_var', dist.LogNormal(0.0, 1.0), sample_shape = (1,))
     length = sample('kernel_length', dist.LogNormal(0.0, 1.0), sample_shape = (D,))
     theta = np.concatenate([var, length])
     # compute kernel
     K = self.kernel(X, X, theta) + np.eye(N)*1e-8
     L = cholesky(K, lower=True)
     # Generate latent function
     beta = sample('beta', dist.Normal(0.0, 1.0))
     eta = sample('eta', dist.Normal(0.0, 1.0), sample_shape=(N,))
     f = np.matmul(L, eta) + beta
     # Bernoulli likelihood
     sample('y', dist.Bernoulli(logits=f), obs=y)
Example #24
 def model(self, batch):
     X = batch['X']
     y = batch['y']
     N = y.shape[0]
     dim_X = X.shape[1]
     dim_H = self.dim_H
     D = dim_X + dim_H
     # Generate latent inputs
     H = sample('H', dist.Normal(np.zeros((N, dim_H)), np.ones((N, dim_H))))
     X = np.concatenate([X, H], axis = 1)
     # set uninformative log-normal priors on GP hyperparameters
     var = sample('kernel_var', dist.LogNormal(0.0, 10.0))
     length = sample('kernel_length', dist.LogNormal(np.zeros(D), 10.0*np.ones(D)))
     noise = sample('noise_var', dist.LogNormal(0.0, 10.0))
     theta = np.concatenate([np.array([var]), np.array(length)])
     # compute kernel
     K = self.kernel(X, X, theta) + np.eye(N)*(noise + 1e-8)
     # sample Y according to the GP likelihood
     sample("y", dist.MultivariateNormal(loc=np.zeros(N), covariance_matrix=K), obs=y)
Example #25
    def model(self, home_team, away_team, gameweek):
        n_gameweeks = max(gameweek) + 1
        sigma_0 = pyro.sample("sigma_0", dist.HalfNormal(5))
        sigma_b = pyro.sample("sigma_b", dist.HalfNormal(5))
        gamma = pyro.sample("gamma", dist.LogNormal(0, 1))

        b = pyro.sample("b", dist.Normal(0, 1))

        loc_mu_b = pyro.sample("loc_mu_b", dist.Normal(0, 1))
        scale_mu_b = pyro.sample("scale_mu_b", dist.HalfNormal(1))

        with pyro.plate("teams", self.n_teams):

            log_a0 = pyro.sample("log_a0", dist.Normal(0, sigma_0))
            mu_b = pyro.sample(
                "mu_b",
                dist.TransformedDistribution(
                    dist.Normal(0, 1),
                    dist.transforms.AffineTransform(loc_mu_b, scale_mu_b),
                ),
            )
            sigma_rw = pyro.sample("sigma_rw", dist.HalfNormal(0.1))

            with pyro.plate("random_walk", n_gameweeks - 1):
                diffs = pyro.sample(
                    "diff",
                    dist.TransformedDistribution(
                        dist.Normal(0, 1),
                        dist.transforms.AffineTransform(0, sigma_rw)),
                )

            diffs = np.vstack((log_a0, diffs))
            log_a = np.cumsum(diffs, axis=-2)

            with pyro.plate("weeks", n_gameweeks):
                log_b = pyro.sample(
                    "log_b",
                    dist.TransformedDistribution(
                        dist.Normal(0, 1),
                        dist.transforms.AffineTransform(
                            mu_b + b * log_a, sigma_b),
                    ),
                )

        pyro.sample("log_a", dist.Delta(log_a), obs=log_a)
        home_inds = np.array([self.team_to_index[team] for team in home_team])
        away_inds = np.array([self.team_to_index[team] for team in away_team])
        home_rate = np.clip(
            log_a[gameweek, home_inds] - log_b[gameweek, away_inds] + gamma,
            -7, 2)
        away_rate = np.clip(
            log_a[gameweek, away_inds] - log_b[gameweek, home_inds], -7, 2)

        pyro.sample("home_goals", dist.Poisson(np.exp(home_rate)))
        pyro.sample("away_goals", dist.Poisson(np.exp(away_rate)))
Example #26
    def model(data):
        with numpyro.plate("states", dim):
            transition = numpyro.sample("transition", dist.Dirichlet(jnp.ones(dim)))
            emission_loc = numpyro.sample("emission_loc", dist.Normal(0, 1))
            emission_scale = numpyro.sample("emission_scale", dist.LogNormal(0, 1))

        trans_prob = numpyro.sample("initialize", dist.Dirichlet(jnp.ones(dim)))
        for t, y in markov(enumerate(data)):
            x = numpyro.sample("x_{}".format(t), dist.Categorical(trans_prob))
            numpyro.sample("y_{}".format(t), dist.Normal(emission_loc[x], emission_scale[x]), obs=y)
            trans_prob = transition[x]
Example #27
    def model(batch, subsample, full_size):
        drift = numpyro.sample("drift", dist.LogNormal(-1, 0.5))
        with handlers.substitute(data={"data": subsample}):
            plate = numpyro.plate("data", full_size, subsample_size=len(subsample))
        assert plate.size == 50

        def transition_fn(z_prev, y_curr):
            with plate:
                z_curr = numpyro.sample("state", dist.Normal(z_prev, drift))
                y_curr = numpyro.sample("obs", dist.Bernoulli(logits=z_curr), obs=y_curr)
            return z_curr, y_curr

        _, result = scan(transition_fn, jnp.zeros(len(subsample)), batch, length=num_time_steps)
        return result
Example #28
def model2():

    data = [np.array([-1.0, -1.0, 0.0]), np.array([-1.0, 1.0])]
    p = numpyro.param("p", np.array([0.25, 0.75]))
    loc = numpyro.sample("loc", dist.Normal(0, 1).expand([2]).to_event(1))
    # FIXME results in infinite loop in transformeddist_to_funsor.
    # scale = numpyro.sample("scale", dist.LogNormal(0, 1))
    z1 = numpyro.sample("z1", dist.Categorical(p))
    scale = numpyro.sample("scale", dist.LogNormal(jnp.array([0.0, 1.0])[z1], 1))
    with numpyro.plate("data[0]", 3):
        numpyro.sample("x1", dist.Normal(loc[z1], scale), obs=data[0])
    with numpyro.plate("data[1]", 2):
        z2 = numpyro.sample("z2", dist.Categorical(p))
        numpyro.sample("x2", dist.Normal(loc[z2], scale), obs=data[1])
Example #29
def model(
    x: Optional[jnp.ndarray] = None,
    seq_len: int = 0,
    batch: int = 0,
    x_dim: int = 1,
    z_dim: int = 3,
    future_steps: int = 0,
) -> None:
    """Hierarchical Kalman filter."""

    if x is not None:
        seq_len, batch, x_dim = x.shape

    trans_mu = numpyro.sample(
        "trans_mu",
        dist.Uniform(-jnp.ones((z_dim, z_dim)), jnp.ones((z_dim, z_dim))))
    trans_var = numpyro.sample(
        "trans_var",
        dist.LogNormal(-20 * jnp.ones((z_dim, z_dim)), 20 * jnp.ones(
            (z_dim, z_dim))))
    with numpyro.plate("batch", batch, dim=-3):
        trans = numpyro.sample(
            "trans",
            dist.Normal(trans_mu, trans_var).expand((1, z_dim, z_dim)))

    emit = numpyro.sample(
        "emit", dist.Normal(jnp.zeros((z_dim, x_dim)), jnp.ones(
            (z_dim, x_dim))))
    z_std = numpyro.sample("z_std", dist.Gamma(jnp.ones(z_dim),
                                               jnp.ones(z_dim)))
    x_std = numpyro.sample("x_std", dist.Gamma(jnp.ones(x_dim),
                                               jnp.ones(x_dim)))

    def transition_fn(
            carry: Tuple[jnp.ndarray],
            t: jnp.ndarray) -> Tuple[Tuple[jnp.ndarray], jnp.ndarray]:

        z_prev, *_ = carry
        index = jnp.arange(batch)
        z = numpyro.sample(
            "z", dist.Normal(jnp.matmul(z_prev, trans)[index, index], z_std))
        numpyro.sample("x", dist.Normal(jnp.matmul(z, emit), x_std))
        return (z, ), None

    z_init = jnp.zeros((batch, z_dim))
    with numpyro.handlers.condition(data={"x": x}):
        scan(transition_fn, (z_init, ), jnp.arange(seq_len + future_steps))
Example #30
def model(N, y=None):
    """
    :param int N: number of measurement times
    :param numpy.ndarray y: measured populations with shape (N, 2)
    """
    # initial population
    z_init = numpyro.sample("z_init", dist.LogNormal(jnp.log(10), 1), sample_shape=(2,))
    # measurement times
    ts = jnp.arange(float(N))
    # parameters alpha, beta, gamma, delta of dz_dt
    theta = numpyro.sample(
        "theta",
        dist.TruncatedNormal(low=0., loc=jnp.array([0.5, 0.05, 1.5, 0.05]),
                             scale=jnp.array([0.5, 0.05, 0.5, 0.05])))
    # integrate dz/dt, the result will have shape N x 2
    z = odeint(dz_dt, z_init, ts, theta, rtol=1e-5, atol=1e-3, mxstep=500)
    # measurement errors, we expect that measured hare has larger error than measured lynx
    sigma = numpyro.sample("sigma", dist.Exponential(jnp.array([1, 2])))
    # measured populations (in log scale)
    numpyro.sample("y", dist.Normal(jnp.log(z), sigma), obs=y)