Example #1
0
def linear_regression_mvn(x, lmbda=1.):
    # Bayesian linear regression with a full-covariance multivariate-normal
    # prior on the coefficients. Written in a probabilistic-programming DSL
    # where `s <~ d` samples random variable `s` from distribution `d`
    # (this is NOT standard Python; the framework rewrites the AST).
    #
    # x: design matrix; its last axis holds the predictors — TODO confirm
    #    callers pass shape (n_samples, n_predictors).
    # lmbda: rate of the Exponential priors on both coefficient scales.

    # Priors on the two marginal scales and their correlation.
    sigma <~ dist.Exponential(lmbda)
    sigma2 <~ dist.Exponential(lmbda)
    rho <~ dist.Uniform(-1, 1)
    # 2x2 covariance built from scales and correlation:
    # [[sigma^2, rho*sigma*sigma2], [rho*sigma*sigma2, sigma2^2]].
    cov = jnp.array([[sigma**2, rho*sigma*sigma2],[rho*sigma*sigma2, sigma2**2]])
    # Correlated coefficient prior centered at ones.
    # NOTE(review): cov is 2x2, so this presumably expects x.shape[-1] == 2 —
    # confirm against callers.
    coeffs <~ dist.MvNormal(jnp.ones(x.shape[-1]), cov)
    # Linear predictor and Gaussian observation noise with scale `sigma`.
    y = jnp.dot(x, coeffs)
    predictions <~ dist.Normal(y, sigma)
    return predictions
Example #2
0
def linear_regression(x, lmbda=1.0):
    # Bayesian linear regression in a probabilistic-programming DSL where
    # `s <~ d` samples random variable `s` from distribution `d`
    # (not standard Python; the framework rewrites the AST).
    #
    # x: design matrix; its last axis holds the predictors.
    # lmbda: rate of the Exponential prior on the noise/coefficient scale.
    #
    # Consistency fix: the original wrote the sample operator as `< ~`;
    # normalized to `<~` to match the convention used by the other examples
    # in this file (both spellings parse to the same AST).
    sigma <~ dist.Exponential(lmbda)
    # Coefficient prior centered at ones, sharing the scale `sigma`.
    coeffs_init = jnp.ones(x.shape[-1])
    coeffs <~ dist.Normal(coeffs_init, sigma)
    # Linear predictor with Gaussian observation noise.
    y = jnp.dot(x, coeffs)
    predictions <~ dist.Normal(y, sigma)
    return predictions
Example #3
0
        def naive_bayes(X, num_categories):
            # Gaussian naive-Bayes mixture model in a probabilistic-programming
            # DSL where `s <~ d` samples random variable `s` from distribution
            # `d` (not standard Python; the framework rewrites the AST).
            #
            # X: data matrix of shape (num_training_samples, num_predictors).
            # num_categories: number of mixture components / classes.
            # Returns the sampled per-sample class assignments `z`.
            num_predictors = np.shape(X)[1]
            num_training_samples = np.shape(X)[0]

            # Priors
            # Symmetric Dirichlet prior over the class proportions.
            alpha = np.ones(num_categories)
            pi <~ dist.Dirichlet(alpha, shape=num_categories)
            # Per-class, per-predictor component means and scales.
            mu <~ dist.Normal(mu=0, sigma=100, shape=(num_categories, num_predictors))
            sigma <~ dist.Exponential(100, shape=(num_categories, num_predictors))

            # Assign classes to data points
            z <~ dist.Categorical(pi, shape=num_training_samples)

            # The components are independent and normally distributed
            # NOTE(review): this call uses keyword `sd=` while the prior above
            # uses `sigma=` for the same parameter of dist.Normal — verify
            # against the `dist` API that both spellings are accepted.
            xi <~ dist.Normal(mu=mu[z], sd=sigma[z])

            return z
Example #4
0
def linear_regression(x, lmbda=1.0):
    # Bayesian linear regression in a probabilistic-programming DSL where
    # `s <~ d` samples random variable `s` from distribution `d`
    # (not standard Python; the framework rewrites the AST).
    #
    # x: predictor array; its last axis sizes the coefficient vector.
    # lmbda: rate of the Exponential prior on the noise scale.
    #
    # Consistency fix: the original wrote the sample operator as `< ~`;
    # normalized to `<~` to match the convention used by the other examples
    # in this file (both spellings parse to the same AST).
    sigma <~ dist.Exponential(lmbda)
    coeffs <~ dist.Normal(jnp.zeros(x.shape[-1]), 1)
    # NOTE(review): `x * coeffs` is elementwise (broadcast), unlike the
    # `jnp.dot(x, coeffs)` used by the other regression examples — confirm
    # whether this model intends a per-feature product rather than a dot
    # product before unifying.
    predictions <~ dist.Normal(x * coeffs, sigma)
    return predictions