Example #1
def update_params_ExponentialNode(node, X, rand_gen, gamma_prior):
    """
    The prior over the rate parameter is a Gamma

    p(\lambda) = Gamma(\alpha_0=a_0, \beta_0=b_0)

    see[1]

    [1] - https: // en.wikipedia.org / wiki / Conjugate_prior

    p(\lambda|X) = Gamma(\alpha_n=a_n, \beta_n=b_n)

    see[1]
    """

    assert isinstance(gamma_prior, PriorGamma)

    N = len(X)

    #
    # updating posterior parameters
    sum_x = X.sum()
    a_n = gamma_prior.a_0 + N
    b_n = gamma_prior.b_0 + sum_x

    #
    # sampling
    lambda_sam = sample_parametric_node(Gamma(a_n, b_n), 1, None, rand_gen)

    #
    # updating params
    node.l = lambda_sam[0]
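
To see the update in isolation, here is a minimal NumPy sketch of the same
Exponential-Gamma conjugate step; the prior hyperparameters and the synthetic
data are illustrative assumptions, not values taken from the code above.

import numpy as np

rng = np.random.default_rng(0)
true_rate = 2.0
X = rng.exponential(scale=1.0 / true_rate, size=500)

a0, b0 = 1.0, 1.0                       # illustrative Gamma(a_0, b_0) prior over the rate
a_n = a0 + len(X)                       # shape update: a_0 + N
b_n = b0 + X.sum()                      # rate update:  b_0 + sum(x_i)

# NumPy parameterizes Gamma by shape and *scale*, so scale = 1 / rate
lambda_sample = rng.gamma(a_n, 1.0 / b_n)
print(a_n / b_n, lambda_sample)         # posterior mean should sit near true_rate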
Example #2
def test_eval_parametric(self):
        data = np.array([1, 1, 1, 1, 1, 1, 1], dtype=np.float32).reshape(
            (1, 7))

        spn = (Gaussian(mean=1.0, stdev=1.0, scope=[0]) *
               Exponential(l=1.0, scope=[1]) *
               Gamma(alpha=1.0, beta=1.0, scope=[2]) *
               LogNormal(mean=1.0, stdev=1.0, scope=[3]) *
               Poisson(mean=1.0, scope=[4]) * Bernoulli(p=0.6, scope=[5]) *
               Categorical(p=[0.1, 0.2, 0.7], scope=[6]))

        ll = log_likelihood(spn, data)

        tf_ll = eval_tf(spn, data)

        self.assertTrue(np.all(np.isclose(ll, tf_ll)))

        # round-trip: build a TF graph from a copy of the SPN, then write the
        # TF variables back into the SPN nodes
        spn_copy = Copy(spn)

        tf_graph, data_placeholder, variable_dict = spn_to_tf_graph(
            spn_copy, data, 1)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            tf_graph_to_spn(variable_dict)

        str_val = spn_to_str_equation(spn)
        str_val2 = spn_to_str_equation(spn_copy)

        self.assertEqual(str_val, str_val2)
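
As a sanity check on what `ll` should contain, the product SPN's
log-likelihood at the all-ones row is just the sum of the component
log-densities. The scipy sketch below assumes the usual parameterizations
(l and beta as rates, LogNormal's mean/stdev on the log scale); it is an
illustration, not part of the test.

import numpy as np
from scipy import stats

ll_manual = (
    stats.norm(1.0, 1.0).logpdf(1.0)                      # Gaussian(mean=1, stdev=1)
    + stats.expon(scale=1.0).logpdf(1.0)                  # Exponential, rate l=1
    + stats.gamma(1.0, scale=1.0).logpdf(1.0)             # Gamma(alpha=1), rate beta=1
    + stats.lognorm(1.0, scale=np.exp(1.0)).logpdf(1.0)   # LogNormal(mean=1, stdev=1)
    + stats.poisson(1.0).logpmf(1)                        # Poisson(mean=1)
    + np.log(0.6)                                         # Bernoulli(p=0.6) at x=1
    + np.log(0.2)                                         # Categorical p=[.1,.2,.7] at x=1
)
print(ll_manual)   # should match ll[0, 0] up to float precision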
Example #3
def update_params_GammaFixAlphaNode(node, X, rand_gen, gamma_prior):
    """
    The prior over \beta is again a Gamma distribution

    p(\beta) = Gamma(a_0, b_0)

    with shape \alpha_0 = a_0 and rate \beta_0 = b_0

    see[1], eq. (52 - 54), considering the inverse of the scale, the rate \frac{1}{\beta}
        and [2]

    [1] - Fink, D. A Compendium of Conjugate Priors(1997)
          https: // www.johndcook.com / CompendiumOfConjugatePriors.pdf
    [2] - https: // en.wikipedia.org / wiki / Conjugate_prior

    Return a sample for the node.params drawn from the posterior distribution
    which for conjugacy is still a Gamma

    p(\beta, | X) = Gamma(a_n, b_n)

    see[1, 2]
    """

    assert isinstance(gamma_prior, PriorGamma)

    N = len(X)

    #
    # if N is 0, then it would be like sampling from the prior
    # a_n = a_0 + N * alpha
    a_n = gamma_prior.a_0 + N * node.alpha
    # logger.info(a_n, gamma_prior.a_0, N, node.alpha)

    #
    # x = X[node.row_ids, node.scope]
    sum_x = X.sum()
    b_n = gamma_prior.b_0 + sum_x

    #
    # sampling
    # TODO, optimize it with numba
    rate_sam = sample_parametric_node(Gamma(a_n, b_n), 1, None, rand_gen)

    #
    # updating params (only the rate beta)
    node.beta = rate_sam[0]
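
A minimal, self-contained sketch of the same fixed-shape Gamma update in plain
NumPy; the fixed shape, prior hyperparameters, and synthetic data are
illustrative assumptions.

import numpy as np

rng = np.random.default_rng(0)
alpha_fixed = 3.0                       # known shape of the likelihood (assumed)
true_rate = 2.0
X = rng.gamma(alpha_fixed, 1.0 / true_rate, size=500)

a0, b0 = 1.0, 1.0                       # illustrative Gamma(a_0, b_0) prior over the rate
a_n = a0 + len(X) * alpha_fixed         # a_0 + N * alpha
b_n = b0 + X.sum()                      # b_0 + sum(x_i)

beta_sample = rng.gamma(a_n, 1.0 / b_n) # NumPy uses shape/scale, scale = 1 / rate
print(a_n / b_n, beta_sample)           # posterior mean should sit near true_rate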
Example #4
def update_params_GaussianNode2(node, X, rand_gen, nig_prior):
    """
    The prior over parameters is a Normal - Inverse - Gamma(NIG)


    [1] - Murphy K., Conjugate Bayesian analysis of the Gaussian distribution(2007)
          https: // www.cs.ubc.ca / ~murphyk / Papers / bayesGauss.pdf
          https://en.wikipedia.org/wiki/Conjugate_prior
          http://thaines.com/content/misc/gaussian_conjugate_prior_cheat_sheet.pdf
          ** http://homepages.math.uic.edu/~rgmartin/Teaching/Stat591/Bayes/Notes/591_gibbs.pdf
          ** https://people.eecs.berkeley.edu/~jordan/courses/260-spring10/lectures/lecture5.pdf

    Return a sample for the node.params drawn from the posterior distribution
    which for conjugacy is still a NIG

    p(\mu, \sigma ^ 2, | X) = NIG(m_n, V_n, a_n, b_n)

    see[1]
    """

    assert isinstance(nig_prior, PriorNormalInverseGamma), nig_prior

    n = len(X)
    X_hat = np.mean(X)

    mean = (nig_prior.V_0 * nig_prior.m_0 + n * X_hat) / (nig_prior.V_0 + n)

    v = nig_prior.V_0 + n

    a = nig_prior.a_0 + n / 2

    # b_n = b_0 + 0.5 * sum((x_i - x_bar)^2)
    #           + V_0 * n * (x_bar - m_0)^2 / (2 * (V_0 + n)),  see [1]
    b = nig_prior.b_0 + (n / 2) * np.var(X) + \
        (nig_prior.V_0 * n * np.power(X_hat - nig_prior.m_0, 2)) / \
        (2 * (nig_prior.V_0 + n))

    # sigma^2 ~ InvGamma(a_n, b_n): sample the precision from Gamma(a_n, b_n)
    # and invert it
    inv_sigma2_sam = sample_parametric_node(Gamma(a, b), 1, None, rand_gen)

    sigma2_sam = 1 / inv_sigma2_sam[0]

    # mu | sigma^2 ~ N(m_n, sigma^2 / V_n); Gaussian expects a standard deviation
    mu_sam = sample_parametric_node(Gaussian(mean, np.sqrt(sigma2_sam / v)), 1,
                                    None, rand_gen)

    # updating params
    node.mean = mu_sam[0]
    node.stdev = np.sqrt(sigma2_sam)
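
The same NIG posterior sampling as a standalone NumPy sketch, following
Murphy's update equations; the hyperparameters and data are illustrative.

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(2.0, 1.5, size=500)      # synthetic data (assumed)

m0, V0, a0, b0 = 0.0, 1.0, 1.0, 1.0     # illustrative NIG hyperparameters
n, x_bar = len(X), X.mean()

m_n = (V0 * m0 + n * x_bar) / (V0 + n)
V_n = V0 + n
a_n = a0 + n / 2
b_n = b0 + 0.5 * ((X - x_bar) ** 2).sum() \
    + (V0 * n * (x_bar - m0) ** 2) / (2 * (V0 + n))

sigma2 = 1.0 / rng.gamma(a_n, 1.0 / b_n)       # sigma^2 ~ InvGamma(a_n, b_n)
mu = rng.normal(m_n, np.sqrt(sigma2 / V_n))    # mu | sigma^2 ~ N(m_n, sigma^2 / V_n)
print(mu, np.sqrt(sigma2))                     # should land near (2.0, 1.5)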
Example #5
def update_params_PoissonNode(node, X, rand_gen, gamma_prior):
    """
    The prior over \lambda is a Gamma distribution

    p(\lambda) = Gamma(a_0, b_0)

    with shape \alpha_0 = a_0 and scale \beta_0 = b_0

    see[1]

    [1] - https: // en.wikipedia.org / wiki / Conjugate_prior

    Return a sample for the node.params drawn from the posterior distribution
    which for conjugacy is still a Gamma

    p(\lambda, | X) = Gamma(a_n, b_n)

    see[1]
    """

    assert isinstance(gamma_prior, PriorGamma)

    N = len(X)

    #
    # if N is 0, then it would be like sampling from the prior
    # x = X[node.row_ids, node.scope]
    sum_x = X.sum()
    a_n = gamma_prior.a_0 + sum_x
    b_n = gamma_prior.b_0 + N

    #
    # sampling
    # TODO, optimize it with numba
    lambda_sam = sample_parametric_node(Gamma(a_n, b_n), 1, None, rand_gen)

    #
    # updating params
    node.mean = lambda_sam[0]
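
And the Poisson-Gamma step in isolation, again with illustrative prior
hyperparameters and synthetic data.

import numpy as np

rng = np.random.default_rng(0)
true_lambda = 4.0
X = rng.poisson(true_lambda, size=500)

a0, b0 = 1.0, 1.0                       # illustrative Gamma(a_0, b_0) prior over lambda
a_n = a0 + X.sum()                      # shape update: a_0 + sum(x_i)
b_n = b0 + len(X)                       # rate update:  b_0 + N

lambda_sample = rng.gamma(a_n, 1.0 / b_n)   # NumPy scale = 1 / rate
print(a_n / b_n, lambda_sample)             # posterior mean should sit near true_lambda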
Example #6
        # MLE for a Categorical node: empirical frequencies of the observed values
        v, c = np.unique(data, return_counts=True)
        p = c / c.sum()
        node.p = dict(zip(v, p))

    else:
        raise Exception("Unknown parametric " + str(type(node)))


if __name__ == '__main__':
    node = Gaussian(np.inf, np.inf)
    data = np.array([1, 2, 3, 4, 5]).reshape(-1, 1)
    update_parametric_parameters_mle(node, data)
    assert np.isclose(node.mean, np.mean(data))
    assert np.isclose(node.stdev, np.std(data))

    node = Gamma(np.inf, np.inf)
    data = np.array([1, 2, 3, 4, 5]).reshape(-1, 1)
    update_parametric_parameters_mle(node, data)
    assert np.isclose(node.alpha / node.beta, np.mean(data)), node.alpha

    node = LogNormal(np.inf, np.inf)
    data = np.array([1, 2, 3, 4, 5]).reshape(-1, 1)
    update_parametric_parameters_mle(node, data)
    assert np.isclose(node.mean, np.log(data).mean(), atol=0.00001)
    assert np.isclose(node.stdev, np.log(data).std(), atol=0.00001)

    node = Poisson(np.inf)
    data = np.array([1, 2, 3, 4, 5]).reshape(-1, 1)
    update_parametric_parameters_mle(node, data)
    assert np.isclose(node.mean, np.mean(data))
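
The Categorical branch at the top of this snippet just normalizes value
counts; here is that computation on its own, with made-up data.

import numpy as np

data = np.array([0, 1, 1, 2, 2, 2])
v, c = np.unique(data, return_counts=True)
p = c / c.sum()
print(dict(zip(v, p)))   # {0: 1/6, 1: 2/6, 2: 3/6}, the maximum-likelihood probabilities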
Example #7
    # testing MPE inference for the univariate distributions

    #
    # gaussian
    gaussian = Gaussian(mean=0.5, stdev=2, scope=[0])

    pdf_x, pdf_y = approximate_density(gaussian, x_range)
    fig, ax = plt.subplots(1, 1)
    ax.plot(pdf_x, pdf_y, label="gaussian")
    plt.axvline(x=gaussian.mode, color='r')
    if show_plots:
        plt.show()

    #
    # gamma, alpha=1, beta=5
    gamma = Gamma(alpha=1, beta=5, scope=[0])

    pdf_x, pdf_y = approximate_density(gamma, x_range)
    fig, ax = plt.subplots(1, 1)
    ax.plot(pdf_x, pdf_y, label="gamma")
    plt.axvline(x=gamma.mode, color='r')
    if show_plots:
        plt.show()

    #
    # gamma, alpha=20, beta=5
    gamma = Gamma(alpha=20, beta=5, scope=[0])

    pdf_x, pdf_y = approximate_density(gamma, x_range)
    fig, ax = plt.subplots(1, 1)
    ax.plot(pdf_x, pdf_y, label="gamma")
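
As a quick check on where the red mode lines should fall: a Gaussian's mode is
its mean (0.5 above), and, assuming the rate parameterization used here, a
Gamma's mode is (alpha - 1) / beta for alpha >= 1. A standalone scipy sketch:

import numpy as np
from scipy import stats

xs = np.linspace(1e-6, 10, 100_001)
for alpha, beta in [(1, 5), (20, 5)]:
    pdf = stats.gamma(alpha, scale=1 / beta).pdf(xs)   # rate beta -> scale = 1 / beta
    print(xs[pdf.argmax()], (alpha - 1) / beta)        # numeric argmax vs closed-form mode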