num_comp = 1

# The model
model = TestModel0()

# The prior
log_p_x = MultivariateNormal(mu=[0])
log_p_z_fake = FlatPDF(model.num_output)
log_p_x_ext = PDFCollection([log_p_x, log_p_z_fake])
# The isotropic Likelihood
log_p_z_given_x = UncertaintyPropagationLikelihood(model, alpha=100.)
# The joint
log_p = Joint(log_p_z_given_x, log_p_x_ext)
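# The joint simply stacks the two pieces: log p(x, z) = log p(z | x) + log p(x),
# with the prior extended by a flat density over the model outputs z so that its
# dimensionality matches that of the likelihood.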

# The approximating distribution
log_q = MixtureOfMultivariateNormals.create(log_p.num_dim, num_comp)

# Build the ELBO
# Pick an entropy approximation
entropy = FirstOrderEntropyApproximation()
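# (A first-order approximation here means, roughly, expanding log q about each
# component mean, giving S[q] ~ -sum_i w_i log q(mu_i); this reading is an
# interpretation of the class name rather than the documented definition.)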
# Pick an approximation for the expectation of the joint
expectation_functional = ThirdOrderExpectationFunctional(log_p)
# Build the ELBO
elbo = EvidenceLowerBound(entropy, expectation_functional)
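# The ELBO combines the two pieces above:
#   ELBO[q] = S[q] + E_q[log p(x, z)],
# i.e. the (approximate) entropy of q plus the (approximate) expectation of the
# log joint under q.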
print 'ELBO:'
print str(elbo)

# Optimize the elbo
optimizer = Optimizer(elbo)

C_bounds = tuple((1e-32, None) for i in xrange(log_q.num_comp * log_q.num_dim))
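# The bounds presumably keep each of the num_comp * num_dim diagonal covariance
# entries of q strictly positive during the optimization.  A hypothetical call
# could look like the commented line below; the method name and keyword are
# assumptions, not the documented vuq API:
#optimizer.optimize(log_q, C_bounds=C_bounds)
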
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
from vuq import MixtureOfMultivariateNormals
from vuq import MonteCarloEntropyApproximation
from vuq import FirstOrderEntropyApproximation
from vuq import EntropyLowerBound


# Number of dimensions for the test
num_dim = 1

# Create a random multivariate normal
q = MixtureOfMultivariateNormals.create(num_dim, 1) # With one component first

# Create the entropy approximation
S0 = FirstOrderEntropyApproximation()

# Create a monte carlo approximation to the entropy
Smc = MonteCarloEntropyApproximation(num_samples=100)

# The lower bound to the entropy
Sl = EntropyLowerBound()
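# (For a Gaussian mixture, the standard bound of this kind is
#   S[q] >= -sum_i w_i log sum_j w_j N(mu_i | mu_j, C_i + C_j);
# that vuq's EntropyLowerBound implements exactly this form is an assumption
# based on the class name.)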

# The first order approximation to the entropy
print 'S0[q] =', S0(q)['S']

# The lower bound to the entropy
print 'Sl[q] =', Sl.eval(q)
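
# For comparison, the Monte Carlo estimate (a sketch: this assumes
# MonteCarloEntropyApproximation follows the same call convention as
# FirstOrderEntropyApproximation above)
print 'Smc[q] =', Smc(q)['S']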
Example #3
num_comp = 5

# The model
model = TestModel1()

# The prior
log_p_x = MultivariateNormal(mu=[0])
log_p_z_fake = FlatPDF(model.num_output)
log_p_x_ext = PDFCollection([log_p_x, log_p_z_fake])
# The isotropic Likelihood
log_p_z_given_x = UncertaintyPropagationLikelihood(model, alpha=1e-1)
# The joint
log_p = Joint(log_p_z_given_x, log_p_x_ext)

# The approximating distribution
log_q = MixtureOfMultivariateNormals.create(log_p.num_dim, num_comp)

# Build the ELBO
# Pick an entropy approximation
entropy = FirstOrderEntropyApproximation()
# Pick an approximation for the expectation of the joint
expectation_functional = ThirdOrderExpectationFunctional(log_p)
# Build the ELBO
elbo = EvidenceLowerBound(entropy, expectation_functional)
print 'ELBO:'
print str(elbo)

# Optimize the elbo
optimizer = FullOptimizer(elbo)

C_bounds = tuple((1e-32, None) for i in xrange(log_q.num_comp * log_q.num_dim))
Example #4
def ndiffC(S0, q, h=1e-6):  # reconstructed header: the name and step size h are assumptions
    S = S0.eval(q)[0]       # entropy at the current covariances
    C = q.C
    J = np.zeros(C.shape)   # finite-difference derivative of the entropy w.r.t. the covariances
    for i in xrange(q.num_comp):
        for j in xrange(q.num_dim):
            for k in xrange(q.num_dim):
                C_new = C.copy()
                C_new[i, j, k] += h
                if k != j:
                    C_new[i, k, j] += h
                q.C = C_new
                Sph = S0.eval(q)[0]
                J[i, j, k] = (Sph - S) / h
                if k != j:
                    J[i, j, k] *= 0.5
                q.C = C
    return J


# Number of dimensions for the test
num_dim = 1

# Create a random mixture of multivariate normals
q = MixtureOfMultivariateNormals.create(num_dim, 2)  # Two components

# Create the entropy approximation
#S0 = FirstOrderEntropyApproximation()
S0 = EntropyLowerBound()

# Create a monte carlo approximation to the entropy
Smc = MonteCarloEntropyApproximation(num_samples=100)

# Try it out with more components
print 'Doing it with two components...'
q = MixtureOfMultivariateNormals.create(num_dim, 2)
#q.comp[0].C = np.array([[2]])
#q.w = np.array([0.3, 0.7])
print str(q)
# Evaluate the entropy