Example #1
# Method from a model class: assumes `numpy as np` and `VariationalSampler`
# are in scope, and that `self` provides `log_posterior` and `laplace_approx`.
def sampling_approx(self, ndraws, method='VS'):
    fit0 = self.laplace_approx()
    target = lambda w: self.log_posterior(w)
    vs = VariationalSampler(target, (fit0.m, fit0.V), ndraws)
    if method == 'VS':
        f = vs.fit(minimizer='quasi_newton')
    elif method == 'IS':
        f = vs.fit(objective='l')
    elif method == 'BMC':
        var = np.trace(vs.kernel.V) / vs.kernel.dim
        f = vs.fit(objective='gp', var=var)
    else:
        raise ValueError('unknown sampling method')
    if method in ('IS', 'VS'):
        print('Evidence rel. error: %f'
              % (np.sqrt(f.var_integral[0, 0]) / f.fit.Z))
    return f.fit
Example #2
import numpy as np
import pylab as plt

from variational_sampler import VariationalSampler
from variational_sampler.toy_dist import ExponentialPowerLaw

# Assumed settings: the snippet uses BETA, DIM and KIND without defining them.
BETA = 3
DIM = 2
KIND = 'kl'


def error(v, kind='kl'):
    if kind == 'kl':
        return v.kl_error
    elif kind == 'integral':
        return v.var_integral[0, 0]
    elif kind == 'theta':
        return v.var_theta[0, 0]
    raise ValueError('unknown error kind')


target = ExponentialPowerLaw(beta=BETA, dim=DIM)

npts = 10**np.arange(1, 6)
e, e0 = [], []

for n in npts:
    vs = VariationalSampler(target, (np.zeros(DIM), target.V), n)
    f = vs.fit('kl')
    f0 = vs.fit('l')
    e.append(error(f, kind=KIND))
    e0.append(error(f0, kind=KIND))

e = np.array(e)
e0 = np.array(e0)

plt.figure()
plt.plot(npts, np.log(e))
plt.plot(npts, np.log(e0))
plt.legend(('VS', 'IS'))
plt.show()
import numpy as np
from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian
from variational_sampler.toy_dist import ExponentialPowerLaw

BETA = 3
DIM = 15
NPTS = 8 * DIM**2

target = ExponentialPowerLaw(beta=BETA, dim=DIM)

h2 = np.diagonal(target.V)

vs = VariationalSampler(target, (1., np.zeros(DIM), h2), ndraws=NPTS)
f = vs.fit(minimizer='quasi_newton')
f2 = vs.fit('kl2')
f0 = vs.fit('l')
#f2 = vs.fit('gp', var=h2)

gopt = Gaussian(target.m, target.V, Z=target.Z)

print('VS: error=%f (expected=%f), fitting time=%f'
      % (gopt.kl_div(f.fit), f.kl_error, f.time))
print('VS2: error=%f (expected=%f), fitting time=%f'
      % (gopt.kl_div(f2.fit), f2.kl_error, f2.time))
print('IS: error=%f (expected=%f), fitting time=%f'
      % (gopt.kl_div(f0.fit), f0.kl_error, f0.time))
#print('Error for BMC: %f' % gopt.kl_div(f2.fit))

import numpy as np
from scipy.special import h_roots

from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian
from variational_sampler.toy_dist import ExponentialPowerLaw

# Assumed settings: the original fragment leaves BETA, DM, DV and NPTS undefined.
BETA = 3
DM = 1.
DV = 2.
NPTS = 50


def gauss_hermite_rule(npts, mk, vk):
    # Rescale Gauss-Hermite nodes and weights to integrate against a 1D
    # Gaussian kernel with mean mk and variance vk.
    x, w = h_roots(npts)
    x *= np.sqrt(2 * vk)
    x += mk
    w /= np.sqrt(np.pi)
    return x, w


target = ExponentialPowerLaw(beta=BETA)
v = float(target.V)
mk = target.m + DM
vk = DV * v

gs_fit = Gaussian(target.m, target.V, Z=target.Z)

# Random sampling approach
vs = VariationalSampler(target, (mk, vk), NPTS)
f_kl = vs.fit()
f_l = vs.fit('l')
f_gp = vs.fit('gp', var=v)

# Deterministic sampling approach (tweak a vs object)
x, w = gauss_hermite_rule(NPTS, mk, vk)
vsd = VariationalSampler(target, (mk, vk), NPTS, x=x, w=w)
fd_kl = vsd.fit()
fd_l = vsd.fit('l')
fd_gp = vsd.fit('gp', var=v)


print('Error for VS: %f (expected: %f)'
      % (gs_fit.kl_div(f_kl.fit), f_kl.kl_error))
print('Error for IS: %f (expected: %f)'
      % (gs_fit.kl_div(f_l.fit), f_l.kl_error))
Example #12
import numpy as np
import pylab as plt

from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian
from variational_sampler.toy_dist import ExponentialPowerLaw
from variational_sampler.display import display_fit

DIM = 1
NPTS = 100
DM = 2

target = ExponentialPowerLaw(beta=1, dim=DIM)
vs = VariationalSampler(target, (DM + target.m, target.V), NPTS)
f = vs.fit().fit
fl = vs.fit('l').fit

context = Gaussian(DM + target.m, 2 * target.V)
target2 = lambda x: target(x) + context.log(x)
vs2 = VariationalSampler(target2, context, NPTS)
f2 = vs2.fit().fit / context
fl2 = vs2.fit('l').fit / context

if DIM == 1:
    display_fit(vs.x, target, (f, f2, fl, fl2),
                ('blue', 'green', 'orange', 'red'), ('VS', 'VSc', 'IS', 'ISc'))

gopt = Gaussian(target.m, target.V, Z=target.Z)
print('Error for VS: %f' % gopt.kl_div(f))
print('Error for VSc: %f' % gopt.kl_div(f2))
print('Error for IS: %f' % gopt.kl_div(fl))
Example #13
import numpy as np

from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian

DIM = 5
NPTS = 10 * DIM**2


def random_var():
    A = np.random.rand(DIM, DIM)
    return np.dot(A, A.T)


def target(x):
    return np.sum(-.5 * x * np.dot(INV_VAR, x), 0)


MU = np.zeros(DIM)
VAR = random_var()
INV_VAR = np.linalg.inv(VAR)
Z = np.sqrt((2 * np.pi)**DIM * np.linalg.det(VAR))

gopt = Gaussian(MU, VAR, Z=Z)
vs = VariationalSampler(target, (MU, VAR), ndraws=NPTS)
f = vs.fit()
f0 = vs.fit('l')


print('Error for VS: %f (expected: %f)'
      % (gopt.kl_div(f.fit), f.kl_error))
print('Error for IS: %f (expected: %f)'
      % (gopt.kl_div(f0.fit), f0.kl_error))
Example #14
import numpy as np

from variational_sampler import VariationalSampler

# Assumed setting: the snippet uses DIM without defining it.
DIM = 2


def target(x, beta=2):
    """
    Function that takes an array with shape (dim, n) as input and
    returns an array with shape (n,) that contains the corresponding
    target log-distribution values.
    """
    return np.sum(-.5 * np.abs(x)**beta, 0)


"""
Tune the mean and variance of the sampling kernel. If we use as vector
as the variance, it will be understood as a diagonal matrix.
"""
ms = np.zeros(DIM)
vs = np.ones(DIM)
"""
Create a variational sampler object.
"""
v = VariationalSampler(target, (ms, vs), 100 * DIM)
"""
Perform fitting.
"""
f = v.fit(family='factor_gaussian')
"""
Get the adjusted normalization constant, mean and variance.
"""
print('Estimated normalizing constant: %f' % f.fit.Z)
print('Estimated mean: %s' % f.fit.m)
print('Estimated variance (diagonal): %s' % f.fit.v)
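
As noted in the comment above, a vector variance specifies a diagonal
kernel; a full covariance matrix can be passed instead, as Example #13
does. A minimal sketch (the 0.5 off-diagonal coupling is illustrative):

import numpy as np
from variational_sampler import VariationalSampler

DIM = 2

def target(x, beta=2):
    return np.sum(-.5 * np.abs(x)**beta, 0)

# Symmetric positive-definite covariance with off-diagonal coupling.
full_V = np.eye(DIM) + 0.5 * (np.ones((DIM, DIM)) - np.eye(DIM))
v = VariationalSampler(target, (np.zeros(DIM), full_V), 100 * DIM)
f = v.fit(family='factor_gaussian')
print('Estimated normalizing constant: %f' % f.fit.Z)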


# Fragment: the enclosing plotting function was truncated. A minimal
# reconstruction (the function name `display` is an assumption) that keeps
# the mean/std summaries over repeats:
def display(measures, durations):
    mu = lambda x: np.mean(x, 1)
    std = lambda x: np.std(x, 1)
    for k in range(len(MEASURES)):
        plt.figure()
        for m in METHODS.keys():
            plt.errorbar(NDRAWS, mu(measures[m][k]), std(measures[m][k]), fmt='o-')
            """
            plt.errorbar(mu(durations[m]), mu(measures[m][k]), std(measures[m][k]), fmt='o-')
            """
        plt.legend(METHODS.keys(), loc=0)
    plt.show()

durations = {}
measures = {}
for m in METHODS.keys():
    durations[m] = np.zeros((len(NDRAWS), REPEATS))
    measures[m] = np.zeros((len(MEASURES), len(NDRAWS), REPEATS))

for i in range(len(NDRAWS)):
    ndraws = NDRAWS[i]
    for r in range(REPEATS):
        vs = VariationalSampler(TARGET, (np.zeros(DIM), H2), ndraws=ndraws)
        for m in METHODS.keys():
            print(m)
            f = vs.fit(m, **METHODS[m])
            durations[m][i, r] = vs.sampling_time + f.time
            measures[m][:, i, r] = get_measures(f.fit)
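
The fragment above assumes a benchmark setup that was lost in extraction.
A minimal sketch of what it expects; the names mirror the fragment, but
the concrete values and the get_measures definition are assumptions:

import numpy as np
import pylab as plt

from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian
from variational_sampler.toy_dist import ExponentialPowerLaw

DIM = 5
TARGET = ExponentialPowerLaw(beta=3, dim=DIM)
H2 = np.diagonal(TARGET.V)
NDRAWS = (50, 100, 200)
REPEATS = 10
MEASURES = ('kl_div',)
METHODS = {'kl': {'minimizer': 'quasi_newton'}, 'l': {}}
GOPT = Gaussian(TARGET.m, TARGET.V, Z=TARGET.Z)


def get_measures(g):
    # One value per entry of MEASURES: here, KL divergence to the exact fit.
    return (GOPT.kl_div(g),)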