# Example: fitting a 1D exponential power law with an off-centered kernel,
# with and without an explicit context distribution.
from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian
from variational_sampler.toy_dist import ExponentialPowerLaw
from variational_sampler.display import display_fit

DIM = 1
NPTS = 100
DM = 2

target = ExponentialPowerLaw(beta=1, dim=DIM)

# Plain fits using a kernel whose mean is shifted by DM from the target mean
vs = VariationalSampler(target, (DM + target.m, target.V), NPTS)
f = vs.fit().fit        # variational (KL) fit
fl = vs.fit('l').fit    # importance-sampling fit

# Context-corrected fits: re-weight the target by an explicit context
# Gaussian, fit, then divide the context back out
context = Gaussian(DM + target.m, 2 * target.V)
target2 = lambda x: target(x) + context.log(x)
vs2 = VariationalSampler(target2, context, NPTS)
f2 = vs2.fit().fit / context
fl2 = vs2.fit('l').fit / context

if DIM == 1:
    display_fit(vs.x, target, (f, f2, fl, fl2),
                ('blue', 'green', 'orange', 'red'), ('VS', 'VSc', 'IS', 'ISc'))

# Ground-truth Gaussian (exact moments and normalizing constant of the target)
gopt = Gaussian(target.m, target.V, Z=target.Z)
print('Error for VS: %f' % gopt.kl_div(f))
print('Error for VSc: %f' % gopt.kl_div(f2))
print('Error for IS: %f' % gopt.kl_div(fl))
print('Error for ISc: %f' % gopt.kl_div(fl2))
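
# Optional check (a sketch; it only relies on the fitted Gaussians exposing a
# normalizing constant Z, as the benchmark measures at the end of this file
# already do): compare each estimate of Z with the exact value.
print('Z: VS=%f, IS=%f, true=%f' % (f.Z, fl.Z, target.Z))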
# Example: fitting a 15-dimensional exponential power law and comparing
# fitting methods.
import numpy as np
from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian
from variational_sampler.toy_dist import ExponentialPowerLaw

BETA = 3
DIM = 15
NPTS = 8 * DIM**2

target = ExponentialPowerLaw(beta=BETA, dim=DIM)

# Kernel variances taken from the diagonal of the target covariance
h2 = np.diagonal(target.V)

vs = VariationalSampler(target, (1., np.zeros(DIM), h2), ndraws=NPTS)
f = vs.fit(minimizer='quasi_newton')   # default KL fit, quasi-Newton minimizer
f2 = vs.fit('kl2')                     # KL variant
f0 = vs.fit('l')                       # importance-sampling fit
#f2 = vs.fit('gp', var=h2)

gopt = Gaussian(target.m, target.V, Z=target.Z)

print('VS: error=%f (expected=%f), fitting time=%f'\
          % (gopt.kl_div(f.fit), f.kl_error, f.time))
print('VS2: error=%f (expected=%f), fitting time=%f'\
          % (gopt.kl_div(f2.fit), f2.kl_error, f2.time))
print('IS: error=%f (expected=%f), fitting time=%f'\
          % (gopt.kl_div(f0.fit), f0.kl_error, f0.time))
#print('Error for BMC: %f' % gopt.kl_div(f2.fit))
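
# A compact variant of the above (a sketch; it assumes, as the calls above
# suggest, that the method name can be passed as the first argument to
# vs.fit):
for method in ('kl2', 'l'):
    g = vs.fit(method)
    print('%s: error=%f (expected=%f), fitting time=%f'
          % (method, gopt.kl_div(g.fit), g.kl_error, g.time))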
# Example: fitting a 5-dimensional Gaussian target with a random covariance.
import numpy as np
from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian

DIM = 5
NPTS = 10 * DIM ** 2


def random_var():
    # Random symmetric, (almost surely) positive-definite covariance matrix
    A = np.random.rand(DIM, DIM)
    return np.dot(A, A.T)


def target(x):
    # Unnormalized log-density of a centered Gaussian with covariance VAR
    return np.sum(-.5 * x * np.dot(INV_VAR, x), 0)


MU = np.zeros(DIM)
VAR = random_var()
INV_VAR = np.linalg.inv(VAR)
Z = np.sqrt((2 * np.pi) ** DIM * np.linalg.det(VAR))  # exact normalizing constant

gopt = Gaussian(MU, VAR, Z=Z)
vs = VariationalSampler(target, (MU, VAR), ndraws=NPTS)
f = vs.fit()
f0 = vs.fit('l')


print('Error for VS: %f (expected: %f)'\
          % (gopt.kl_div(f.fit), f.kl_error))
print('Error for IS: %f (expected: %f)'\
          % (gopt.kl_div(f0.fit), f0.kl_error))
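
# Diagnostics one might add here (a sketch; it relies only on the fitted
# Gaussian exposing m and V, exactly as the gopt construction above does):
print('max mean error: %f' % np.max(np.abs(f.fit.m - MU)))
print('max variance error: %f' % np.max(np.abs(f.fit.V - VAR)))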
# Example: random sampling vs. a deterministic Gauss-Hermite rule.
# NB: this snippet is a fragment -- mk, vk (kernel mean and variance), v,
# gs_fit (the ground-truth Gaussian), XMAX, gauss_hermite_rule and the
# display_fit/plt imports come from the full script it was taken from.

# Random sampling approach
vs = VariationalSampler(target, (mk, vk), NPTS)
f_kl = vs.fit()
f_l = vs.fit('l')
f_gp = vs.fit('gp', var=v)

# Deterministic sampling approach (same kernel, but with user-supplied
# Gauss-Hermite points and weights)
x, w = gauss_hermite_rule(NPTS, mk, vk)
vsd = VariationalSampler(target, (mk, vk), NPTS, x=x, w=w)
fd_kl = vsd.fit()
fd_l = vsd.fit('l')
fd_gp = vsd.fit('gp', var=v)


print('Error for VS: %f (expected: %f)'\
          % (gs_fit.kl_div(f_kl.fit), f_kl.kl_error))
print('Error for IS: %f (expected: %f)'\
          % (gs_fit.kl_div(f_l.fit), f_l.kl_error))
print('Error for BMC: %f' % gs_fit.kl_div(f_gp.fit))
print('Error for GH: %f' % gs_fit.kl_div(fd_l.fit))
print('Error for VSd: %f' % gs_fit.kl_div(fd_kl.fit))
print('Error for GP: %f' % gs_fit.kl_div(fd_gp.fit))

acronyms = ('VS', 'IS', 'BMC')
colors = ('blue', 'red', 'green')
legend = ('VS', 'direct', 'spline')
plt.figure()
display_fit(vs.x, target, (f_kl, f_l, f_gp), colors, legend, xmax=XMAX)
plt.figure()
display_fit(vsd.x, target, (fd_kl, fd_l, fd_gp), colors, legend, xmax=XMAX)
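
# To compare cost as well as accuracy, the fitting times could be printed too
# (a sketch; the .time attribute is used the same way in the power-law
# example above):
for name, g in (('VS', f_kl), ('IS', f_l), ('VSd', fd_kl), ('GH', fd_l)):
    print('%s: fitting time=%f' % (name, g.time))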
# Benchmark fragment: compare fitting methods over repeated runs.
# NB: TARGET (the toy distribution), H2 (the kernel variances), NDRAWS (the
# grid of sample sizes) and the durations/measures arrays are defined earlier
# in the full benchmark script this fragment was taken from.
import numpy as np
import matplotlib.pyplot as plt
from variational_sampler.gaussian import Gaussian

GS_FIT = Gaussian(TARGET.m, TARGET.V, Z=TARGET.Z)

METHODS = {
    'l': {},
    'kl': {'minimizer': 'quasi_newton'},
    'gp': {'var': .1 * np.mean(H2)}
}
# Override: restrict the benchmark to the 'l' and 'kl' methods
METHODS = {
    'l': {},
    'kl': {'minimizer': 'quasi_newton'}
}

# Squared Mahalanobis norm of the fitted mean (kept for reference)
mahalanobis = lambda f: np.sum(f.m * np.dot(np.linalg.inv(f.V), f.m))

# Error measures: absolute error on Z, Euclidean distance between means,
# and KL divergence from the ground-truth Gaussian fit
MEASURES = (lambda f: np.abs(f.Z - TARGET.Z),
            lambda f: np.sqrt(np.sum((f.m - TARGET.m) ** 2)),
            lambda f: GS_FIT.kl_div(f))
get_measures = lambda f: np.array([m(f) for m in MEASURES])

def display(durations, measures, robust=False):
    if robust:
        # Robust location/scale: median and MAD scaled to match a normal std
        mu = lambda x: np.median(x, 1)
        std = lambda x: 1.4826 * np.median(np.abs(x.T - mu(x)), 0)
    else:
        mu = lambda x: np.mean(x, 1)
        std = lambda x: np.std(x, 1)
    for k in range(len(MEASURES)):
        plt.figure()
        for m in METHODS.keys():
            plt.errorbar(NDRAWS, mu(measures[m][k]),
                         std(measures[m][k]), fmt='o-')
            # Alternative: plot error against mean fitting duration
            # plt.errorbar(mu(durations[m]), mu(measures[m][k]),
            #              std(measures[m][k]), fmt='o-')
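
# A minimal driver for this fragment (a sketch, not the original benchmark
# script; TARGET, NDRAWS and REPEATS below are hypothetical stand-ins for the
# values defined in the full script):
from variational_sampler import VariationalSampler
from variational_sampler.toy_dist import ExponentialPowerLaw

TARGET = ExponentialPowerLaw(beta=3, dim=4)  # hypothetical target
GS_FIT = Gaussian(TARGET.m, TARGET.V, Z=TARGET.Z)
NDRAWS = (50, 100, 200)                      # hypothetical sample-size grid
REPEATS = 10                                 # hypothetical repetition count

durations, measures = {}, {}
for m, kwargs in METHODS.items():
    measures[m] = np.zeros((len(MEASURES), len(NDRAWS), REPEATS))
    durations[m] = np.zeros((len(NDRAWS), REPEATS))
    for i, n in enumerate(NDRAWS):
        for r in range(REPEATS):
            vs = VariationalSampler(TARGET, (TARGET.m, TARGET.V), ndraws=n)
            g = vs.fit(m, **kwargs)
            measures[m][:, i, r] = get_measures(g.fit)
            durations[m][i, r] = g.time
display(durations, measures)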