# Example: variational sampling (VS) vs importance sampling (IS) on a 1D
# exponential power law target, with and without a context distribution.
from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian
from variational_sampler.toy_dist import ExponentialPowerLaw
from variational_sampler.display import display_fit

DIM = 1
NPTS = 100
DM = 2

target = ExponentialPowerLaw(beta=1, dim=DIM)

# Fit the target directly, drawing from an off-centered kernel.
vs = VariationalSampler(target, (DM + target.m, target.V), NPTS)
f = vs.fit().fit
fl = vs.fit('l').fit

# Same fits with an explicit context distribution, which is divided out
# of the resulting fits.
context = Gaussian(DM + target.m, 2 * target.V)
target2 = lambda x: target(x) + context.log(x)
vs2 = VariationalSampler(target2, context, NPTS)
f2 = vs2.fit().fit / context
fl2 = vs2.fit('l').fit / context

if DIM == 1:
    display_fit(vs.x, target, (f, f2, fl, fl2),
                ('blue', 'green', 'orange', 'red'),
                ('VS', 'VSc', 'IS', 'ISc'))

# Compare each fit with the exact Gaussian moments of the target.
gopt = Gaussian(target.m, target.V, Z=target.Z)
print('Error for VS: %f' % gopt.kl_div(f))
print('Error for VSc: %f' % gopt.kl_div(f2))
print('Error for IS: %f' % gopt.kl_div(fl))
print('Error for ISc: %f' % gopt.kl_div(fl2))
# Example: compare KL, KL2 and importance-sampling fits on a
# 15-dimensional exponential power law target.
import numpy as np
from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian
from variational_sampler.toy_dist import ExponentialPowerLaw

BETA = 3
DIM = 15
NPTS = 8 * DIM ** 2

target = ExponentialPowerLaw(beta=BETA, dim=DIM)
h2 = np.diagonal(target.V)

vs = VariationalSampler(target, (1., np.zeros(DIM), h2), ndraws=NPTS)
f = vs.fit(minimizer='quasi_newton')
f2 = vs.fit('kl2')
f0 = vs.fit('l')
#f2 = vs.fit('gp', var=h2)

# Compare each fit with the exact Gaussian moments of the target.
gopt = Gaussian(target.m, target.V, Z=target.Z)
print('VS: error=%f (expected=%f), fitting time=%f'
      % (gopt.kl_div(f.fit), f.kl_error, f.time))
print('VS2: error=%f (expected=%f), fitting time=%f'
      % (gopt.kl_div(f2.fit), f2.kl_error, f2.time))
print('IS: error=%f (expected=%f), fitting time=%f'
      % (gopt.kl_div(f0.fit), f0.kl_error, f0.time))
#print('Error for BMC: %f' % gopt.kl_div(f2.fit))
# Example: fit a randomly generated (unnormalized) Gaussian log-density,
# whose normalizing constant is known in closed form.
import numpy as np
from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian

DIM = 5
NPTS = 10 * DIM ** 2

def random_var():
    """Draw a random symmetric positive definite matrix."""
    A = np.random.rand(DIM, DIM)
    return np.dot(A, A.T)

def target(x):
    return np.sum(-.5 * x * np.dot(INV_VAR, x), 0)

MU = np.zeros(DIM)
VAR = random_var()
INV_VAR = np.linalg.inv(VAR)
Z = np.sqrt((2 * np.pi) ** DIM * np.linalg.det(VAR))
gopt = Gaussian(MU, VAR, Z=Z)

vs = VariationalSampler(target, (MU, VAR), ndraws=NPTS)
f = vs.fit()
f0 = vs.fit('l')
print('Error for VS: %f (expected: %f)'
      % (gopt.kl_div(f.fit), f.kl_error))
print('Error for IS: %f (expected: %f)'
      % (gopt.kl_div(f0.fit), f0.kl_error))
# Example: random vs deterministic (Gauss-Hermite) sampling on a 1D
# exponential power law target.
import numpy as np
from scipy.special import h_roots

from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian
from variational_sampler.toy_dist import ExponentialPowerLaw

# NOTE: these constants were missing from the original fragment; the
# values below are placeholders consistent with the other examples.
BETA = 3
NPTS = 100
DM = 2    # kernel mean offset
DV = 2    # kernel variance scaling

def gauss_hermite_rule(npts, mk, vk):
    """
    Compute the points and weights for the Gauss-Hermite quadrature
    with the normalized Gaussian N(mk, vk) as a weighting function.
    """
    x, w = h_roots(npts)
    x *= np.sqrt(2 * vk)
    x += mk
    w /= np.sqrt(np.pi)
    return x, w

target = ExponentialPowerLaw(beta=BETA)
v = float(target.V)
mk = target.m + DM
vk = DV * v
gs_fit = Gaussian(target.m, target.V, Z=target.Z)

# Random sampling approach
vs = VariationalSampler(target, (mk, vk), NPTS)
f_kl = vs.fit()
f_l = vs.fit('l')
f_gp = vs.fit('gp', var=v)

# Deterministic sampling approach (tweak a vs object)
x, w = gauss_hermite_rule(NPTS, mk, vk)
vsd = VariationalSampler(target, (mk, vk), NPTS, x=x, w=w)
fd_kl = vsd.fit()
fd_l = vsd.fit('l')
fd_gp = vsd.fit('gp', var=v)
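# Sketch (not in the original fragment): report each fit's KL divergence
# from the exact Gaussian moments, mirroring the error printouts of the
# other examples. Assumes the same `.fit` / `kl_div` API used above.
for name, fr, fd in (('KL', f_kl, fd_kl), ('L', f_l, fd_l), ('GP', f_gp, fd_gp)):
    print('%s: random error=%f, deterministic error=%f'
          % (name, gs_fit.kl_div(fr.fit), gs_fit.kl_div(fd.fit)))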
# Benchmark setup: compare fitting methods over several sample sizes,
# averaging error measures across repeated runs.
import numpy as np
import matplotlib.pyplot as plt
from variational_sampler import VariationalSampler
from variational_sampler.gaussian import Gaussian
from variational_sampler.toy_dist import ExponentialPowerLaw

BETA = 3
DIM = 5
# Sample sizes: multiples of the number of free parameters of a
# DIM-dimensional Gaussian fit, (DIM + 2) * (DIM + 1) / 2.
NDRAWS = (((DIM + 2) * (DIM + 1)) // 2) * np.array((5, 10, 20, 50))
SCALING = 1
REPEATS = 200

TARGET = ExponentialPowerLaw(beta=BETA, dim=DIM)
H2 = SCALING * np.diagonal(TARGET.V)
GS_FIT = Gaussian(TARGET.m, TARGET.V, Z=TARGET.Z)

# Full method set including 'gp' (kept for reference, overridden below).
#METHODS = {
#    'l': {},
#    'kl': {'minimizer': 'quasi_newton'},
#    'gp': {'var': .1 * np.mean(H2)}
#}
METHODS = {
    'l': {},
    'kl': {'minimizer': 'quasi_newton'}
}

mahalanobis = lambda f: np.sum(f.m * np.dot(np.linalg.inv(f.V), f.m))

# Error measures: absolute error on Z, Euclidean error on the mean, and
# KL divergence from the exact Gaussian moments.
MEASURES = (lambda f: np.abs(f.Z - TARGET.Z),
            lambda f: np.sqrt(np.sum((f.m - TARGET.m) ** 2)),
            lambda f: GS_FIT.kl_div(f))
get_measures = lambda f: np.array([m(f) for m in MEASURES])
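# Sketch (not in the original fragment): the benchmark loop implied by
# REPEATS, NDRAWS and METHODS above. The kernel specification
# (TARGET.m, H2) and the fit(method, **kwargs) call pattern are
# assumptions based on the other examples in this file.
errors = {method: np.zeros((len(NDRAWS), len(MEASURES))) for method in METHODS}
for i, ndraws in enumerate(NDRAWS):
    for _ in range(REPEATS):
        vs = VariationalSampler(TARGET, (TARGET.m, H2), ndraws=int(ndraws))
        for method, kwargs in METHODS.items():
            f = vs.fit(method, **kwargs)
            errors[method][i] += get_measures(f.fit)
for method in METHODS:
    errors[method] /= REPEATS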