def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw):
    D1, D2, D3, N, num_inducing, Q = 150, 200, 400, 500, 3, 7
    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)

    from GPy.models import mrd
    from GPy import kern
    reload(mrd)
    reload(kern)

    k = kern.linear(Q, [.05] * Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
    m = mrd.MRD(Ylist, input_dim=Q, num_inducing=num_inducing, kernels=k,
                initx="", initz='permute', **kw)
    for i, Y in enumerate(Ylist):
        m['{}_noise'.format(i + 1)] = Y.var() / 100.

    # DEBUG
    # np.seterr("raise")

    if optimize:
        print("Optimizing Model:")
        m.optimize(messages=1, max_iters=8e3, max_f_eval=8e3, gtol=.1)
    if plot:
        m.plot_X_1d("MRD Latent Space 1D")
        m.plot_scales("MRD Scales")
    return m
def bgplvm_simulation(optimize='scg', plot=True, max_f_eval=2e4):
    # from GPy.core.transformations import logexp_clipped
    D1, D2, D3, N, num_inducing, Q = 15, 8, 8, 100, 3, 5
    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot)

    from GPy.models import mrd
    from GPy import kern
    reload(mrd)
    reload(kern)

    Y = Ylist[0]
    k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))  # + kern.bias(Q)
    m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k, _debug=True)
    # m.constrain('variance|noise', logexp_clipped())
    m['noise'] = Y.var() / 100.
    m['linear_variance'] = .01

    if optimize:
        print("Optimizing model:")
        m.optimize(optimize, max_iters=max_f_eval, max_f_eval=max_f_eval, messages=True, gtol=.05)
    if plot:
        m.plot_X_1d("BGPLVM Latent Space 1D")
        m.kern.plot_ARD('BGPLVM Simulation ARD Parameters')
    return m
def bgplvm_simulation_matlab_compare():
    from GPy.util.datasets import simulation_BGPLVM
    sim_data = simulation_BGPLVM()
    Y = sim_data['Y']
    S = sim_data['S']
    mu = sim_data['mu']
    num_inducing, [_, Q] = 3, mu.shape

    from GPy.models import mrd
    from GPy import kern
    reload(mrd)
    reload(kern)

    k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
    m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k,
                      # X=mu,
                      # X_variance=S,
                      _debug=False)
    m.auto_scale_factor = True
    m['noise'] = Y.var() / 100.
    m['linear_variance'] = .01
    return m
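# Hedged usage sketch (assumed, not part of the original module): the comparison
# model is returned un-optimized, so optimize it here and inspect the parameter
# table; the learned latent means match sim_data['mu'] only up to scaling and
# permutation of latent dimensions.
def _run_matlab_compare():
    m = bgplvm_simulation_matlab_compare()
    m.ensure_default_constraints()
    m.optimize(messages=True, max_iters=2e3)
    print(m)
    return m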
def bgplvm_simulation(optimize=True, verbose=1, plot=True, plot_sim=False, max_iters=2e4):
    from GPy import kern
    from GPy.models import BayesianGPLVM

    D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 30, 3, 10
    _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
    Y = Ylist[0]

    # _np is numpy imported at module level (import numpy as _np)
    k = kern.linear(Q, ARD=True) + kern.bias(Q, _np.exp(-2)) + kern.white(Q, _np.exp(-2))  # + kern.bias(Q)
    m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k)
    m['noise'] = Y.var() / 100.

    if optimize:
        print("Optimizing model:")
        m.optimize('scg', messages=verbose, max_iters=max_iters, gtol=.05)
    if plot:
        m.plot_X_1d("BGPLVM Latent Space 1D")
        m.kern.plot_ARD('BGPLVM Simulation ARD Parameters')
    return m
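# Hedged usage sketch (assumed, not in the original module): sanity-check the
# gradients on the un-optimized model before committing to the long 'scg' run.
def _check_bgplvm_simulation():
    m = bgplvm_simulation(optimize=False, plot=False)
    print(m.checkgrad(verbose=1))  # checkgrad as used in the MOHGP demos below
    return m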
def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
    from GPy import kern
    from GPy.models import MRD
    from GPy.likelihoods import Gaussian

    D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
    _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
    likelihood_list = [Gaussian(x, normalize=True) for x in Ylist]

    k = kern.linear(Q, ARD=True) + kern.bias(Q, _np.exp(-2)) + kern.white(Q, _np.exp(-2))
    m = MRD(likelihood_list, input_dim=Q, num_inducing=num_inducing, kernels=k,
            initx="", initz='permute', **kw)
    m.ensure_default_constraints()
    for i, bgplvm in enumerate(m.bgplvms):
        m['{}_noise'.format(i)] = bgplvm.likelihood.Y.var() / 500.

    if optimize:
        print("Optimizing Model:")
        m.optimize(messages=verbose, max_iters=8e3, gtol=.1)
    if plot:
        m.plot_X_1d("MRD Latent Space 1D")
        m.plot_scales("MRD Scales")
    return m
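# Hedged usage sketch (assumed, not in the original module): check gradients on
# the un-optimized MRD model, then optimize and plot the per-view ARD scales.
def _run_mrd_simulation():
    m = mrd_simulation(optimize=False, plot=False)
    print(m.checkgrad(verbose=1))  # gradient check before the long optimization
    m.optimize(messages=True, max_iters=8e3, gtol=.1)
    m.plot_scales("MRD Scales")
    return m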
import numpy as np
import pylab as pb
from GPy import kern
from colvb import MOHGP

np.random.seed(1)
pb.close('all')

# cool structured GP demo
Nclust = 20
Nx = 12
Nobs = [np.random.randint(20, 21) for i in range(Nclust)]
X = np.random.rand(Nx, 1) * 5
X.sort(0)

Kf = kern.rbf(1) + kern.white(1, 1e-6)
S = Kf.K(X)
# GP draws for the mean of each cluster
means = np.vstack([np.tile(np.random.multivariate_normal(np.zeros(Nx), S, 1), (N, 1)) for N in Nobs])

# add GP draw for noise
Ky = kern.rbf(1, 0.3, 1) + kern.white(1, 0.001)
Y = means + np.random.multivariate_normal(np.zeros(Nx), Ky.K(X), means.shape[0])

# construct model
m = MOHGP(X, Kf.copy(), Ky.copy(), Y, K=Nclust)
m.constrain_positive('')
m.optimize()
m.preferred_optimizer = 'bfgs'  # use bfgs for the optimizations inside the splits below
m.systematic_splits()
m.remove_empty_clusters(1e-3)
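# Follow-up sketch (assumed, not in the original script): visualise the
# recovered clusters; the positional plot flags mirror the call used in the
# sinusoid demo below.
m.plot(1, 1, 1, 0, 0, 1)
pb.show()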
# sinusoid variant of the demo above: reuses X, Nclust and Nobs from the setup
freqs = 2 * np.pi + 0.3 * (np.random.rand(Nclust) - .5)
phases = 2 * np.pi * np.random.rand(Nclust)
means = np.vstack([np.tile(np.sin(f * X + p).T, (Ni, 1)) for f, p, Ni in zip(freqs, phases, Nobs)])

# add a lower-frequency sine for the noise
freqs = .4 * np.pi + 0.01 * (np.random.rand(means.shape[0]) - .5)
phases = 2 * np.pi * np.random.rand(means.shape[0])
offsets = 0.3 * np.vstack([np.sin(f * X + p).T for f, p in zip(freqs, phases)])
Y = means + offsets + np.random.randn(*means.shape) * 0.05

# construct full model
alpha = 1.0  # DP concentration parameter (assumed value; undefined in the original snippet)
Kf = kern.rbf(1, 0.01, 0.001)
Ky1 = kern.rbf(1, 0.1, 0.001)
Ky2 = kern.white(1, 0.01)
Ky = Ky1 + Ky2
m = MOHGP(X, Kf, Ky, Y, K=Nclust, prior_Z='DP', alpha=alpha)
m.ensure_default_constraints()
m.checkgrad(verbose=1)
m.randomize()
m.optimize()
m.systematic_splits()
m.systematic_splits()
m.plot(1, 1, 1, 0, 0, 1)

# construct model without structure
# give it a fighting chance by normalising the signals first
Y = Y.copy()  # copy so the structured model above keeps its own, unmodified data
Y -= Y.mean(1)[:, None]
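# The demo stops after demeaning Y; a minimal sketch of the unstructured
# comparison model it sets up for (assumed: a white-noise signal kernel in
# place of the structured Kf removes the GP structure over time; the std
# normalisation step is also an assumption).
Y /= Y.std(1)[:, None]
m2 = MOHGP(X, kern.white(1), Ky.copy(), Y, K=Nclust, prior_Z='DP', alpha=alpha)
m2.ensure_default_constraints()
m2.optimize()
m2.systematic_splits()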