def slfm_gp(num_outputs, slfm_rank):
    """Kernel generators for a pure SLFM model: no LMC terms, `slfm_rank`
    shared SLFM RBF kernels, and one scaled independent RBF per output."""
    kgen = lambda: []
    rgen = lambda: []
    slfmgen = lambda: [RBF(name='slfm{}'.format(i)) for i in range(slfm_rank)]
    indepgen = lambda: [
        Scaled(RBF(name='rbf{}'.format(i))) for i in range(num_outputs)
    ]
    return kgen, rgen, slfmgen, indepgen
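# A minimal usage sketch, assuming FunctionalKernel's keyword arguments as
# used in gen_random_k() below; the num_outputs/slfm_rank values here are
# purely illustrative.
kgen, rgen, slfmgen, indepgen = slfm_gp(num_outputs=5, slfm_rank=2)
fk = FunctionalKernel(D=5, lmc_kernels=kgen(), lmc_ranks=rgen(),
                      slfm_kernels=slfmgen(), indep_gp=indepgen())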
def gen_random_k():
    """Generates a Q=2 SLFM kernel with varying lengthscales for 5 outputs."""
    ks = [
        RBF(name='rbf1', inv_lengthscale=5),
        RBF(name='rbf2', inv_lengthscale=0.5)
    ]
    indeps = [
        RBF(name='indep{}'.format(i), inv_lengthscale=2) for i in range(5)
    ]
    return FunctionalKernel(D=5, lmc_kernels=[], lmc_ranks=[],
                            slfm_kernels=ks, indep_gp=indeps)
def gen_kernels(q):
    kern_funcs = [RBF, lambda period: StdPeriodic(1, period), Matern32]
    # one homogeneous list of q kernels per family, with parameters
    # log-spaced over [1, 10]
    kernels = [[kfunc(gamma) for gamma in np.logspace(0, 1, q)]
               for kfunc in kern_funcs]
    # a heterogeneous mix drawing roughly q // 3 kernels from each family,
    # truncated or padded with unit-parameter RBFs to exactly q kernels
    mix = [
        kfunc(gamma)
        for gamma in np.logspace(0, 1, max(q // 3, 1))
        for kfunc in kern_funcs
    ]
    if len(mix) > q:
        mix = mix[:q]
    else:
        for i in range(len(mix), q):
            mix.append(RBF(1))
    return kernels + [mix]
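# A sketch of gen_kernels's output shape with a hypothetical q=4: four lists
# of four kernels each (all-RBF, all-StdPeriodic, all-Matern32, and the
# truncated/padded mix).
rbf_ks, per_ks, mat_ks, mix_ks = gen_kernels(4)
assert all(len(lst) == 4 for lst in (rbf_ks, per_ks, mat_ks, mix_ks))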
def toy_sinusoid():
    # Adapts the 2-output toy problem from
    # "Collaborative Multi-output Gaussian Processes",
    # Nguyen and Bonilla 2014.
    # Their example uses a grid of inputs. To make it harder (for runlmc),
    # we instead look at uniformly distributed inputs.
    sz = 1500
    xss = [np.random.uniform(-10, 10, size=sz) for _ in range(2)]
    f1 = lambda x: np.sin(x) + 1e-7 + np.random.randn(len(x)) * 1e-2
    f2 = lambda x: -np.sin(x) + 1e-7 + np.random.randn(len(x)) * 1e-2
    yss = [f1(xss[0]), f2(xss[1])]
    ks = [RBF(name='rbf0')]
    ranks = [1]
    pred_xss = [np.linspace(-11, 11, 100) for _ in range(2)]
    test_yss = [f1(pred_xss[0]), f2(pred_xss[1])]
    return xss, yss, ks, ranks, pred_xss, test_yss
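# An end-to-end sketch on the toy problem, mirroring the FX2007 driver below
# (InterpolatedLLGP/FunctionalKernel/AdaDelta usage as in that script; the
# default AdaDelta settings here are an assumption).
xss, yss, ks, ranks, pred_xss, test_yss = toy_sinusoid()
fk = FunctionalKernel(D=len(xss), lmc_kernels=ks, lmc_ranks=ranks)
lmc = InterpolatedLLGP(xss, yss, functional_kernel=fk)
lmc.optimize(optimizer=AdaDelta())
pred_yss, pred_vss = lmc.predict(pred_xss)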
import numpy as np
import matplotlib.pyplot as plt
# import paths as in the FX2007 script below
from runlmc.kern.rbf import RBF
from runlmc.lmc.functional_kernel import FunctionalKernel

np.random.seed(1234)

# In[2]:

n_per_output = [65, 100]
xss = list(map(np.random.rand, n_per_output))
nout = len(n_per_output)
yss = [np.sin(2 * np.pi * xs + i * 2 * np.pi / nout) +
       np.random.randn(len(xs)) * (i + 1) * 0.1 / nout
       for i, xs in enumerate(xss)]
ks = [RBF(name='rbf{}'.format(i)) for i in range(nout)]
ranks = [1, 1]
fk = FunctionalKernel(D=len(xss), lmc_kernels=ks, lmc_ranks=ranks)

# In[3]:

def print_diagram(lmc):
    plot_xs = np.arange(0, 1, 0.01)
    mus, vs = lmc.predict([plot_xs for _ in range(nout)])
    for i, (mu, v, xs, ys) in enumerate(zip(mus, vs, xss, yss)):
        sd = np.sqrt(v)
        order = xs.argsort()
        # training points, then the predicted mean with a 95%
        # (roughly two-standard-deviation) band
        plt.scatter(xs[order], ys[order])
        plt.plot(plot_xs, mu)
        plt.fill_between(plot_xs, mu - 1.96 * sd, mu + 1.96 * sd, alpha=0.3)
        plt.title('output {} (95%)'.format(i))
        plt.show()
def alvarez_and_lawrence_gp():
    # Nguyen 2014 COGP uses Q=2, R=1, but that is not LMC.
    # Álvarez and Lawrence 2010 convolved GP has R=4, sort of.
    # Álvarez and Lawrence 2010 find that vanilla LMC works best with
    # Q=1, R=2; that is what we use here.
    return lambda: [RBF(name='rbf0')], lambda: [2], lambda: [], lambda: []
import sys

import contexttimer
import numpy as np

from runlmc.models.interpolated_llgp import InterpolatedLLGP
from runlmc.models.optimization import AdaDelta
from runlmc.models.gpy_lmc import GPyLMC
from runlmc.lmc.functional_kernel import FunctionalKernel
from runlmc.kern.rbf import RBF

outdir = sys.argv[1] + '/'
print('publishing results into out directory', outdir)

print('FX2007 picture')
# Nguyen 2014 COGP uses Q=2, R=1, but that is not LMC.
# Álvarez and Lawrence 2010 convolved GP has R=4, sort of.
# Álvarez and Lawrence 2010 find that vanilla LMC works best with
# Q=1, R=2; that is what we use here.
ks = [RBF(name='rbf0')]
ranks = [2]
# the columns with nonzero test holdout are in test_fx;
# foreign_exchange_2007 and nthreads are defined by the surrounding
# benchmark harness
xss, yss, test_xss, test_yss, test_fx, cols = foreign_exchange_2007()
np.random.seed(1234)
fk = FunctionalKernel(D=len(xss), lmc_kernels=ks, lmc_ranks=ranks)
lmc = InterpolatedLLGP(xss, yss, functional_kernel=fk,
                       max_procs=(nthreads if nthreads else None))
opt = AdaDelta(verbosity=20, min_grad_ratio=0.2)
print('training LLGP')
with contexttimer.Timer() as t:
    lmc.optimize(optimizer=opt)
pred_yss, pred_vss = lmc.predict(test_xss)
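print('LLGP training took {:.1f}s'.format(t.elapsed))
# Hypothetical holdout scoring: this assumes test_fx holds the indices of the
# outputs with nonempty test sets (per the comment above) and reports plain
# per-output mean squared error.
for i in test_fx:
    mse = np.mean((np.asarray(pred_yss[i]) - test_yss[i]) ** 2)
    print('output {} test MSE {:.4e}'.format(i, mse))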
def synth_gen():
    """Kernel generators associated with gen_random_k()."""
    return (lambda: [],
            lambda: [],
            lambda: [RBF(name='rbf1'), RBF(name='rbf2')],
            lambda: [RBF(name='indep{}'.format(i)) for i in range(5)])
def indepgen():
    # def-style equivalent of the indepgen lambda in slfm_gp();
    # num_outputs comes from the enclosing scope
    return [
        Scaled(RBF(name='rbf{}'.format(i))) for i in range(num_outputs)
    ]
def slfmgen():
    # def-style equivalent of the slfmgen lambda in slfm_gp();
    # slfm_rank comes from the enclosing scope
    return [RBF(name='slfm{}'.format(i)) for i in range(slfm_rank)]