def run_demo(): LG.basicConfig(level=LG.DEBUG) SP.random.seed(10) #1. create toy data x, y, z, sigma, X, actual_inv, L = create_toy_data() n_dimensions = 1 n_terms = 3 # build GP likelihood = lik.GaussLikISO() covar_parms = SP.log([1, 1, 1E-5]) hyperparams = { 'covar': covar_parms, 'lik': SP.log([sigma]), 'warping': (1E-2 * SP.random.randn(n_terms, 3)) } SECF = se.SqexpCFARD(n_dimensions=n_dimensions) muCF = mu.MuCF(N=X.shape[0]) covar = combinators.SumCF([SECF, muCF]) warping_function = TanhWarpingFunction(n_terms=n_terms) mean_function = LinMeanFunction(X=SP.ones([x.shape[0], 1])) hyperparams['mean'] = 1E-2 * SP.randn(1) bounds = {} bounds.update(warping_function.get_bounds()) gp = WARPEDGP(warping_function=warping_function, mean_function=mean_function, covar_func=covar, likelihood=likelihood, x=x, y=z) opt_model_params = opt.opt_hyper(gp, hyperparams, bounds=bounds, gradcheck=True)[0] print "WARPED GP (neg) likelihood: ", gp.LML(hyperparams) #hyperparams['mean'] = SP.log(1) PL.figure() PL.plot(z) PL.plot(warping_function.f(y, hyperparams['warping'])) PL.legend(["real function", "larnt function"]) PL.figure() PL.plot(actual_inv) PL.plot(warping_function.f_inv(gp.y, hyperparams['warping'])) PL.legend(['real inverse', 'learnt inverse']) hyperparams.pop("warping") hyperparams.pop("mean") gp = GP(covar, likelihood=likelihood, x=x, y=y) opt_model_params = opt.opt_hyper(gp, hyperparams, gradcheck=False)[0] print "GP (neg) likelihood: ", gp.LML(hyperparams)
def run_demo(): LG.basicConfig(level=LG.DEBUG) SP.random.seed(10) #1. create toy data x,y,z,sigma,X,actual_inv,L = create_toy_data() n_dimensions = 1 n_terms = 3 # build GP likelihood = lik.GaussLikISO() covar_parms = SP.log([1,1,1E-5]) hyperparams = {'covar':covar_parms,'lik':SP.log([sigma]), 'warping': (1E-2*SP.random.randn(n_terms,3))} SECF = se.SqexpCFARD(n_dimensions=n_dimensions) muCF = mu.MuCF(N=X.shape[0]) covar = combinators.SumCF([SECF,muCF]) warping_function = TanhWarpingFunction(n_terms=n_terms) mean_function = LinMeanFunction(X= SP.ones([x.shape[0],1])) hyperparams['mean'] = 1E-2* SP.randn(1) bounds = {} bounds.update(warping_function.get_bounds()) gp = WARPEDGP(warping_function = warping_function, mean_function = mean_function, covar_func=covar, likelihood=likelihood, x=x, y=z) opt_model_params = opt.opt_hyper(gp, hyperparams, bounds = bounds, gradcheck=True)[0] print "WARPED GP (neg) likelihood: ", gp.LML(hyperparams) #hyperparams['mean'] = SP.log(1) PL.figure() PL.plot(z) PL.plot(warping_function.f(y,hyperparams['warping'])) PL.legend(["real function", "larnt function"]) PL.figure() PL.plot(actual_inv) PL.plot(warping_function.f_inv(gp.y,hyperparams['warping'])) PL.legend(['real inverse','learnt inverse']) hyperparams.pop("warping") hyperparams.pop("mean") gp = GP(covar,likelihood=likelihood,x=x,y=y) opt_model_params = opt.opt_hyper(gp,hyperparams, gradcheck=False)[0] print "GP (neg) likelihood: ", gp.LML(hyperparams)
def run_demo():
    """Basic GP regression demo on toy data.

    Fits a squared-exponential ARD GP with Gaussian likelihood to toy
    data, optimizes the hyperparameters under Gamma priors, predicts on
    a uniform grid and plots the predictive band with the training data.
    """
    LG.basicConfig(level=LG.INFO)
    random.seed(1)

    # 1. create toy data
    x, y = create_toy_data()
    n_dimensions = 1

    # 2. uniformly spaced inputs at which to predict
    X = SP.linspace(0, 10, 100)[:, SP.newaxis]

    if 0:
        # old interface: covariance function and likelihood are one object
        hyperparams = {'covar': SP.log([1, 1, 1])}
        SECF = se.SqexpCFARD(n_dimensions=n_dimensions)
        noiseCF = noise.NoiseCFISO()
        covar = combinators.SumCF((SECF, noiseCF))
        # priors: scale, one length-scale per dimension, noise
        covar_priors = [[lnpriors.lnGammaExp, [1, 2]]]
        covar_priors += [[lnpriors.lnGammaExp, [1, 1]]
                         for i in xrange(n_dimensions)]
        covar_priors += [[lnpriors.lnGammaExp, [1, 1]]]
        priors = {'covar': covar_priors}
        likelihood = None

    if 1:
        # new interface: likelihood parameters decoupled from covariance
        likelihood = lik.GaussLikISO()
        hyperparams = {'covar': SP.log([1, 1]), 'lik': SP.log([1])}
        SECF = se.SqexpCFARD(n_dimensions=n_dimensions)
        covar = SECF
        # priors: scale + one length-scale per dimension on the covariance
        covar_priors = [[lnpriors.lnGammaExp, [1, 2]]]
        covar_priors += [[lnpriors.lnGammaExp, [1, 1]]
                         for i in xrange(n_dimensions)]
        # noise prior now lives with the likelihood
        lik_priors = [[lnpriors.lnGammaExp, [1, 1]]]
        priors = {'covar': covar_priors, 'lik': lik_priors}

    gp = GP(covar, likelihood=likelihood, x=x, y=y)
    opt_model_params = opt.opt_hyper(gp, hyperparams,
                                     priors=priors, gradcheck=False)[0]

    # predict on the grid
    [M, S] = gp.predict(opt_model_params, X)

    # create plots
    gpr_plot.plot_sausage(X, M, SP.sqrt(S))
    gpr_plot.plot_training_data(x, y)
    PL.show()
def run_demo(): LG.basicConfig(level=LG.INFO) #1. simulate data from a linear PCA model N = 25 K = 5 D = 200 SP.random.seed(1) S = SP.random.randn(N,K) W = SP.random.randn(D,K) Y = SP.dot(W,S.T).T Y+= 0.5*SP.random.randn(N,D) #use "standard PCA" [Spca,Wpca] = gplvm.PCA(Y,K) #reconstruction Y_ = SP.dot(Spca,Wpca.T) if 1: #use linear kernel covariance = linear.LinearCFISO(n_dimensions=K) hyperparams = {'covar': SP.log([1.2])} if 0: #use ARD kernel covariance = se.SqexpCFARD(n_dimensions=K) hyperparams = {'covar': SP.log([1]*(K+1))} #initialization of X at arandom X0 = SP.random.randn(N,K) X0 = Spca hyperparams['x'] = X0 #standard Gaussian noise likelihood = lik.GaussLikISO() hyperparams['lik'] = SP.log([0.1]) g = gplvm.GPLVM(covar_func=covariance,likelihood=likelihood,x=X0,y=Y,gplvm_dimensions=SP.arange(X0.shape[1])) #specify optimization bounds: bounds = {} bounds['lik'] = SP.array([[-5.,5.]]*D) hyperparams['x'] = X0 print "running standard gplvm" [opt_hyperparams,opt_lml2] = opt.opt_hyper(g,hyperparams,gradcheck=False) print "optimized latent X:" print opt_hyperparams['x']
Ifilter_fa[key] = SP.ones(hyperparams_fa[key].shape,dtype='bool') #Ifilter_fa['lik'][:] = False Ifilter = {} for key in hyperparams: Ifilter[key] = SP.ones(hyperparams[key].shape,dtype='bool') #Ifilter['lik'][:] = False #[opt_hyperparams,opt_lml] = opt.opt_hyper(g,hyperparams,gradcheck=True,Ifilter=Ifilter) # hyperparams['covar'] = opt_hyperparams['covar'] # hyperparams['x'] = opt_hyperparams['x'] # hyperparams_fa['covar'] = opt_hyperparams['covar'] # hyperparams_fa['x'] = opt_hyperparams['x'] [opt_hyperparams_fa,opt_lml_fa] = opt.opt_hyper(g_fa,hyperparams_fa,gradcheck=True,Ifilter=Ifilter) #[opt_hyperparams_fa,opt_lml_fa] = optimize_test.opt_hyper(g_fa,g,hyperparams_fa,hyperparams,Ifilter=Ifilter_fa,Ifilter2=Ifilter,gradcheck=True) if 0: lml=g.LML(opt_hyperparams) lml_fa = g_fa.LML(hyperparams_fa) dg = g.LMLgrad(opt_hyperparams) dg_fa = g_fa.LMLgrad(hyperparams_fa) #[opt_hyperparams,opt_lml] = opt.opt_hyper(g_fa,hyperparams_fa,gradcheck=True,Ifilter=Ifilter_fa)
def run_demo():
    """GPTimeShift demo on simulated replicate data.

    Simulates two groups of replicated time series (sin + linear trend)
    whose replicates are shifted in time, then fits (a) a GP whose
    covariance includes a per-replicate time-shift (ShiftCF) and (b) a
    plain GP without shifts, and plots both regressions for comparison.
    """
    LG.basicConfig(level=LG.INFO)
    PL.figure()
    random.seed(1)

    #0. generate Toy-Data; just samples from a superposition of a sin + linear trend
    n_replicates = 4
    xmin = 1
    xmax = 2.5 * SP.pi
    x1_time_steps = 10
    x2_time_steps = 20
    # each group's inputs: n_replicates copies of a uniform grid
    x1 = SP.zeros(x1_time_steps * n_replicates)
    x2 = SP.zeros(x2_time_steps * n_replicates)
    for i in xrange(n_replicates):
        x1[i * x1_time_steps:(i + 1) * x1_time_steps] = SP.linspace(
            xmin, xmax, x1_time_steps)
        x2[i * x2_time_steps:(i + 1) * x2_time_steps] = SP.linspace(
            xmin, xmax, x2_time_steps)

    C = 2  #offset
    #b = 0.5
    sigma1 = 0.15
    sigma2 = 0.15
    n_noises = 1
    b = 0

    # noisy observations: linear trend (slope b) + offset + sine, centered
    y1 = b * x1 + C + 1 * SP.sin(x1)
    # dy1 = b + 1*SP.cos(x1)
    y1 += sigma1 * random.randn(y1.shape[0])
    y1 -= y1.mean()
    y2 = b * x2 + C + 1 * SP.sin(x2)
    # dy2 = b + 1*SP.cos(x2)
    y2 += sigma2 * random.randn(y2.shape[0])
    y2 -= y2.mean()

    # introduce replicate-specific time shifts (opposite sign per group)
    for i in xrange(n_replicates):
        x1[i * x1_time_steps:(i + 1) * x1_time_steps] += .7 + (i / 2.)
        x2[i * x2_time_steps:(i + 1) * x2_time_steps] -= .7 + (i / 2.)

    x1 = x1[:, SP.newaxis]
    x2 = x2[:, SP.newaxis]
    x = SP.concatenate((x1, x2), axis=0)
    y = SP.concatenate((y1, y2), axis=0)

    #predictions:
    X = SP.linspace(xmin - n_replicates, xmax + n_replicates,
                    100 * n_replicates)[:, SP.newaxis]

    #hyperparamters
    dim = 1
    # replicate index of every data point (0..2*n_replicates-1)
    replicate_indices = []
    for i, xi in enumerate((x1, x2)):
        for rep in SP.arange(i * n_replicates, (i + 1) * n_replicates):
            # NOTE(review): integer division — Python 2 semantics assumed
            replicate_indices.extend(SP.repeat(rep, len(xi) / n_replicates))
    replicate_indices = SP.array(replicate_indices)
    n_replicates = len(SP.unique(replicate_indices))

    # log-hyperparameters: [scale, length-scale, n_replicates shifts, noise]
    logthetaCOVAR = [1, 1]
    logthetaCOVAR.extend(SP.repeat(SP.exp(1), n_replicates))
    logthetaCOVAR.extend([sigma1])
    logthetaCOVAR = SP.log(logthetaCOVAR)  #,sigma2])
    hyperparams = {'covar': logthetaCOVAR}

    SECF = se.SqexpCFARD(dim)
    #noiseCF = noise.NoiseReplicateCF(replicate_indices)
    noiseCF = noise.NoiseCFISO()
    # wrap the SE kernel with per-replicate time shifts
    shiftCF = combinators.ShiftCF(SECF, replicate_indices)
    CovFun = combinators.SumCF((shiftCF, noiseCF))

    covar_priors = []
    #scale
    covar_priors.append([lnpriors.lnGammaExp,
                         [1, 2]])
    for i in range(dim):
        covar_priors.append([lnpriors.lnGammaExp, [1, 1]])
    #shift
    for i in range(n_replicates):
        covar_priors.append([lnpriors.lnGauss, [0, .5]])
    #noise
    for i in range(n_noises):
        covar_priors.append([lnpriors.lnGammaExp, [1, 1]])
    covar_priors = SP.array(covar_priors)
    priors = {'covar': covar_priors}
    # optimize all parameters (scale + length + shifts + noise)
    Ifilter = {'covar': SP.ones(n_replicates + 3)}

    gpr = GP(CovFun, x=x, y=y)
    opt_model_params = opt_hyper(gpr, hyperparams, priors=priors,
                                 gradcheck=False, Ifilter=Ifilter)[0]

    #predict
    [M, S] = gpr.predict(opt_model_params, X)

    # learnt per-replicate shifts sit at positions 2..2+n_replicates
    T = opt_model_params['covar'][2:2 + n_replicates]

    PL.subplot(212)
    gpr_plot.plot_sausage(X, M, SP.sqrt(S),
                          format_line=dict(alpha=1, color='g', lw=2,
                                           ls='-'))
    gpr_plot.plot_training_data(x, y, shift=T,
                                replicate_indices=replicate_indices,
                                draw_arrows=2)
    PL.suptitle("Example for GPTimeShift with simulated data",
                fontsize=23)
    PL.title("Regression including time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    ylim = PL.ylim()

    # baseline: plain GP without shifts (scale, length, noise only)
    gpr = GP(combinators.SumCF((SECF, noiseCF)), x=x, y=y)
    priors = {'covar': covar_priors[[0, 1, -1]]}
    hyperparams = {'covar': logthetaCOVAR[[0, 1, -1]]}
    opt_model_params = opt_hyper(gpr, hyperparams, priors=priors,
                                 gradcheck=False)[0]

    PL.subplot(211)
    #predict
    [M, S] = gpr.predict(opt_model_params, X)
    gpr_plot.plot_sausage(X, M, SP.sqrt(S),
                          format_line=dict(alpha=1, color='g', lw=2,
                                           ls='-'))
    gpr_plot.plot_training_data(x, y,
                                replicate_indices=replicate_indices)
    PL.title("Regression without time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    PL.ylim(ylim)
    PL.subplots_adjust(left=.1, bottom=.1, right=.96, top=.8,
                       wspace=.4, hspace=.4)
    PL.show()
# Build all-True boolean masks so the optimizer treats every FA-model
# hyperparameter as free.
Ifilter_fa = dict((k, SP.ones(hyperparams_fa[k].shape, dtype='bool'))
                  for k in hyperparams_fa)
#Ifilter_fa['lik'][:] = False

# Same all-True masks for the standard model's hyperparameters.
Ifilter = dict((k, SP.ones(hyperparams[k].shape, dtype='bool'))
               for k in hyperparams)
#Ifilter['lik'][:] = False

#[opt_hyperparams,opt_lml] = opt.opt_hyper(g,hyperparams,gradcheck=True,Ifilter=Ifilter)
#    hyperparams['covar'] = opt_hyperparams['covar']
#    hyperparams['x'] = opt_hyperparams['x']
#    hyperparams_fa['covar'] = opt_hyperparams['covar']
#    hyperparams_fa['x'] = opt_hyperparams['x']

# Optimize the FA model with gradient checking on; note the standard
# model's filter (Ifilter) is the one passed in.
[opt_hyperparams_fa, opt_lml_fa] = opt.opt_hyper(g_fa,
                                                 hyperparams_fa,
                                                 gradcheck=True,
                                                 Ifilter=Ifilter)
#[opt_hyperparams_fa,opt_lml_fa] = optimize_test.opt_hyper(g_fa,g,hyperparams_fa,hyperparams,Ifilter=Ifilter_fa,Ifilter2=Ifilter,gradcheck=True)

if 0:
    # Dead debug branch: compare likelihoods/gradients of both models.
    lml = g.LML(opt_hyperparams)
    lml_fa = g_fa.LML(hyperparams_fa)
    dg = g.LMLgrad(opt_hyperparams)
    dg_fa = g_fa.LMLgrad(hyperparams_fa)
#[opt_hyperparams,opt_lml] = opt.opt_hyper(g_fa,hyperparams_fa,gradcheck=True,Ifilter=Ifilter_fa)
def run_demo():
    """Basic GP regression demo on toy data.

    Fits a squared-exponential ARD GP with Gaussian likelihood to toy
    data, optimizes the hyperparameters under Gamma priors, predicts on
    a uniform grid and plots the predictive band with the training data.
    """
    LG.basicConfig(level=LG.INFO)
    random.seed(1)

    # 1. create toy data
    x, y = create_toy_data()
    n_dimensions = 1

    # 2. uniformly spaced inputs at which to predict
    X = SP.linspace(0, 10, 100)[:, SP.newaxis]

    if 0:
        # old interface: covariance function and likelihood are one object
        hyperparams = {'covar': SP.log([1, 1, 1])}
        SECF = se.SqexpCFARD(n_dimensions=n_dimensions)
        noiseCF = noise.NoiseCFISO()
        covar = combinators.SumCF((SECF, noiseCF))
        # priors: scale, one length-scale per dimension, noise
        covar_priors = [[lnpriors.lnGammaExp, [1, 2]]]
        covar_priors += [[lnpriors.lnGammaExp, [1, 1]]
                         for i in xrange(n_dimensions)]
        covar_priors += [[lnpriors.lnGammaExp, [1, 1]]]
        priors = {'covar': covar_priors}
        likelihood = None

    if 1:
        # new interface: likelihood parameters decoupled from covariance
        likelihood = lik.GaussLikISO()
        hyperparams = {'covar': SP.log([1, 1]), 'lik': SP.log([1])}
        SECF = se.SqexpCFARD(n_dimensions=n_dimensions)
        covar = SECF
        # priors: scale + one length-scale per dimension on the covariance
        covar_priors = [[lnpriors.lnGammaExp, [1, 2]]]
        covar_priors += [[lnpriors.lnGammaExp, [1, 1]]
                         for i in xrange(n_dimensions)]
        # noise prior now lives with the likelihood
        lik_priors = [[lnpriors.lnGammaExp, [1, 1]]]
        priors = {'covar': covar_priors, 'lik': lik_priors}

    gp = GP(covar, likelihood=likelihood, x=x, y=y)
    opt_model_params = opt.opt_hyper(gp, hyperparams,
                                     priors=priors, gradcheck=False)[0]

    # predict on the grid
    [M, S] = gp.predict(opt_model_params, X)

    # create plots
    gpr_plot.plot_sausage(X, M, SP.sqrt(S))
    gpr_plot.plot_training_data(x, y)
    PL.show()
def run_demo():
    """GPTimeShift demo on simulated replicate data.

    Simulates two groups of replicated time series (sin + linear trend)
    whose replicates are shifted in time, then fits (a) a GP whose
    covariance includes a per-replicate time-shift (ShiftCF) and (b) a
    plain GP without shifts, and plots both regressions for comparison.
    """
    LG.basicConfig(level=LG.INFO)
    PL.figure()
    random.seed(1)

    #0. generate Toy-Data; just samples from a superposition of a sin + linear trend
    n_replicates = 4
    xmin = 1
    xmax = 2.5*SP.pi
    x1_time_steps = 10
    x2_time_steps = 20
    # each group's inputs: n_replicates copies of a uniform grid
    x1 = SP.zeros(x1_time_steps*n_replicates)
    x2 = SP.zeros(x2_time_steps*n_replicates)
    for i in xrange(n_replicates):
        x1[i*x1_time_steps:(i+1)*x1_time_steps] = SP.linspace(xmin, xmax, x1_time_steps)
        x2[i*x2_time_steps:(i+1)*x2_time_steps] = SP.linspace(xmin, xmax, x2_time_steps)

    C = 2  #offset
    #b = 0.5
    sigma1 = 0.15
    sigma2 = 0.15
    n_noises = 1
    b = 0

    # noisy observations: linear trend (slope b) + offset + sine, centered
    y1 = b*x1 + C + 1*SP.sin(x1)
    # dy1 = b + 1*SP.cos(x1)
    y1 += sigma1*random.randn(y1.shape[0])
    y1 -= y1.mean()
    y2 = b*x2 + C + 1*SP.sin(x2)
    # dy2 = b + 1*SP.cos(x2)
    y2 += sigma2*random.randn(y2.shape[0])
    y2 -= y2.mean()

    # introduce replicate-specific time shifts (opposite sign per group)
    for i in xrange(n_replicates):
        x1[i*x1_time_steps:(i+1)*x1_time_steps] += .7 + (i/2.)
        x2[i*x2_time_steps:(i+1)*x2_time_steps] -= .7 + (i/2.)

    x1 = x1[:, SP.newaxis]
    x2 = x2[:, SP.newaxis]
    x = SP.concatenate((x1, x2), axis=0)
    y = SP.concatenate((y1, y2), axis=0)

    #predictions:
    X = SP.linspace(xmin-n_replicates, xmax+n_replicates, 100*n_replicates)[:, SP.newaxis]

    #hyperparamters
    dim = 1
    # replicate index of every data point (0..2*n_replicates-1)
    replicate_indices = []
    for i, xi in enumerate((x1, x2)):
        for rep in SP.arange(i*n_replicates, (i+1)*n_replicates):
            # NOTE(review): integer division — Python 2 semantics assumed
            replicate_indices.extend(SP.repeat(rep, len(xi)/n_replicates))
    replicate_indices = SP.array(replicate_indices)
    n_replicates = len(SP.unique(replicate_indices))

    # log-hyperparameters: [scale, length-scale, n_replicates shifts, noise]
    logthetaCOVAR = [1, 1]
    logthetaCOVAR.extend(SP.repeat(SP.exp(1), n_replicates))
    logthetaCOVAR.extend([sigma1])
    logthetaCOVAR = SP.log(logthetaCOVAR)  #,sigma2])
    hyperparams = {'covar': logthetaCOVAR}

    SECF = se.SqexpCFARD(dim)
    #noiseCF = noise.NoiseReplicateCF(replicate_indices)
    noiseCF = noise.NoiseCFISO()
    # wrap the SE kernel with per-replicate time shifts
    shiftCF = combinators.ShiftCF(SECF, replicate_indices)
    CovFun = combinators.SumCF((shiftCF, noiseCF))

    covar_priors = []
    #scale
    covar_priors.append([lnpriors.lnGammaExp, [1, 2]])
    for i in range(dim):
        covar_priors.append([lnpriors.lnGammaExp, [1, 1]])
    #shift
    for i in range(n_replicates):
        covar_priors.append([lnpriors.lnGauss, [0, .5]])
    #noise
    for i in range(n_noises):
        covar_priors.append([lnpriors.lnGammaExp, [1, 1]])
    covar_priors = SP.array(covar_priors)
    priors = {'covar': covar_priors}
    # optimize all parameters (scale + length + shifts + noise)
    Ifilter = {'covar': SP.ones(n_replicates+3)}

    gpr = GP(CovFun, x=x, y=y)
    opt_model_params = opt_hyper(gpr, hyperparams, priors=priors, gradcheck=False, Ifilter=Ifilter)[0]

    #predict
    [M, S] = gpr.predict(opt_model_params, X)

    # learnt per-replicate shifts sit at positions 2..2+n_replicates
    T = opt_model_params['covar'][2:2+n_replicates]

    PL.subplot(212)
    gpr_plot.plot_sausage(X, M, SP.sqrt(S), format_line=dict(alpha=1, color='g', lw=2, ls='-'))
    gpr_plot.plot_training_data(x, y, shift=T, replicate_indices=replicate_indices, draw_arrows=2)
    PL.suptitle("Example for GPTimeShift with simulated data", fontsize=23)
    PL.title("Regression including time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    ylim = PL.ylim()

    # baseline: plain GP without shifts (scale, length, noise only)
    gpr = GP(combinators.SumCF((SECF, noiseCF)), x=x, y=y)
    priors = {'covar': covar_priors[[0, 1, -1]]}
    hyperparams = {'covar': logthetaCOVAR[[0, 1, -1]]}
    opt_model_params = opt_hyper(gpr, hyperparams, priors=priors, gradcheck=False)[0]

    PL.subplot(211)
    #predict
    [M, S] = gpr.predict(opt_model_params, X)
    gpr_plot.plot_sausage(X, M, SP.sqrt(S), format_line=dict(alpha=1, color='g', lw=2, ls='-'))
    gpr_plot.plot_training_data(x, y, replicate_indices=replicate_indices)
    PL.title("Regression without time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    PL.ylim(ylim)
    PL.subplots_adjust(left=.1, bottom=.1, right=.96, top=.8, wspace=.4, hspace=.4)
    PL.show()
logthetaCOVAR = SP.log([.4, 3.2, 0.3])#,sigma2]) hyperparams = {'covar':logthetaCOVAR} covar_priors = SP.array(covar_priors) priors = {'covar' : covar_priors[[0, 1, 2]]} priors_BP = {'covar' : covar_priors} Ifilter = {'covar' : SP.array([1, 1, 1], dtype='int')} Ifilter_BP = {'covar' : SP.array([1, 1, 1, 0], dtype='int')} #gpr_BP = GPR.GP(CovFun,x=x,y=y) gpr_BP = GP(CovFun, x=x.reshape(-1,1), y=y.reshape(-1,1)) # gpr_opt_hyper = GP(combinators.SumCF((SECF,noiseCF)),x=x,y=y) gpr_opt_hyper = GroupGP((GP(combinators.SumCF((SECF, noiseCF)), x=x1.reshape(-1,1), y=C[1].reshape(-1,1)), GP(combinators.SumCF((SECF, noiseCF)), x=x2.reshape(-1,1), y=T[1].reshape(-1,1)))) [opt_model_params, opt_lml] = opt_hyper(gpr_opt_hyper, hyperparams, priors=priors, gradcheck=False, Ifilter=Ifilter) # opt_model_params = hyperparams print SP.exp(opt_model_params['covar']) # import copy # _hyperparams = copy.deepcopy(opt_model_params) # _logtheta = SP.array([0,0,0,0],dtype='double') # _logtheta[:2] = _hyperparams['covar'][:2] # _logtheta[3] = _hyperparams['covar'][2] # _hyperparams['covar'] = _logtheta #[opt_model_params,opt_lml]=GPR.optHyper(gpr_BP,hyperparams,priors=priors,gradcheck=True,Ifilter=Ifilter) import pygp.plot.gpr_plot as gpr_plot first = True [M, S] = gpr_opt_hyper.predict(opt_model_params, X)
# NOTE(review): script fragment — covar_priors, hyperparams, x, y, x1,
# x2, C, T, SECF, noiseCF and CovFun are defined outside the visible
# source. C and T appear to be data containers here (C[1], T[1] used as
# regression targets) — confirm against the enclosing function.
# Priors/filters: the *_BP variants cover a 4-parameter covariance with
# the last entry fixed (Ifilter_BP); the plain variants cover 3 params.
priors = {"covar": covar_priors[[0, 1, 2]]}
priors_BP = {"covar": covar_priors}
Ifilter = {"covar": SP.array([1, 1, 1], dtype="int")}
Ifilter_BP = {"covar": SP.array([1, 1, 1, 0], dtype="int")}
# gpr_BP = GPR.GP(CovFun,x=x,y=y)
gpr_BP = GP(CovFun, x=x.reshape(-1, 1), y=y.reshape(-1, 1))
# gpr_opt_hyper = GP(combinators.SumCF((SECF,noiseCF)),x=x,y=y)
# Two independent GPs (one per condition) optimized jointly as a group.
gpr_opt_hyper = GroupGP(
    (
        GP(combinators.SumCF((SECF, noiseCF)),
           x=x1.reshape(-1, 1), y=C[1].reshape(-1, 1)),
        GP(combinators.SumCF((SECF, noiseCF)),
           x=x2.reshape(-1, 1), y=T[1].reshape(-1, 1)),
    )
)
[opt_model_params, opt_lml] = opt_hyper(gpr_opt_hyper, hyperparams,
                                        priors=priors, gradcheck=False,
                                        Ifilter=Ifilter)
# opt_model_params = hyperparams
# Report optimized hyperparameters on their natural (non-log) scale.
print SP.exp(opt_model_params["covar"])
# import copy
# _hyperparams = copy.deepcopy(opt_model_params)
# _logtheta = SP.array([0,0,0,0],dtype='double')
# _logtheta[:2] = _hyperparams['covar'][:2]
# _logtheta[3] = _hyperparams['covar'][2]
# _hyperparams['covar'] = _logtheta
# [opt_model_params,opt_lml]=GPR.optHyper(gpr_BP,hyperparams,priors=priors,gradcheck=True,Ifilter=Ifilter)
import pygp.plot.gpr_plot as gpr_plot
first = True