Example No. 1
def run_demo():
    LG.basicConfig(level=LG.DEBUG)
    SP.random.seed(10)

    #1. create toy data
    x,y,z,sigma,X,actual_inv,L = create_toy_data()
    n_dimensions = 1
    n_terms = 3
    # build GP
    likelihood = GaussLikISO()
    covar_parms = SP.log([1,1,1E-5])
    hyperparams = {'covar':covar_parms,'lik':SP.log([sigma]), 'warping': (1E-2*SP.random.randn(n_terms,3))}

    SECF = se.SqexpCFARD(n_dimensions=n_dimensions)
    covar = combinators.SumCF([SECF, BiasCF(n_dimensions=n_dimensions)])
    warping_function = TanhWarpingFunction(n_terms=n_terms)
    mean_function = LinMeanFunction(X=SP.ones([x.shape[0], 1]))
    hyperparams['mean'] = 1E-2 * SP.random.randn(1)
    bounds = {}
    bounds.update(warping_function.get_bounds())

    gp = WARPEDGP(warping_function=warping_function,
                  mean_function=mean_function,
                  covar_func=covar, likelihood=likelihood, x=x, y=z)
    opt_model_params = opt_hyper(gp, hyperparams,
                                 bounds=bounds,
                                 gradcheck=True)[0]

    print "WARPED GP (neg) likelihood: ", gp.LML(opt_model_params)

    #hyperparams['mean'] = SP.log(1)
    PL.figure()
    PL.plot(z)
    PL.plot(warping_function.f(y, opt_model_params['warping']))
    PL.legend(["real function", "learnt function"])
    
    PL.figure()
    PL.plot(actual_inv)
    PL.plot(warping_function.f_inv(gp.y, opt_model_params['warping']))
    PL.legend(['real inverse','learnt inverse'])

    # drop the warped-GP-specific parameters and fit a standard GP for comparison
    hyperparams.pop("warping")
    hyperparams.pop("mean")
    gp = GP(covar, likelihood=likelihood, x=x, y=y)
    opt_model_params = opt_hyper(gp, hyperparams,
                                 gradcheck=False)[0]
    print "GP (neg) likelihood: ", gp.LML(opt_model_params)
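For reference, a minimal NumPy sketch of the warping transform that TanhWarpingFunction presumably implements: the tanh warping of Snelson et al.'s warped GP, f(y) = y + sum_i a_i*tanh(b_i*(y + c_i)), which matches the (n_terms, 3) parameter array above (the exact pygp parameterization may differ):

import numpy as np

def tanh_warp(y, psi):
    """f(y) = y + sum_i a_i * tanh(b_i * (y + c_i)).

    psi has shape (n_terms, 3), one (a_i, b_i, c_i) row per term;
    monotonicity requires a_i, b_i >= 0, hence the bounds above.
    """
    f = y.copy()
    for a, b, c in psi:
        f = f + a * np.tanh(b * (y + c))
    return f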
Example No. 2
    def optimize(self, initial_hyperparams,
                 bounds=None,
                 burniniter=200,
                 gradient_tolerance=1e-15,
                 maxiter=15000,
                 release_beta=True):
        # zero out the filter entries for beta so it stays fixed during burn-in
        ifilter = self.get_filter(initial_hyperparams)
        for key in ifilter.iterkeys():
            if key.startswith('beta'):
                ifilter[key][:] = 0

        # burn in with beta fixed:
        hyperparams, _ = opt_hyper(self, initial_hyperparams,
                                   gradient_tolerance=gradient_tolerance, maxiter=burniniter,
                                   Ifilter=ifilter, bounds=bounds)
        if release_beta:
            # then release beta and optimize all parameters jointly
            hyperparams, _ = opt_hyper(self, hyperparams,
                                       gradient_tolerance=gradient_tolerance,
                                       bounds=bounds, maxiter=maxiter)
        return hyperparams
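The burn-in trick above relies on opt_hyper's Ifilter argument: a dict mirroring hyperparams whose 0/1 entries mask the gradient, so zeroed entries stay at their initial values. A hedged sketch of building such a filter by hand (get_filter above presumably returns the all-ones version):

import numpy as np

# Hypothetical hyperparameter dict; beta is the part frozen during burn-in.
hyperparams = {'covar': np.log([1., 1.]), 'beta': np.array([0.1])}

# 1 = optimize this entry, 0 = keep it fixed.
ifilter = dict((key, np.ones_like(value)) for key, value in hyperparams.items())
ifilter['beta'][:] = 0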
Example No. 3
    def predict_model_likelihoods(self, training_data=None, interval_indices=None, *args, **kwargs):
        """
        Predict the probabilities of the models (individual and common) to describe the data.
        It will optimize hyperparameters respectively.

        **Parameters**:

        training_data : dict
            The training data to learn from. Inputs are time points and
            outputs are expression values, e.g. of a time series.
            If not given, training data must have been set previously via
            :py:class:`gptwosample.twosample.basic.set_data`.

        interval_indices: :py:class:`gptwosample.data.data_base.get_model_structure()`
            interval indices, which assign data to individual or common model,
            respectively.

        args : [..]
            see :py:class:`pygp.gpr.gp_base.GP`

        kwargs : {..}
            see :py:class:`pygp.gpr.gp_base.GP`

        """
        if interval_indices is None:
            interval_indices = get_model_structure()
        if training_data is not None:
            self.set_data(training_data)

        for name, model in self._models.iteritems():
            model.set_active_set_indices(interval_indices[name])
            try:
                if self._learn_hyperparameters:
                    opt_hyperparameters = opt_hyper(model,
                                                    self._initial_hyperparameters[name],
                                                    priors=self._priors[name],
                                                    *args, **kwargs)[0]
                    self._learned_hyperparameters[name] = opt_hyperparameters
                else:
                    self._learned_hyperparameters[name] = self._initial_hyperparameters[name]
            except ValueError as r:
                print "caught error:", r.message, "\r",
                self._learned_hyperparameters[name] = self._initial_hyperparameters[name]
            self._model_likelihoods[name] = model.LML(self._learned_hyperparameters[name],
                                                      priors=self._priors)

        return self._model_likelihoods
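A hedged usage sketch: the returned dict holds one (negative) log marginal likelihood per model, from which a log Bayes factor between the individual and common models can be computed. The object name, the key names, and the sign convention below are assumptions:

# hypothetical gptwosample object, constructed and fed with data elsewhere
likelihoods = twosample.predict_model_likelihoods(training_data)
# LML is the *negative* log marginal likelihood, so the log Bayes factor
# in favour of the individual (two-function) model is:
log_bayes = likelihoods['common'] - likelihoods['individual']
print "log Bayes factor (individual vs common):", log_bayes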
Example No. 4
    # learn latent variables
    skeys = numpy.sort(hyperparams.keys())
    param_struct = dict([(name, hyperparams[name].shape) for name in skeys])

    bounds = {}
    hyper_for_optimizing = copy.deepcopy(hyperparams)
    opt_hyperparams, opt_lml = opt_hyper(g, hyper_for_optimizing, bounds=bounds,
                                         maxiter=10000, messages=True)
    # One could inspect exp(2 * opt_hyperparams['covar']) and restart the
    # optimization if all ARD weights collapse to similar values.
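For context, the bounds argument of opt_hyper appears to mirror the hyperparams dict, mapping each parameter name to one (lower, upper) tuple per flattened entry, with None leaving a side open; this matches warping_function.get_bounds() in Example No. 1, but the exact format is an assumption. A sketch:

import numpy

# Assumed bounds format: one (lower, upper) pair per flattened entry.
bounds = {
    'beta': [(numpy.log(1. / 1E-3), None)],  # lower-bound beta on the log scale
}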
Example No. 5
    X = Spca.copy()
    #X = SP.random.randn(N,K)
    gplvm = GPLVM(covar_func=covariance, x=X, y=Y)
   
    # plain GP on the first output dimension (unused in this fragment)
    gpr = GP(covar_func=covariance, x=X, y=Y[:, 0])
    
    #construct hyperparams
    covar = SP.log([0.1, 1.0, 0.1])

    #X are hyperparameters, i.e. we optimize over them also

    #1. this is jointly with the latent X
    X_ = X.copy()
    hyperparams = {'covar': covar, 'x': X_}
    

    #for testing just covar params alone:
    #hyperparams = {'covar': covar}
    
    #evaluate log marginal likelihood
    lml = gplvm.LML(hyperparams=hyperparams)
    [opt_model_params, opt_lml] = opt_hyper(gplvm, hyperparams, gradcheck=False)
    Xo = opt_model_params['x']
    

    # correlation with the true sources: PCA initialization vs. learned latents
    for k in xrange(K):
        print SP.corrcoef(Spca[:, k], S[:, k])
    print "=================="
    for k in xrange(K):
        print SP.corrcoef(Xo[:, k], S[:, k])
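Spca above is the PCA projection of Y used to initialize the latent coordinates, the usual GPLVM starting point. A minimal sketch of such an initialization, assuming Y is the (N, D) data matrix and K the latent dimensionality:

from scipy import linalg

def pca_init(Y, K):
    """Project Y onto its top-K principal components (common GPLVM init)."""
    Yc = Y - Y.mean(axis=0)                       # center the data
    U, s, Vt = linalg.svd(Yc, full_matrices=False)
    return U[:, :K] * s[:K]                       # (N, K) latent coordinates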
Example No. 8
        def f4(x):
            hyperparams['warping'][:] = x
            return gp.LML(hyperparams)

        def df4(x):
            hyperparams['warping'][:] = x
            return gp.LMLgrad(hyperparams)['warping']

        x = hyperparams['warping'].copy()
        checkgrad(f4, df4, x)

    lmld = gp.LMLgrad(hyperparams)
    print lmld

    #gp = GP(covar,likelihood=likelihood,x=x,y=y)
    opt_model_params = opt_hyper(gp,
                                 hyperparams,
                                 bounds=bounds,
                                 maxiter=10000,
                                 gradcheck=True)[0]

    PL.figure(2)
    z_values = SP.linspace(z.min(), z.max(), 100)
    PL.plot(Itrafo(gp.y))
    #PL.plot(z_values, Itrafo(L*z_values))
    pred_inverse = warping_function.f_inv(gp.y,
                                          opt_model_params['warping'],
                                          iterations=10)
    PL.plot(pred_inverse)
    #PL.plot(z,y,'r.')
    #PL.legend(['real inverse','learnt inverse','data'])
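checkgrad above, like the gradcheck=True flag to opt_hyper, validates the analytic gradient of the negative log marginal likelihood against finite differences. A self-contained sketch of the idea:

import numpy as np

def check_grad(f, df, x, eps=1e-6):
    """Max deviation between df(x) and central finite differences of f."""
    x = np.asarray(x, dtype=float)
    num = np.zeros_like(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step.flat[i] = eps
        num.flat[i] = (f(x + step) - f(x - step)) / (2 * eps)
    return np.max(np.abs(np.asarray(df(x), dtype=float) - num))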
Example No. 9
covar_params = SP.random.randn(n_dimensions + 1)
lik_params = SP.random.randn(1)

Xs = SP.linspace(X.min() - 3, X.max() + 3)[:, SP.newaxis]

t0 = time.time()
#pygp: OLD
covar_ = se.SqexpCFARD(n_dimensions)
ll_ = lik.GaussLikISO()
hyperparams_ = {'covar': covar_params, 'lik': lik_params}
gp_ = GP.GP(covar_, likelihood=ll_, x=X, y=y)
lml_ = gp_.LML(hyperparams_)
dlml_ = gp_.LMLgrad(hyperparams_)
#optimize using pygp:
opt_params_ = opt.opt_hyper(gp_, hyperparams_)[0]
lmlo_ = gp_.LML(opt_params_)

pdb.set_trace()  # pause here to inspect the pygp results interactively

#GPMIX:
cov = SP.ones([y.shape[0], 2])
cov[:, 1] = SP.random.randn(cov.shape[0])
covar = limix.CCovSqexpARD(n_dimensions)
ll = limix.CLikNormalIso()

if 1:  # toggle: linear mean function vs. plain data term
    data = limix.CLinearMean(y, cov)
    data_params = SP.ones([cov.shape[1]])
else:
    data = limix.CData()
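Both libraries use the ARD squared-exponential covariance with log-scale hyperparameters. A NumPy sketch under the assumed convention covar_params = log([A, L_1, ..., L_D]):

import numpy as np

def sqexp_ard(theta, X1, X2):
    """k(x, x') = A**2 * exp(-0.5 * sum_d (x_d - x'_d)**2 / L_d**2)
    with theta = log([A, L_1, ..., L_D]) (assumed parameterization)."""
    amp2 = np.exp(2 * theta[0])                 # amplitude variance A**2
    L = np.exp(theta[1:])                       # one length-scale per dimension
    d = (X1[:, None, :] - X2[None, :, :]) / L   # scaled pairwise differences
    return amp2 * np.exp(-0.5 * (d ** 2).sum(-1))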
Example No. 10
def run_demo():
    LG.basicConfig(level=LG.INFO)
    PL.figure()
    
    random.seed(1)
    
    #0. generate Toy-Data; just samples from a superposition of a sin + linear trend
    n_replicates = 4
    xmin = 1
    xmax = 2.5*SP.pi

    x1_time_steps = 10
    x2_time_steps = 20
    
    x1 = SP.zeros(x1_time_steps*n_replicates)
    x2 = SP.zeros(x2_time_steps*n_replicates)

    for i in xrange(n_replicates):
        x1[i*x1_time_steps:(i+1)*x1_time_steps] = SP.linspace(xmin,xmax,x1_time_steps)
        x2[i*x2_time_steps:(i+1)*x2_time_steps] = SP.linspace(xmin,xmax,x2_time_steps)

    C = 2       #offset
    #b = 0.5
    sigma1 = 0.15
    sigma2 = 0.15
    n_noises = 1
    
    b = 0
    
    y1 = b*x1 + C + 1*SP.sin(x1)
#    dy1 = b   +     1*SP.cos(x1)
    y1 += sigma1*random.randn(y1.shape[0])
    y1 -= y1.mean()

    y2 = b*x2 + C + 1*SP.sin(x2)
#    dy2 = b   +     1*SP.cos(x2)
    y2 += sigma2*random.randn(y2.shape[0])
    y2 -= y2.mean()
    
    # shift each replicate in time, so there is a time shift to recover
    for i in xrange(n_replicates):
        x1[i*x1_time_steps:(i+1)*x1_time_steps] += .7 + (i/2.)
        x2[i*x2_time_steps:(i+1)*x2_time_steps] -= .7 + (i/2.)

    x1 = x1[:,SP.newaxis]
    x2 = x2[:,SP.newaxis]
    
    x = SP.concatenate((x1,x2),axis=0)
    y = SP.concatenate((y1,y2),axis=0)
    
    #predictions:
    X = SP.linspace(xmin-n_replicates,xmax+n_replicates,100*n_replicates)[:,SP.newaxis]
    
    #hyperparameters
    dim = 1
    replicate_indices = []
    for i,xi in enumerate((x1,x2)):
        for rep in SP.arange(i*n_replicates, (i+1)*n_replicates):
            replicate_indices.extend(SP.repeat(rep,len(xi)/n_replicates))
    replicate_indices = SP.array(replicate_indices)
    n_replicates = len(SP.unique(replicate_indices))
    
    logthetaCOVAR = [1, 1]
    logthetaCOVAR.extend(SP.repeat(SP.exp(1), n_replicates))
    logthetaCOVAR.extend([sigma1])
    logthetaCOVAR = SP.log(logthetaCOVAR)
    hyperparams = {'covar': logthetaCOVAR}
    
    SECF = se.SqexpCFARD(dim)
    #noiseCF = noise.NoiseReplicateCF(replicate_indices)
    noiseCF = noise.NoiseCFISO()
    shiftCF = combinators.ShiftCF(SECF,replicate_indices)
    CovFun = combinators.SumCF((shiftCF,noiseCF))
    
    covar_priors = []
    #scale
    covar_priors.append([lnpriors.lnGammaExp,[1,2]])
    for i in range(dim):
        covar_priors.append([lnpriors.lnGammaExp,[1,1]])
    #shift
    for i in range(n_replicates):
        covar_priors.append([lnpriors.lnGauss,[0,.5]])    
    #noise
    for i in range(n_noises):
        covar_priors.append([lnpriors.lnGammaExp,[1,1]])
    
    covar_priors = SP.array(covar_priors)
    priors = {'covar':covar_priors}
    Ifilter = {'covar': SP.ones(n_replicates+3)}  # all covariance parameters free
    
    gpr = GP(CovFun,x=x,y=y) 
    opt_model_params = opt_hyper(gpr,hyperparams,priors=priors,gradcheck=True,Ifilter=Ifilter)[0]
    
    #predict
    [M,S_glu] = gpr.predict(opt_model_params,X)
    
    T = opt_model_params['covar'][2:2+n_replicates]  # learned per-replicate time shifts
    
    PL.subplot(212)
    gpr_plot.plot_sausage(X,M,SP.sqrt(S_glu),format_line=dict(alpha=1,color='g',lw=2, ls='-'))
    gpr_plot.plot_training_data(x,y,shift=T,replicate_indices=replicate_indices,draw_arrows=2)
    
    PL.suptitle("Example for GPTimeShift with simulated data", fontsize=23)
    
    PL.title("Regression including time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    ylim = PL.ylim()
    
    gpr = GP(combinators.SumCF((SECF,noiseCF)),x=x,y=y)
    priors = {'covar':covar_priors[[0,1,-1]]}
    hyperparams = {'covar':logthetaCOVAR[[0,1,-1]]}
    opt_model_params = opt_hyper(gpr,hyperparams,priors=priors,gradcheck=True)[0]
    
    PL.subplot(211)
    #predict
    [M,S_glu] = gpr.predict(opt_model_params,X)
    
    gpr_plot.plot_sausage(X,M,SP.sqrt(S_glu),format_line=dict(alpha=1,color='g',lw=2, ls='-'))
    gpr_plot.plot_training_data(x,y,replicate_indices=replicate_indices)
    
    PL.title("Regression without time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    PL.ylim(ylim)
    
    PL.subplots_adjust(left=.1, bottom=.1,
                       right=.96, top=.8,
                       wspace=.4, hspace=.4)
    PL.show()
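ShiftCF implements the time-shift idea used above: each replicate r carries a learned offset t_r that is subtracted from its inputs before the SE covariance is evaluated, i.e. k(x_i, x_j) = k_SE(x_i - t_r(i), x_j - t_r(j)). A hedged sketch of the input shift, under that assumed convention:

import scipy as SP

def shift_inputs(x, replicate_indices, shifts):
    """Subtract each replicate's time shift from its inputs (ShiftCF idea)."""
    return x - shifts[replicate_indices][:, SP.newaxis]

The shifted inputs then go through an ordinary SE covariance, which is why the learned shifts T above can be handed straight to plot_training_data via shift=T.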