Example #1
    def test_gpkronprod(self):
        # Kronecker-product GP: a column covariance over tasks (covar_c), a row covariance
        # over samples (covar_r) and an isotropic Gaussian likelihood
        # initialize
        covar_c = linear.LinearCF(n_dimensions=self.n_latent)
        covar_r = linear.LinearCF(n_dimensions=self.n_dimensions)
        X0_c = SP.random.randn(self.n_tasks,self.n_latent)

        lik = likelihood_base.GaussIsoLik()
        gp = gp_kronprod.KronProdGP(covar_c=covar_c, covar_r=covar_r, likelihood=lik)
        gp.setData(Y=self.Ykronprod['train'],X_r=self.X['train'])
        hyperparams = {'lik':SP.array([0.5]), 'X_c':X0_c, 'covar_r':SP.array([0.5]), 'covar_c':SP.array([0.5]), 'X_r':self.X['train']}
        # check predictions, likelihood and gradients
        gp.predict(hyperparams,Xstar_r=self.X['test'],debugging=True)

        gp._LML_covar(hyperparams,debugging=True)
        gp._LMLgrad_covar(hyperparams,debugging=True)
        gp._LMLgrad_lik(hyperparams,debugging=True)
        gp._LMLgrad_x(hyperparams,debugging=True)

        # optimize
        hyperparams = {'lik':SP.array([0.5]), 'X_c':X0_c, 'covar_r':SP.array([0.5]), 'covar_c':SP.array([0.5])}
        opts = {'gradcheck':True}
        hyperparams_opt,lml_opt = optimize_base.opt_hyper(gp,hyperparams,opts=opts)
        Kest = covar_c.K(hyperparams_opt['covar_c'])

        # check predictions, likelihood and gradients
        gp._invalidate_cache() # otherwise debugging parameters are not up to date!
        gp.predict(hyperparams_opt,debugging=True,Xstar_r=self.X['test'])
        gp._LML_covar(hyperparams_opt,debugging=True)
        gp._LMLgrad_covar(hyperparams_opt,debugging=True)
        gp._LMLgrad_lik(hyperparams_opt,debugging=True)
        gp._LMLgrad_x(hyperparams_opt,debugging=True)
Example #2
    def test_gplvm(self):
        covar = linear.LinearCF(n_dimensions=self.n_latent)
        lik = likelihood_base.GaussIsoLik()
        prior = priors.GaussianPrior(key='X', theta=SP.array([1.]))
        gp = gplvm.GPLVM(covar=covar, likelihood=lik, prior=prior)

        X0 = SP.random.randn(self.n_tasks, self.n_latent)
        X0 = self.Xlatent
        covar.X = X0
        gp.setData(Y=self.Ylatent)

        # gradient with respect to X
        hyperparams = {
            'covar': SP.array([0.5]),
            'lik': SP.array([0.5]),
            'X': X0
        }

        LML = gp.LML(hyperparams)
        LMLgrad = gp.LMLgrad(hyperparams)

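        # recompute the gradient of the LML with respect to every latent coordinate X[n, d]
        # by hand, 0.5 * sum(W * dK/dX[n, d]), and add the gradient of the Gaussian prior on X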
        LMLgrad_x = SP.zeros((self.n_tasks, self.n_latent))
        W = gp.get_covariances(hyperparams)['W']
        for d in xrange(self.n_latent):
            for n in xrange(self.n_tasks):
                Knd_grad = covar.Kgrad_x(hyperparams['covar'], d, n)
                LMLgrad_x[n, d] = 0.5 * (W * Knd_grad).sum()

        LMLgrad_x += prior.LMLgrad(hyperparams)['X']
        assert SP.allclose(
            LMLgrad['X'],
            LMLgrad_x), 'ouch, gradient with respect to X is wrong'

        # optimize
        opts = {'gradcheck': True}
        hyperparams_opt, lml_opt = optimize_base.opt_hyper(gp,
                                                           hyperparams,
                                                           opts=opts)
        Ktrue = SP.dot(self.Xlatent, self.Xlatent.T)
        covar.X = hyperparams_opt['X']
        Kest = covar.K(hyperparams_opt['covar'])

        # gradient with respect to X
        LML = gp.LML(hyperparams_opt)
        LMLgrad = gp.LMLgrad(hyperparams_opt)
        LMLgrad_x = SP.zeros((self.n_tasks, self.n_latent))
        W = gp.get_covariances(hyperparams_opt)['W']
        for d in xrange(self.n_latent):
            for n in xrange(self.n_tasks):
                Knd_grad = covar.Kgrad_x(hyperparams_opt['covar'], d, n)
                LMLgrad_x[n, d] = 0.5 * (W * Knd_grad).sum()
        LMLgrad_x += prior.LMLgrad(hyperparams_opt)['X']
        assert SP.allclose(
            LMLgrad['X'],
            LMLgrad_x), 'ouch, gradient with respect to X is wrong'
Example #3
def run_optimizer(method, gp, opts, Y, X_r, Icv, cv_idx, X_o=None):
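    # `Icv` assigns each sample to a cross-validation fold; the fold equal to `cv_idx` is held
    # out for testing and the remaining folds are used for training. `X_o` is an optional
    # second set of row covariates.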
    if 'min_iter' in opts:
        min_iter = opts['min_iter']
    else:
        min_iter = 10
    if 'max_iter' in opts:
        max_iter = opts['max_iter']
    else:
        max_iter = 100

    # initialize
    LG.info('Optimize %s' % method)
    converged = False
    lmltest_global = SP.inf
    hyperparams_global = None
    Ypred_global = None
    r2_global = -SP.inf
    # hold nfolds of the data out
    Itrain = Icv != cv_idx
    Itest = Icv == cv_idx
    i = 1
    while True:
        LG.info('Iteration: %d' % i)
        converged = False
        # stop, if maximum number of iterations is reached
        if i > max_iter:
            break

        # set data
        if X_o is None:
            gp.setData(Y=Y[Itrain], X_r=X_r[Itrain])
        else:
            gp.setData(Y=Y[Itrain], X_r=X_r[Itrain], X_o=X_o[Itrain])
        hyperparams, Ifilter, bounds = initialize.init(method, Y[Itrain].T,
                                                       X_r[Itrain], opts)

        try:
            [hyperparams_opt, lmltrain] = opt.opt_hyper(gp,
                                                        hyperparams,
                                                        opts=opts,
                                                        Ifilter=Ifilter,
                                                        bounds=bounds)
            # the gradient need not be 0, because there are bounds on the hyperparameters...
            gradient = SP.array([
                LA.norm(x) for x in gp.LMLgrad(hyperparams_opt).values()
            ]).mean()
            LG.info('LMLtrain: %.3f' % gp.LML(hyperparams_opt))
            LG.info('Gradient: %.3f' % (gradient))
            converged = True

        except AssertionError, error:
            print 'Assertion Error: %s' % error
            continue
        except:
Example #4
def measure_runtime(env,N,D,n_reps=10,time_out=10000):
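    # benchmark optimization runtime on simulated data: t_fast/lml_fast record the efficient
    # KronSumGP implementation, t_slow/lml_slow its naive counterpart; a SIGALRM alarm aborts
    # runs that take longer than time_out seconds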
    opts = {'messages':False}
    out_dir = os.path.join(env['out_dir'],'simulations_runtime')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    t_fast = SP.zeros(n_reps)
    t_slow = SP.zeros(n_reps)
    lml_fast = SP.zeros(n_reps)
    lml_slow = SP.zeros(n_reps)
     
    for i in range(n_reps):
        # load data
        var_signal = 0.5
        data,RV = load_simulations(env,var_signal,N,D,i)

        # initialize
        covar_c = lowrank.LowRankCF(n_dimensions=RV['n_c'])
        covar_r = linear.LinearCF(n_dimensions=RV['n_r'])
        covar_s = lowrank.LowRankCF(n_dimensions=RV['n_sigma'])
        covar_o = fixed.FixedCF(n_dimensions=RV['n_r'])
        X = data.getX(standardized=False)
        Y = data.getY(standardized=False).T
        hyperparams,Ifilter,bounds = initialize.init('GPkronsum_LIN',Y.T,X,RV)
        covar_r.X = X
        covar_o.X = X
        covar_o._K = SP.eye(RV['N'])
        covar_s.X = hyperparams['X_s']
        covar_c.X = hyperparams['X_c']
        kgp_fast = gp_kronsum.KronSumGP(covar_r=covar_r,covar_c=covar_c,covar_s=covar_s,covar_o=covar_o)
        kgp_fast.setData(Y=Y)
        
        # measure time
        signal.signal(signal.SIGALRM,handler)
        signal.alarm(time_out)
        try:
            t_start = time.clock()
            hyperparams_opt,lmltrain = opt.opt_hyper(kgp_fast,hyperparams,Ifilter=Ifilter,bounds=bounds,opts=opts)
            t_stop = time.clock()
            signal.alarm(0)
            t_fast[i] = t_stop - t_start
            lml_fast[i] = lmltrain
        except Exception, e:
            print e
            t_slow += time_out
            break
Example #5
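        # continuation of the runtime benchmark above: the same simulated data is now fit with
        # the naive KronSumGP implementation (kgp_slow) and both timings are written to HDF5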
        Y = data.getY(standardized=False).T
        hyperparams,Ifilter,bounds = initialize.init('GPkronsum_LIN',Y.T,X,RV)
        covar_r.X = X
        covar_o.X = X
        covar_o._K = SP.eye(RV['N'])
        covar_s.X = hyperparams['X_s']
        covar_c.X = hyperparams['X_c']
        kgp_slow = gp_kronsum_naive.KronSumGP(covar_r=covar_r,covar_c=covar_c,covar_s=covar_s,covar_o=covar_o)
        kgp_slow.setData(Y=Y)

        # measure time
        signal.signal(signal.SIGALRM,handler)
        signal.alarm(time_out)
        try:
            t_start = time.clock()
            hyperparams_opt,lmltrain = opt.opt_hyper(kgp_slow,hyperparams,Ifilter=Ifilter,bounds=bounds,opts=opts)
            t_stop = time.clock()
            signal.alarm(0)
            t_slow[i] = t_stop - t_start
            lml_slow[i] = lmltrain
        except Exception, e:
            print e
            t_slow += time_out
            break 

    # save
    fn_out =  os.path.join(out_dir,'results_runtime_signal%03d_N%d_D%d.hdf5'%(var_signal*1E3,N,D))
    f = h5py.File(fn_out,'w')
    f['t_fast'] = t_fast
    f['t_slow'] = t_slow
    f['lml_fast'] = lml_fast
Example #6
    def test_gpbase(self):

        covar = linear.LinearCF(n_dimensions=self.n_dimensions)
        n_train = self.X['train'].shape[0]

        theta = 1E-1
        prior_cov = priors.GaussianPrior(key='covar',theta=SP.array([1.]))
        prior_lik = priors.GaussianPrior(key='lik',theta=SP.array([1.]))
        prior = priors.PriorList([prior_cov,prior_lik])
        
        lik = likelihood_base.GaussIsoLik()
        gp = gp_base.GP(covar_r=covar,likelihood=lik,prior=prior)
        gp.setData(Y=self.Yuni['train'],X=self.X['train'])

        # log likelihood and gradient derivation
        hyperparams = {'covar':SP.array([0.5]), 'lik':SP.array([0.5])}
        LML = gp.LML(hyperparams)
        LMLgrad = gp.LMLgrad(hyperparams)
        
        K = covar.K(hyperparams['covar']) + lik.K(hyperparams['lik'],n_train)
        Kgrad_covar = covar.Kgrad_theta(hyperparams['covar'],0)
        Kgrad_lik = lik.Kgrad_theta(hyperparams['lik'],n_train,0)

        KinvY = LA.solve(K,self.Yuni['train'])
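        # reference value for the (negative) log marginal likelihood:
        # n/2 * log(2*pi) + 1/2 * log|K| + 1/2 * y' * inv(K) * y, plus the prior term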
        _LML = self.n_train/2*SP.log(2*SP.pi) + 0.5*SP.log(LA.det(K)) + 0.5*(self.Yuni['train']*KinvY).sum() + prior.LML(hyperparams)
        LMLgrad_covar = 0.5 * SP.trace(LA.solve(K, Kgrad_covar)) - 0.5*SP.dot(KinvY.T,SP.dot(Kgrad_covar,KinvY))
        LMLgrad_covar+= prior_cov.LMLgrad(hyperparams)['covar']
        LMLgrad_lik = 0.5 * SP.trace(LA.solve(K, Kgrad_lik)) - 0.5*SP.dot(KinvY.T,SP.dot(Kgrad_lik,KinvY))
        LMLgrad_lik+= prior_lik.LMLgrad(hyperparams)['lik']

        
        assert SP.allclose(LML,_LML), 'ouch, marginal log likelihood does not match'
        assert SP.allclose(LMLgrad['covar'], LMLgrad_covar), 'ouch, gradient with respect to theta does not match'
        assert SP.allclose(LMLgrad['lik'], LMLgrad_lik), 'ouch, gradient with respect to theta does not match'

        # predict
        Ystar = gp.predict(hyperparams,self.X['test'])
        Kstar = covar.Kcross(hyperparams['covar'])
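        # reference prediction: posterior mean Kstar' * inv(K) * y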
        _Ystar = SP.dot(Kstar.T,LA.solve(K,self.Yuni['train'])).flatten()
        assert SP.allclose(Ystar,_Ystar), 'ouch, predictions, do not match'
        
        # optimize
        opts = {'gradcheck':True,'messages':False}
        hyperparams_opt,lml_opt = optimize_base.opt_hyper(gp,hyperparams,opts=opts)
        
        # log likelihood and gradient derivation
        LML = gp.LML(hyperparams_opt)
        LMLgrad = gp.LMLgrad(hyperparams_opt)

        K = covar.K(hyperparams_opt['covar']) + lik.K(hyperparams_opt['lik'],n_train)
        Kgrad_covar = covar.Kgrad_theta(hyperparams_opt['covar'],0)
        Kgrad_lik = lik.Kgrad_theta(hyperparams_opt['lik'],n_train,0)
        
        KinvY = LA.solve(K,self.Yuni['train'])
        _LML = self.n_train/2*SP.log(2*SP.pi) + 0.5*SP.log(LA.det(K)) + 0.5*(self.Yuni['train']*KinvY).sum() + prior.LML(hyperparams_opt)
        LMLgrad_covar = 0.5 * SP.trace(LA.solve(K, Kgrad_covar)) - 0.5*SP.dot(KinvY.T,SP.dot(Kgrad_covar,KinvY))
        LMLgrad_covar+= prior_cov.LMLgrad(hyperparams_opt)['covar']
        LMLgrad_lik = 0.5 * SP.trace(LA.solve(K, Kgrad_lik)) - 0.5*SP.dot(KinvY.T,SP.dot(Kgrad_lik,KinvY))
        LMLgrad_lik+= prior_lik.LMLgrad(hyperparams_opt)['lik']

        assert SP.allclose(LML,_LML), 'ouch, marginal log likelihood does not match'
        assert SP.allclose(LMLgrad['covar'], LMLgrad_covar), 'ouch, gradient with respect to theta does not match'
        assert SP.allclose(LMLgrad['lik'], LMLgrad_lik), 'ouch, gradient with respect to theta does not match'
   
        # predict
        Ystar = gp.predict(hyperparams_opt,self.X['test'])
        Kstar = covar.Kcross(hyperparams_opt['covar'])
        _Ystar = SP.dot(Kstar.T,LA.solve(K,self.Yuni['train'])).flatten()
        assert SP.allclose(Ystar,_Ystar), 'ouch, predictions, do not match'
Example #7
    def test_gpkronsum(self):
        covar_c = lowrank.LowRankCF(n_dimensions=self.n_latent)
        covar_r = lowrank.LowRankCF(n_dimensions=self.n_dimensions)
        covar_s = lowrank.LowRankCF(n_dimensions=self.n_latent)
        covar_o = lowrank.LowRankCF(n_dimensions = self.n_dimensions)

        X0_c = SP.random.randn(self.n_tasks,self.n_latent)
        X0_s = SP.random.randn(self.n_tasks,self.n_latent)
        X0_r = SP.random.randn(self.n_train,self.n_dimensions)
        X0_o = SP.random.randn(self.n_train,self.n_dimensions)

        gp = gp_kronsum.KronSumGP(covar_c=covar_c, covar_r=covar_r, covar_s=covar_s, covar_o=covar_o)
        gp.setData(Y=self.Ykronsum['train'])

        gp2 = gp_kronsum_naive.KronSumGP(covar_c=covar_c,covar_r=covar_r,covar_s=covar_s,covar_o=covar_o)
        gp2.setData(Y=self.Ykronsum['train'])
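        # the efficient Kronecker-sum implementation (gp) and the naive one (gp2) should agree
        # on predictions, marginal likelihood and all gradients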
        
        hyperparams = {'covar_c':SP.array([0.5,0.5]), 'X_c':X0_c, 'covar_r':SP.array([0.5,0.5]), 'X_r':X0_r,
                       'covar_s':SP.array([0.5,0.5]), 'X_s':X0_s, 'covar_o':SP.array([0.5,0.5]), 'X_o':X0_o}

        yhat = gp.predict(hyperparams,Xstar_r=self.X['test'],debugging=True)
        lml = gp._LML_covar(hyperparams,debugging=True)
        grad = {}
        grad.update(gp._LMLgrad_c(hyperparams, debugging=True))
        grad.update(gp._LMLgrad_r(hyperparams, debugging=True))
        grad.update(gp._LMLgrad_o(hyperparams, debugging=True))
        grad.update(gp._LMLgrad_s(hyperparams, debugging=True))
        

        yhat2 = gp2.predict(hyperparams,Xstar_r=self.X['test'])
        lml2 = gp2._LML_covar(hyperparams)
        grad2 = {}
        grad2.update(gp2._LMLgrad_covar(hyperparams))
        grad2.update(gp2._LMLgrad_x(hyperparams))

        assert SP.allclose(yhat,yhat2), 'predictions does not match'
        assert SP.allclose(lml,lml2), 'log likelihood does not match'
        for key in grad.keys():
            assert SP.allclose(grad[key],grad2[key]), 'gradient with respect to x does not match'
            
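        # repeat the comparison with covar_o replaced by a diagonal isotropic covariance and
        # the row inputs X passed explicitly via setData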
        covar_o = diag.DiagIsoCF(n_dimensions = self.n_dimensions)
        gp = gp_kronsum.KronSumGP(covar_c=covar_c, covar_r=covar_r, covar_s=covar_s, covar_o=covar_o)
        gp.setData(Y=self.Ykronsum['train'],X_r=self.X['train'],X_o=self.X['train'])

        gp2 = gp_kronsum_naive.KronSumGP(covar_c=covar_c, covar_r=covar_r, covar_s=covar_s, covar_o=covar_o)
        gp2.setData(Y=self.Ykronsum['train'],X_r=self.X['train'],X_o=self.X['train'])
        
        hyperparams = {'covar_c':SP.array([0.5,0.5]), 'X_c':X0_c, 'covar_r':SP.array([0.5,0.5]),
                       'covar_s':SP.array([0.5,0.5]), 'X_s':X0_s, 'covar_o':SP.array([0.5])}
        
        bounds = {'covar_c':SP.array([[-5,+5]]*2), 'covar_r':SP.array([[-5,+5]]*2), 'covar_s':SP.array([[-5,+5]]*2), 'covar_o':SP.array([[-5,+5]])}
        opts = {'gradcheck':True}
        import time
        t_start = time.time()
        hyperparams_opt,lml_opt = optimize_base.opt_hyper(gp,hyperparams,opts=opts, bounds=bounds)
        t_stop = time.time()
        print 'time(training): %.4f'%(t_stop-t_start)

        t_start = time.time()
        hyperparams_opt2, lml_opt2 = optimize_base.opt_hyper(gp2,hyperparams,opts=opts, bounds=bounds)
        t_stop = time.time()
        
        print 'time(training): %.4f'%(t_stop-t_start)
        assert SP.allclose(lml_opt,lml_opt2), 'ouch, optimization did fail'
        
        gp._invalidate_cache() # otherwise debugging parameters are not up to date!
        yhat = gp.predict(hyperparams_opt,Xstar_r=self.X['test'],debugging=True)
        lml = gp._LML_covar(hyperparams_opt,debugging=True)
        grad = {}
        grad.update(gp._LMLgrad_c(hyperparams_opt, debugging=True))
        grad.update(gp._LMLgrad_r(hyperparams_opt, debugging=True))
        grad.update(gp._LMLgrad_o(hyperparams_opt, debugging=True))
        grad.update(gp._LMLgrad_s(hyperparams_opt, debugging=True))
        

        yhat2 = gp2.predict(hyperparams_opt,Xstar_r=self.X['test'])
        lml2 = gp2._LML_covar(hyperparams_opt)
        grad2 = {}
        grad2.update(gp2._LMLgrad_covar(hyperparams_opt))
        grad2.update(gp2._LMLgrad_x(hyperparams_opt))

        assert SP.allclose(yhat,yhat2), 'predictions does not match'
        assert SP.allclose(lml,lml2), 'log likelihood does not match'
        for key in grad.keys():
            assert SP.allclose(grad[key],grad2[key]), 'gradient with respect to x does not match'
Example #8
 # fragment of a multi-task GP benchmark: the column covariance is built from Y_train_hat,
 # the row covariance is a sum of linear, squared-exponential and diagonal terms over X_train;
 # an sMTGPR model is trained and timed, and predictions are mapped back to the original scale
 covar_c.X = Y_train_hat.T
 
 covar_r = composite.SumCF(n_dimensions = X_train.shape[1])
 covar_r.append_covar(linear.LinearCF(n_dimensions = X_train.shape[1]))
 covar_r.append_covar(se.SqExpCF(n_dimensions = X_train.shape[1]))
 covar_r.append_covar(DiagIsoCF(n_dimensions = X_train.shape[1]))
 covar_r.X = X_train
 
 likelihood = lik.GaussIsoLik()
 
 gp = sMTGPR.sMTGPR(covar_r = covar_r, covar_c = covar_c, likelihood = likelihood, basis = B)
 gp.setData(Y = Y_train, Y_hat = Y_train_hat, X = X_train)
 
 # Training: optimize hyperparameters
 t_start = time.clock()
 hyperparams_opt,lml_opt = optimize_base.opt_hyper(gp, hyperparams, bounds = bounds, Ifilter = Ifilter)
 t_stop = time.clock()
 elapsed_time_opt[r,b] = t_stop - t_start
 
 # Testing
 t_start = time.clock()
 Y_pred_MT, Y_pred_cov_MT = gp.predict(hyperparams_opt, Xstar_r = X_test, compute_cov = True)
 t_stop = time.clock()
 elapsed_time_est[r,b] = t_stop - t_start
 
 Y_pred_MT = Y_scaler.inverse_transform(Y_pred_MT)
 Y_pred_cov_MT = Y_pred_cov_MT * Y_scaler.var_
 s_n2_MT = likelihood.Kdiag(hyperparams_opt['lik'], n_task) * Y_scaler.var_
 
 r2_MT[r,b] = np.mean(compute_r2(Y_test, Y_pred_MT))
 NPMs_MT = normative_prob_map(Y_test, Y_pred_MT, Y_pred_cov_MT, s_n2_MT)
Example #9
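    # fragment of a Kronecker-sum GP demo: initialize the covariance functions, optimize the
    # hyperparameters, and compare the fitted task covariances (COpt, SigmaOpt) against the
    # simulated ground truth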
    
    # initialization parameters
    hyperparams, Ifilter, bounds = initialize.init('GPkronsum_LIN',Y.T,X_r,{'n_c':n_latent, 'n_sigma':n_latent})
    
    # initialize gp and its covariance functions
    covar_r.X = X_r
    covar_o.X = X_r
    covar_o._K = SP.eye(n_train)
    covar_s.X = hyperparams['X_s']
    covar_c.X = hyperparams['X_c']
    gp = gp_kronsum.KronSumGP(covar_c=covar_c, covar_r=covar_r, covar_s=covar_s, covar_o=covar_o)
    gp.setData(Y=Y)  

    # optimize hyperparameters
    t_start = time.time()
    hyperparams_opt,lml_opt = optimize_base.opt_hyper(gp,hyperparams, bounds=bounds,Ifilter=Ifilter)
    t_stop = time.time()
    print 'time(training): %.4f'%(t_stop-t_start)

    # compare
    SigmaOpt = covar_s.K(hyperparams_opt['covar_s'])
    COpt = covar_c.K(hyperparams_opt['covar_c'])

    fig = PLT.figure(1)
    fig.add_subplot(221)
    fig.subplots_adjust(hspace=0.5)
    
    PLT.imshow(C,interpolation='nearest')
    PLT.title('True Signal Covariance')
    PLT.xlabel('Tasks'); PLT.ylabel('Tasks')