Example #1
    def getGPR(self, M, opt=True):
        """create a GP object, optimize hyperparameters for M[0]: x , M[1]: multiple time series y"""

        [x, y] = self.M2GPxy(M)

        logtheta = self.logtheta0

        if self.robust:
            gpr = GPREP.GPEP(covar=self.covar,
                             Nep=3,
                             likelihood=self.likelihood,
                             Smean=True,
                             x=x,
                             y=y)
        else:
            gpr = GPR.GP(self.covar, Smean=True, x=x, y=y)
        if opt:
            Ifilter = S.ones_like(logtheta)
            LG.debug("opt")
            LG.debug('priors: %s' % str(self.priors))
            logtheta = GPR.optHyper(gpr,
                                    logtheta,
                                    Ifilter=Ifilter,
                                    priors=self.priors,
                                    maxiter=self.maxiter)

        gpr.logtheta = logtheta
        gpr.priors = self.priors
        return gpr
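A minimal usage sketch of this method, for orientation. The instance name twosample, the exact layout accepted by M2GPxy (lists of replicate time vectors and matching series, as suggested by Examples #6 and #10), and the NumPy toy data are assumptions for illustration, not part of the original code:

import numpy as NP

t = NP.linspace(0, 10, 8)                      # shared time grid
y0 = NP.sin(t) + 0.1 * NP.random.randn(8)      # replicate 1
y1 = NP.sin(t) + 0.1 * NP.random.randn(8)      # replicate 2
M = [[t, t], [y0, y1]]                         # M[0]: x per replicate, M[1]: the time series
gpr = twosample.getGPR(M, opt=True)            # twosample: hypothetical instance of the class above
print(gpr.logtheta)                            # optimized log-hyperparameters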
Example #2
    def getGPR(self,M,opt=True):
        """create a GP object, optimize hyperparameters for M[0]: x , M[1]: multiple time series y"""

        [x,y] = self.M2GPxy(M)

        logtheta = self.logtheta0

        if self.robust:
            gpr = GPREP.GPEP(covar=self.covar,Nep=3,likelihood=self.likelihood,Smean=True,x=x,y=y)
        else:
            gpr = GPR.GP(self.covar,Smean=True,x=x,y=y)
        if opt:
            Ifilter = S.ones_like(logtheta)
            LG.debug("opt")
            LG.debug('priors: %s' % str(self.priors))
            logtheta=GPR.optHyper(gpr,logtheta,Ifilter=Ifilter,priors=self.priors,maxiter=self.maxiter)

        gpr.logtheta = logtheta
        gpr.priors   = self.priors
        return gpr
Example #3
    def getGPR(self, M, opt=True):
        """create a GP object, optimize hyperparameters for M[0]: x , M[1]: multiple time series y"""

        [x, y] = self.M2GPxy(M)

        #initialize GP
        logtheta = self.logtheta0
        priors = self.priors

        #optionally adjust priors (disabled)
        if 0:
            #scale (amplitude) prior
            Yexp = 0.5
            scale = 20
            loc = Yexp / scale
            priors[0][1] = [scale, loc]
            #lengthscale prior, tied to the observed input range
            Lmin = (x[:, 0].max(axis=0) - x[:, 0].min(axis=0)) / 4
            scale = 30
            loc = Lmin / scale
            priors[1][1] = [scale, loc]
            #noise prior (assuming priors = [scale, lengthscale, noise])
            sigma = 0.5
            scale = 3
            loc = sigma / scale
            priors[2][1] = [scale, loc]

        gpr = GPR.GP(self.covar, Smean=True, x=x, y=y)
        #pydb.set_trace()   # debugging hook, leave commented out for normal runs
        if opt:
            #opt filter
            Ifilter = S.ones_like(logtheta)
            logtheta = GPR.optHyper(gpr,
                                    logtheta,
                                    Ifilter=Ifilter,
                                    priors=priors,
                                    maxiter=self.maxiter)
        #save optimised hyperparameters
        gpr.logtheta = logtheta
        return gpr
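The priors list adjusted above apparently follows the same [prior_pdf, prior_parameters] layout that Example #5 builds explicitly: one entry per element of logtheta, in the order output scale, one lengthscale per input dimension, noise. A sketch of that construction, assuming lngammapdf and dim as in Example #5:

priors = []
priors.append([lngammapdf, [1, 2]])      # prior on the output scale
for i in range(dim):
    priors.append([lngammapdf, [1, 1]])  # one prior per lengthscale
priors.append([lngammapdf, [1, 1]])      # prior on the observation noise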
Example #4
    def getGPR(self,M,opt=True):
        """create a GP object, optimize hyperparameters for M[0]: x , M[1]: multiple time series y"""

        [x,y] = self.M2GPxy(M)

        #initialize GP
        logtheta = self.logtheta0
        priors = self.priors

        #optionally adjust priors (disabled)
        if 0:
            #scale (amplitude) prior
            Yexp = 0.5
            scale = 20
            loc   = Yexp/scale
            priors[0][1] = [scale,loc]
            #lengthscale prior, tied to the observed input range
            Lmin = (x[:,0].max(axis=0)-x[:,0].min(axis=0))/4
            scale = 30
            loc   = Lmin/scale
            priors[1][1] = [scale,loc]
            #noise prior (assuming priors = [scale, lengthscale, noise])
            sigma = 0.5
            scale = 3
            loc   = sigma/scale
            priors[2][1] = [scale,loc]

        gpr = GPR.GP(self.covar,Smean=True,x=x,y=y)
        #pydb.set_trace()   # debugging hook, leave commented out for normal runs
        if opt:
            #opt filter
            Ifilter = S.ones_like(logtheta)
            logtheta=GPR.optHyper(gpr,logtheta,Ifilter=Ifilter,priors=priors,maxiter=self.maxiter)
        #save optimised hyperparameters
        gpr.logtheta = logtheta
        return gpr
Example #5
covar = sederiv.SquaredExponentialCF(dim)
gpr = GPR.GP(covar,Smean=True,x=x,y=y)

if 1:
    GPR.DEBUG=2
    priors = []
    #scale
    priors.append([lngammapdf,[1,2]])
    for i in range(dim):
        priors.append([lngammapdf,[1,1]])
    #noise
    priors.append([lngammapdf,[1,1]])
      
    I_filter = ones_like(logtheta)
    #maybe we should filter which entries of logtheta get optimized
    logthetaO = GPR.optHyper(gpr, logtheta, I_filter, priors=priors)
    print "optimized hyperparameters:" + str(exp(logthetaO))
else:
    logthetaO=logtheta

#predict
[M,S] = gpr.predict(logthetaO,X)


#plot training data, the predictive mean and the +/- 2*sqrt(S) band
hold(True)
plot(x[:,0], y, 'ro',
     X[:,0], M, 'g-',
     X[:,0], M+2*sqrt(S), 'b-',
     X[:,0], M-2*sqrt(S), 'b-')
#show()
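The snippet assumes x, y, logtheta and the test inputs X were defined earlier in the script; predict returns M and S, which the plot above treats as the predictive mean and variance (hence the 2*sqrt(S) band). For a one-dimensional input, X might simply be a dense grid over the training range, along the lines of this sketch (an assumption, not part of the original):

from numpy import linspace
X = linspace(x[:, 0].min(), x[:, 0].max(), 100).reshape(-1, 1)   # 100 x 1 test inputs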
Example #6
    def test(self,M0,M1,verbose=False,opt=None,logtrafo=False,rescale=False):
        """test for differential expression
        M0: dataset in condition 0
        M1: dataset in condition 1
        verbose: create verbose plot
        opt: optimise hyper(true)
        logtrafo: takes logs first (False)
        rescale: rescale to unit varaince (False)
        """

        if opt is None:
            opt = self.opt

        [M0,M1] = self.preprocess(M0,M1,logtrafo=logtrafo,rescale=rescale)
        #0. model: both in one bucket:

        ##changed this here:
        #xjoin = S.concatenate((M0[0],M1[0]),axis=1)
        #yjoin = S.concatenate((M0[1],M1[1]),axis=1)
        #Mjoin = [xjoin,yjoin]

        #this version is compatible with lists:
        Mjoin = self.Mconcatenate(M0,M1)
        MJ    = Mjoin
        
        gpr_join = self.getGPR(Mjoin,opt=opt)

        #1. both separately:
        gpr_0    = self.getGPR(M0,opt=False)
        gpr_1    = self.getGPR(M1,opt=False)
        gprs     = GPR.GroupGP([gpr_0,gpr_1])

        if opt:
            
            Ifilter = S.ones_like(gpr_0.logtheta)
#            Ifilter[-1] = 0
            logtheta=GPR.optHyper(gprs,gpr_0.logtheta,Ifilter=Ifilter,priors=gpr_0.priors,maxiter=self.maxiter)
            gpr_0.logtheta=logtheta
            gpr_1.logtheta=logtheta

        LML_join = -1.0*gpr_join.lMl(gpr_join.logtheta,lml=True,dlml=False)
        LML_0    = -1.0*gpr_0.lMl(gpr_0.logtheta,lml=True,dlml=False)
        LML_1    = -1.0*gpr_1.lMl(gpr_1.logtheta,lml=True,dlml=False)
        
        #bayes factor
        ratio    = (LML_0+LML_1-LML_join)
        #store result structures in local object
        self.gpr_join = gpr_join
        self.gpr_s    = gprs
        self.gpr_0    = gpr_0
        self.gpr_1    = gpr_1

        if verbose:
            LG.debug('theta0: %s' % str(S.exp(gpr_0.logtheta)))
            LG.debug('theta1: %s' % str(S.exp(gpr_1.logtheta)))
            X = S.linspace(M0[0][0].min(),M0[0][0].max(),100)
            
            #plot
            PL.clf()
            PL.hold(True)

            LG.debug("theta(0):" + str(S.exp(gpr_0.logtheta)))
            LG.debug("theta(1):" + str(S.exp(gpr_1.logtheta)))
            LG.debug("theta: " + str(S.exp(gpr_join.logtheta)))

            #plot the GP error bars and means first:
            self.plotGPpredict(gpr_0,M0,X,{'alpha':0.1,'facecolor':'r'},{'linewidth':2,'color':'r'})
            self.plotGPpredict(gpr_1,M0,X,{'alpha':0.1,'facecolor':'g'},{'linewidth':2,'color':'g'})
            self.plotGPpredict(gpr_join,M0,X,{'alpha':0.1,'facecolor':'b'},{'linewidth':2,'color':'b'})
            for rep in range(len(M0[0])):
                PL.plot(M0[0][rep],M0[1][rep],'r.--')
            for rep in range(len(M1[0])):
                PL.plot(M1[0][rep],M1[1][rep],'g.--')
            PL.xlim((X.min(),X.max()))
            PL.title('%s: %.4f' % ('',ratio))
            PL.xlabel('Time/hr')
            PL.ylabel('Log expression level')
#            Ymax = MJ[1].max()
#            Ymin = MJ[1].min()
#            DY   = Ymax-Ymin
#            PL.ylim([Ymin-0.1*DY,Ymax+0.1*DY])
        return ratio
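The returned ratio is a log Bayes factor: the sum of the per-condition log marginal likelihoods minus that of the joint model, so larger values favour differential expression. A hypothetical sketch of scoring and ranking many genes with this method (the twosample instance and the datasets dictionary of [M0, M1] pairs are assumptions for illustration):

scores = {}
for gene, (M0, M1) in datasets.items():       # datasets: gene -> [M0, M1], hypothetical
    scores[gene] = twosample.test(M0, M1, verbose=False)
ranked = sorted(scores.items(), key=lambda item: item[1], reverse=True)
for gene, bf in ranked[:10]:                  # top candidates for differential expression
    print('%s: log Bayes factor %.3f' % (gene, bf))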
Example #7
    X = Spca.copy()
    #X = SP.random.randn(N,K)
    gplvm = GPLVM(covar_func=covariance,x=X,y=Y)

    gpr = GPR.GP(covar_func=covariance,x=X,y=Y[:,0])
    
    #construct hyperparams
    covar = SP.log([1.0,0.1])

    #X are hyperparameters, i.e. we optimize over them also

    #1. this is jointly with the latent X
    X_ = X.copy()
    hyperparams = {'covar': covar, 'x': X_}
    

    #for testing just covar params alone:
    #hyperparams = {'covar': covar}
    
    #evaluate log marginal likelihood
    lml = gplvm.lMl(hyperparams=hyperparams)
    [opt_model_params,opt_lml]=GPR.optHyper(gplvm,hyperparams,gradcheck=False)
    Xo = opt_model_params['x']
    

    for k in xrange(K):
        print SP.corrcoef(Spca[:,k],S[:,k])

    for k in xrange(K):
        print SP.corrcoef(Xo[:,k],S[:,k])
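Spca provides the starting point for the latent coordinates, and the two loops compare the PCA initialisation and the optimised latents Xo against what appear to be the true factors S. How Spca is computed is not shown; a common choice, sketched here under that assumption, is an SVD-based PCA of the centred data Y, which would sit just before the X = Spca.copy() line above (the singular values are named S_ to avoid clashing with S):

    import scipy.linalg as linalg
    Yc = Y - Y.mean(axis=0)                          # centre the data (N x D)
    [U, S_, Vt] = linalg.svd(Yc, full_matrices=False)
    Spca = U[:, :K] * S_[:K]                         # N x K principal-component scores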
Example #8
    X = Spca.copy()
    #X = SP.random.randn(N,K)
    gplvm = GPLVM(covar_func=covariance, x=X, y=Y)

    gpr = GPR.GP(covar_func=covariance, x=X, y=Y[:, 0])

    #construct hyperparams
    covar = SP.log([1.0, 0.1])

    #X are hyperparameters, i.e. we optimize over them also

    #1. this is jointly with the latent X
    X_ = X.copy()
    hyperparams = {'covar': covar, 'x': X_}

    #for testing just covar params alone:
    #hyperparams = {'covar': covar}

    #evaluate log marginal likelihood
    lml = gplvm.lMl(hyperparams=hyperparams)
    [opt_model_params, opt_lml] = GPR.optHyper(gplvm,
                                               hyperparams,
                                               gradcheck=False)
    Xo = opt_model_params['x']

    for k in xrange(K):
        print SP.corrcoef(Spca[:, k], S[:, k])

    for k in xrange(K):
        print SP.corrcoef(Xo[:, k], S[:, k])
Example #9
covar = sederiv.SquaredExponentialCF(dim)
gpr = GPR.GP(covar, Smean=True, x=x, y=y)

if 1:
    GPR.DEBUG = 2
    priors = []
    #scale
    priors.append([lngammapdf, [1, 2]])
    for i in range(dim):
        priors.append([lngammapdf, [1, 1]])
    #noise
    priors.append([lngammapdf, [1, 1]])

    I_filter = ones_like(logtheta)
    #maybe we should filter which entries of logtheta get optimized
    logthetaO = GPR.optHyper(gpr, logtheta, I_filter, priors=priors)
    print "optimized hyperparameters:" + str(exp(logthetaO))
else:
    logthetaO = logtheta

#predict
[M, S] = gpr.predict(logthetaO, X)

#plot training data, the predictive mean and the +/- 2*sqrt(S) band
hold(True)
plot(x[:, 0], y, 'ro', X[:, 0], M, 'g-', X[:, 0], M + 2 * sqrt(S), 'b-',
     X[:, 0], M - 2 * sqrt(S), 'b-')
#show()

#stack inputs and targets into one array D = [x | y]
D = zeros([x.shape[0], x.shape[1] + 1])
D[:, 0:-1] = x
D[:, -1] = y
Example #10
    def test(self,
             M0,
             M1,
             verbose=False,
             opt=None,
             logtrafo=False,
             rescale=False):
        """test for differential expression
        M0: dataset in condition 0
        M1: dataset in condition 1
        verbose: create verbose plot
        opt: optimise hyper(true)
        logtrafo: takes logs first (False)
        rescale: rescale to unit varaince (False)
        """

        if opt is None:
            opt = self.opt

        [M0, M1] = self.preprocess(M0, M1, logtrafo=logtrafo, rescale=rescale)
        #0. model: both in one bucket:

        ##changed this here:
        #xjoin = S.concatenate((M0[0],M1[0]),axis=1)
        #yjoin = S.concatenate((M0[1],M1[1]),axis=1)
        #Mjoin = [xjoin,yjoin]

        #this version is compatible with lists:
        Mjoin = self.Mconcatenate(M0, M1)
        MJ = Mjoin

        gpr_join = self.getGPR(Mjoin, opt=opt)

        #1. both separately:
        gpr_0 = self.getGPR(M0, opt=False)
        gpr_1 = self.getGPR(M1, opt=False)
        gprs = GPR.GroupGP([gpr_0, gpr_1])

        if opt:

            Ifilter = S.ones_like(gpr_0.logtheta)
            #            Ifilter[-1] = 0
            logtheta = GPR.optHyper(gprs,
                                    gpr_0.logtheta,
                                    Ifilter=Ifilter,
                                    priors=gpr_0.priors,
                                    maxiter=self.maxiter)
            gpr_0.logtheta = logtheta
            gpr_1.logtheta = logtheta

        LML_join = -1.0 * gpr_join.lMl(gpr_join.logtheta, lml=True, dlml=False)
        LML_0 = -1.0 * gpr_0.lMl(gpr_0.logtheta, lml=True, dlml=False)
        LML_1 = -1.0 * gpr_1.lMl(gpr_1.logtheta, lml=True, dlml=False)

        #bayes factor
        ratio = (LML_0 + LML_1 - LML_join)
        #store result structures in local object
        self.gpr_join = gpr_join
        self.gpr_s = gprs
        self.gpr_0 = gpr_0
        self.gpr_1 = gpr_1

        if verbose:
            LG.debug('theta0: %s' % str(S.exp(gpr_0.logtheta)))
            LG.debug('theta1: %s' % str(S.exp(gpr_1.logtheta)))
            X = S.linspace(M0[0][0].min(), M0[0][0].max(), 100)

            #plot
            PL.clf()
            PL.hold(True)

            LG.debug("theta(0):" + str(S.exp(gpr_0.logtheta)))
            LG.debug("theta(1):" + str(S.exp(gpr_1.logtheta)))
            LG.debug("theta: " + str(S.exp(gpr_join.logtheta)))

            #plot the GP error bars and means first:
            self.plotGPpredict(gpr_0, M0, X, {
                'alpha': 0.1,
                'facecolor': 'r'
            }, {
                'linewidth': 2,
                'color': 'r'
            })
            self.plotGPpredict(gpr_1, M0, X, {
                'alpha': 0.1,
                'facecolor': 'g'
            }, {
                'linewidth': 2,
                'color': 'g'
            })
            self.plotGPpredict(gpr_join, M0, X, {
                'alpha': 0.1,
                'facecolor': 'b'
            }, {
                'linewidth': 2,
                'color': 'b'
            })
            for rep in range(len(M0[0])):
                PL.plot(M0[0][rep], M0[1][rep], 'r.--')
            for rep in range(len(M1[0])):
                PL.plot(M1[0][rep], M1[1][rep], 'g.--')
            PL.xlim((X.min(), X.max()))
            PL.title('%s: %.4f' % ('', ratio))
            PL.xlabel('Time/hr')
            PL.ylabel('Log expression level')
#            Ymax = MJ[1].max()
#            Ymin = MJ[1].min()
#            DY   = Ymax-Ymin
#            PL.ylim([Ymin-0.1*DY,Ymax+0.1*DY])
        return ratio
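As in Example #6, the score is the log Bayes factor ratio; with verbose=True the method also draws the per-condition and joint GP fits via pylab (PL). A minimal, hypothetical call (instance name and data assumed; PL.savefig used here only to persist the figure):

ratio = twosample.test(M0, M1, verbose=True, logtrafo=True)
PL.savefig('gene_fit.png')                    # save the verbose plot drawn by test()
print('log Bayes factor: %.3f' % ratio)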