Example #1
    def getGPR(self, M, opt=True):
        """create a GP object, optimize hyperparameters for M[0]: x , M[1]: multiple time series y"""

        [x, y] = self.M2GPxy(M)

        logtheta = self.logtheta0

        if self.robust:
            #robust variant: EP-based GP with the model's likelihood
            gpr = GPREP.GPEP(covar=self.covar,
                             Nep=3,
                             likelihood=self.likelihood,
                             Smean=True,
                             x=x,
                             y=y)
        else:
            #standard Gaussian-noise GP
            gpr = GPR.GP(self.covar, Smean=True, x=x, y=y)
        if opt:
            Ifilter = S.ones_like(logtheta)
            LG.debug("opt")
            LG.debug('priors: %s' % str(self.priors))
            logtheta = GPR.optHyper(gpr,
                                    logtheta,
                                    Ifilter=Ifilter,
                                    priors=self.priors,
                                    maxiter=self.maxiter)

        gpr.logtheta = logtheta
        gpr.priors = self.priors
        return gpr
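M2GPxy is not shown in this example; below is a minimal sketch of what it plausibly does, assuming M[0] holds a replicates-by-time matrix of inputs and M[1] the matching observations. The helper name and shapes here are illustrative, not the codebase's actual implementation.

import numpy as np

def M2GPxy_sketch(M):
    """Flatten replicate time series into per-observation (x, y) training pairs."""
    x = M[0].reshape(-1, 1)   #inputs: one column of time points
    y = M[1].reshape(-1)      #targets: flattened observations
    return [x, y]

#hypothetical data: 3 replicates observed at 4 time points
T = np.tile(np.linspace(0.0, 6.0, 4), (3, 1))
Y = np.sin(T) + 0.1 * np.random.randn(*T.shape)
[x, y] = M2GPxy_sketch([T, Y])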
Example #2
    def getGPR(self, M, opt=True):
        """create a GP object, optimize hyperparameters for M[0]: x , M[1]: multiple time series y"""

        [x, y] = self.M2GPxy(M)

        #initialize GP
        logtheta = self.logtheta0
        priors = self.priors

        #optionally adjust the priors (disabled)
        if 0:
            #amplitude prior
            Yexp = 0.5
            scale = 20
            loc = Yexp / scale
            priors[0][1] = [scale, loc]
            #length-scale prior: centred on a quarter of the input range
            Lmin = (x[:, 0].max(axis=0) - x[:, 0].min(axis=0)) / 4
            scale = 30
            loc = Lmin / scale
            priors[1][1] = [scale, loc]
            #noise prior (note: computed but never assigned back to priors)
            sigma = 0.5
            scale = 3
            loc = sigma / scale

        gpr = GPR.GP(self.covar, Smean=True, x=x, y=y)
        if opt:
            #optimisation filter: optimize all hyperparameters
            Ifilter = S.ones_like(logtheta)
            logtheta = GPR.optHyper(gpr,
                                    logtheta,
                                    Ifilter=Ifilter,
                                    priors=priors,
                                    maxiter=self.maxiter)
        #save optimised hyperparameters
        gpr.logtheta = logtheta
        return gpr
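The disabled block above builds each prior from a [scale, loc] pair chosen so that scale * loc equals a target value such as Yexp. A standalone sketch of that construction, under the assumption (inferred from the code, not confirmed by it) that the pair acts as gamma shape and scale parameters:

import scipy.stats as ST

Yexp, k = 0.5, 20.0        #target mean and assumed gamma shape parameter
theta = Yexp / k           #gamma scale, so that the prior mean k * theta == Yexp
prior = ST.gamma(k, scale=theta)
print(prior.mean())        #0.5 by construction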
Example #3
    x[Iout,1] = +1
    pass


#predictions:
X = linspace(0,10,100)
X = X.reshape(size(X),1)

logtheta = log([1,1,sigma])
dim = 1


#initialize covariance, likelihood and GP

covar = sederiv.SquaredExponentialCF(dim)
gpr   = GPR.GP(covar,Smean=True,x=x,y=y)

if 0:
    #plain Gaussian likelihood (disabled)
    gprEP = GPREP.GPEP(covar=covar,likelihood=GaussLikelihood(),Smean=True,rescale=False,x=x,y=y)
    logthetaEP = log([1,1,1E-6,sigma])
if 1:
    #use constrained likelihood
    likelihood = ConstrainedLikelihood(alt=GaussLikelihood())
    gprEP = GPREP.GPEP(covar=covar,Nep=Nep,likelihood=likelihood,Smean=True,rescale=False,x=x,y=y)
    logthetaEP = log([1,1,1E-6,sigma])
    
#predict

if 1:
    [MEP,SEP] = gprEP.predict(logthetaEP,X)
    figure(1)
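    #plausible continuation (a sketch, not part of the original example):
    #plot the EP predictive mean with a 2-sigma band, assuming SEP holds the
    #predictive variance and pylab/numpy names are star-imported as above
    plot(X[:,0],MEP,'b-',linewidth=2)
    fill_between(X[:,0],MEP-2*sqrt(SEP),MEP+2*sqrt(SEP),alpha=0.2)
    plot(x[:,0],y,'k.')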
Example #4
    def test_interval(self,M0,M1,verbose=True,opt=None,Ngibbs_iterations=None):
        """test for differential expression with clustering model
        - returns a data structure which reflects the time of sepataoin (posterior over Z)
        """

        def updateGP():
            """update the GP datasets and re-evaluate the EP approximate likelihood"""
            #0. update the per-observation noise levels in accordance with the responsibilities
            for t in range(T):
                XS[:,t:R*T:T,-1] = 1/(Z[1,t]+1E-6)
                XJ[:,t:R*T:T,-1] = 1/(Z[0,t]+1E-6)

            #1. set the data of both processes
            GPS.setData(XS,Y)
            #for the joint process the two conditions are concatenated
            GPJ.setData(S.concatenate(XJ,axis=0),S.concatenate(Y,axis=0))

        MJ = [S.concatenate((M0[0],M1[0]),axis=0),S.concatenate((M0[1],M1[1]),axis=0)]


        C  = 2              #conditions
        R  = M0[0].shape[0] #replicates
        T  = M0[0].shape[1] #time points
        D  = 2              #input dimensions


        #Responsibilities: components(2) x time                        
        Z  = 0.5*S.ones((2,T))

        #Data(X/Y): conditions x replicates x time x 2D
        X  = S.zeros((C,R*T,D))
        Y  = S.zeros((C,R*T))
        #unique time points (the second input dimension carries the per-observation noise level)
        XT = S.ones((T,2))
        XT[:,0] = M0[0][0,:]

        [x0,y0] = self.M2GPxy(M0)
        [x1,y1] = self.M2GPxy(M1)
        
        X[0,:,0:2] = x0
        X[1,:,0:2] = x1
        Y[0,:]     = y0
        Y[1,:]     = y1

        
        #create one copy of the inputs per process, used for the input-dependent noise
        XS       = X.copy()
        XJ       = X.copy()

        #get hyperparameters from the standard test:
        ratio = self.test(M0,M1,verbose=False,opt=opt)
        logtheta_s = self.gpr_0.logtheta
        logtheta_j = self.gpr_join.logtheta
        #fall back to default hyperparameters if none were supplied
        if self.logtheta0 is None:
            logtheta = self.covar.getDefaultParams()

        #the two individual GPs
        GP0 = GPR.GP(self.covar,Smean=self.Smean,logtheta=logtheta_s)
        GP1 = GPR.GP(self.covar,Smean=self.Smean,logtheta=logtheta_s)
        #the group GP summarising the two individual processes
        GPS = GPR.GroupGP([GP0,GP1])
        #the joint process
        GPJ = GPR.GP(self.covar,Smean=self.Smean,logtheta=logtheta_j)
        #update the GP
        updateGP()


        debug_plot = True

        for i in range(1):
            ###iterations (currently a single pass)###
            #1. predictive distributions of all GPs at the unique time points
            #(the extra input dimension accommodates the per-observation noise model)
            Yp0 = GP0.predict(GP0.logtheta,XT)
            Yp1 = GP1.predict(GP1.logtheta,XT)
            Ypj = GPJ.predict(GPJ.logtheta,XT)
            #2. variance-weighted squared residuals per time point
            D0  = ((M0[1]-Yp0[0])**2 * (1/Yp0[1])).sum(axis=0)
            D1  = ((M1[1]-Yp1[0])**2 * (1/Yp1[1])).sum(axis=0)
            DJ  = ((MJ[1]-Ypj[0])**2 * (1/Ypj[1])).sum(axis=0)
            #the residual of the individual model is the sum over both conditions
            DS  = D0+D1
            #3. re-estimate the responsibilities Q(Z)
            ES  = S.exp(-DS)
            EJ  = S.exp(-DJ)

            Z[0,:] = self.prior_Z[0]*EJ
            Z[1,:] = self.prior_Z[1]*ES
            Z     /= Z.sum(axis=0)
            updateGP()


        if verbose:
            PL.clf()
            labelSize = 15
            tickSize  = 12
            
            #1. plot the GP predictions
            ax1=PL.axes([0.1,0.1,0.8,0.7])
            Xt_ = S.linspace(0,XT[:,0].max()+2,100)
            Xt  = S.ones((Xt_.shape[0],2))
            Xt[:,0] = Xt_

            self.plotGPpredict(GP0,M0,Xt,{'alpha':0.1,'facecolor':'r'},{'linewidth':2,'color':'r'})
            self.plotGPpredict(GP1,M0,Xt,{'alpha':0.1,'facecolor':'g'},{'linewidth':2,'color':'g'})
            self.plotGPpredict(GPJ,M0,Xt,{'alpha':0.1,'facecolor':'b'},{'linewidth':2,'color':'b'})
            PL.plot(M0[0].T,M0[1].T,'r.--')
            PL.plot(M1[0].T,M1[1].T,'g.--')
            
            PL.xlim([Xt.min(),Xt.max()])
            #remove the last yticks to avoid overlap
            yticks = ax1.get_yticks()[0:-2]
            ax1.set_yticks(yticks)
            PL.xlabel('Time/h',size=labelSize)
            PL.ylabel('Log expression level',size=labelSize)

            #now plot a hinton diagram with the responsibilities on top
            ax2=PL.axes([0.1,0.715,0.8,0.2],sharex=ax1)
            #swap the order of Z for display purposes
            Z_= S.ones_like(Z)
            Z_[1,:] = Z[0,:]
            Z_[0,:] = Z[1,:]
            hinton(Z_,X=M0[0][0])
            PL.ylabel('diff.')
            
            #hide the shared x tick labels and set the font sizes
            PL.setp( ax2.get_xticklabels(), visible=False)
            PL.setp( ax1.get_xticklabels(), fontsize=tickSize)
            PL.setp( ax1.get_yticklabels(), fontsize=tickSize)
            PL.setp( ax2.get_xticklabels(), fontsize=tickSize)
        return Z
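The heart of the iteration above is the responsibility update: each time point is assigned to the joint or the separate model in proportion to its prior weight times exp(-weighted residual). A standalone numpy sketch of that step with hypothetical residual values:

import numpy as np

prior_Z = np.array([0.5, 0.5])      #prior over [joint, separate]
DJ = np.array([0.2, 1.5, 4.0])      #hypothetical weighted residuals, joint model
DS = np.array([1.0, 1.2, 0.5])      #hypothetical weighted residuals, separate models
Z = np.vstack([prior_Z[0]*np.exp(-DJ),
               prior_Z[1]*np.exp(-DS)])
Z /= Z.sum(axis=0)                  #normalize per time point -> posterior Q(Z)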
Example #5
    [Spca, Wpca] = PCA(Y, K)

    #reconstruction
    Y_ = SP.dot(Spca, Wpca.T)

    #construct GPLVM model
    linear_cf = linear.LinearCFISO(n_dimensions=K)
    noise_cf = noise.NoiseISOCF()
    covariance = combinators.SumCF((linear_cf, noise_cf))

    #no external inputs here (SNPs may serve as inputs later)
    X = Spca.copy()
    #X = SP.random.randn(N,K)
    gplvm = GPLVM(covar_func=covariance, x=X, y=Y)

    gpr = GPR.GP(covar_func=covariance, x=X, y=Y[:, 0])

    #construct hyperparameters: log covariance parameters (linear signal, noise)
    covar = SP.log([1.0, 0.1])

    #X is treated as hyperparameters, i.e. we also optimize over the latent inputs

    #1. jointly with the latent X
    X_ = X.copy()
    hyperparams = {'covar': covar, 'x': X_}

    #for testing just covar params alone:
    #hyperparams = {'covar': covar}

    #evaluate log marginal likelihood
    lml = gplvm.lMl(hyperparams=hyperparams)
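The PCA call above supplies both the initial latent coordinates X and the baseline reconstruction Y_ = Spca * Wpca.T. A self-contained numpy sketch of such a helper; the (scores, loadings) return order is an assumption matching its use above:

import numpy as np

def PCA_sketch(Y, K):
    """Return the top-K scores and loadings of Y via SVD of the centered data."""
    Yc = Y - Y.mean(axis=0)
    U, s, Vt = np.linalg.svd(Yc, full_matrices=False)
    scores = U[:, :K] * s[:K]   #N x K latent coordinates
    W = Vt[:K].T                #D x K loadings
    return [scores, W]

Y = np.random.randn(50, 10)
[Spca, Wpca] = PCA_sketch(Y, 3)
Y_ = np.dot(Spca, Wpca.T)       #rank-K reconstruction of the centered data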