Example #1
    def LML_blockwise(self, snp, Asnp=None, identity_trick=False, *kw_args):
        """
        calculate LML
        """
        self._gp._update_cache()

        if Asnp is None:
            nW_Asnp = self._gp.mean.Ystar().shape[1]
        else:
            nW_Asnp = Asnp.shape[1]

        #1. const term
        lml = self._gp.N * self._gp.P * np.log(2.0 * np.pi)

        #2. logdet term
        lml += np.sum(np.log(self._gp.cache['Sc2'])) * self._gp.N + np.log(
            self._gp.cache['s']).sum()

        #3. quadratic term
        #quad1 = (self._gp.mean.Zstar(identity_trick=identity_trick)*self._gp.mean.DLZ(identity_trick=identity_trick)).sum()

        XKY = self._gp.mean.compute_XKY(M=self._gp.mean.Yhat(),
                                        identity_trick=identity_trick)
        beta = self._gp.mean.beta_hat(identity_trick=identity_trick)
        var_total = (self._gp.mean.Yhat() * self._gp.mean.Ystar()).sum()
        var_exp = (XKY * beta).sum()

        #use blockwise matrix inversion
        #[  Areml,          XcovarXsnp
        #   XcovarXsnp.T    XsnpXsnp    ]

        XsnpXsnp = compute_X1KX2(Y=self._gp.mean.Ystar(),
                                 D=self._gp.mean.D,
                                 X1=snp,
                                 X2=snp,
                                 A1=Asnp,
                                 A2=Asnp)
        XcovarXsnp = np.zeros(
            (self._gp.mean.n_fixed_effs, nW_Asnp * snp.shape[1]))
        n_effs_sum = 0
        for term in xrange(self._gp.mean.n_terms):
            n_effs_term = self._gp.mean.Fstar()[term].shape[1]
            if self._gp.mean.A_identity[term]:
                n_effs_term *= self._gp.P
            else:
                n_effs_term *= self._gp.mean.Astar()[term].shape[1]
            if identity_trick and self._gp.mean.A_identity[term]:
                Astar_term = None
            else:
                Astar_term = self._gp.mean.Astar()[term]
            XcovarXsnp[n_effs_sum:n_effs_sum + n_effs_term, :] = compute_X1KX2(
                Y=self._gp.mean.Ystar(),
                D=self._gp.mean.D,
                X1=self._gp.mean.Fstar()[term],
                X2=snp,
                A1=Astar_term,
                A2=Asnp)
            # advance the row offset so the next term fills the following block
            n_effs_sum += n_effs_term
        AXcovarXsnp = self._gp.mean.Areml_solve(XcovarXsnp)
        XsnpXsnp_ = XsnpXsnp - XcovarXsnp.T.dot(AXcovarXsnp)
        #compute beta

        #compute a
        snpKY = compute_XYA(DY=self._gp.mean.Yhat(), X=snp, A=Asnp)
        if 0:
            XsnpXsnp_solver = psd_solve.psd_solver(XsnpXsnp_,
                                                   lower=True,
                                                   threshold=1e-10,
                                                   check_finite=True,
                                                   overwrite_a=False)
            #solve XsnpXsnp \ AXcovarXsnp*beta
            DCbeta = XsnpXsnp_solver.solve(XcovarXsnp.T.dot(beta),
                                           overwrite_b=True)
            #solve XsnpXsnp \ a
            Da = XsnpXsnp_solver.solve(snpKY, overwrite_b=False)
            beta_snp = Da - DCbeta
            beta_up = XcovarXsnp.dot(-beta_snp)
            var_expl_snp = (XKY *
                            (beta + beta_up)).sum() + (snpKY * beta_snp).sum()
            beta_all = np.concatenate([beta + beta_up, beta_snp])

        Areml_11 = self._gp.mean.Areml()
        Areml_12 = XcovarXsnp
        Areml_22 = XsnpXsnp
        Areml1 = np.concatenate((Areml_11, Areml_12), 1)
        Areml2 = np.concatenate((Areml_12.T, Areml_22), 1)
        Areml_all = np.concatenate((Areml1, Areml2), 0)
        Areml_all_solver = psd_solve.psd_solver(Areml_all,
                                                lower=True,
                                                threshold=1e-10,
                                                check_finite=True,
                                                overwrite_a=False)
        XKY_all = np.concatenate((XKY, snpKY), 0)
        beta_all_ = Areml_all_solver.solve(XKY_all)
        var_expl_all = (XKY_all * beta_all_).sum()

        var_res = var_total - var_expl_all
        lml += var_res
        lml *= 0.5

        return lml, beta_all_, Areml_all, Areml_11
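
The "#use blockwise matrix inversion" comment above refers to the standard Schur-complement treatment of the 2x2 block system [Areml, XcovarXsnp; XcovarXsnp.T, XsnpXsnp]. The following minimal, self-contained NumPy sketch checks that blockwise solve against a direct solve; the sizes and names (n_cov, n_snp, A, B, C, y1, y2) are purely illustrative and not taken from the _gp object above.

import numpy as np

# Toy problem: a symmetric positive-definite block system
#   M = [ A   B ]        y = [ y1 ]
#       [ B.T C ]            [ y2 ]
# standing in for [Areml, XcovarXsnp; XcovarXsnp.T, XsnpXsnp] and [XKY, snpKY].
rng = np.random.RandomState(0)
n_cov, n_snp = 5, 3
G = rng.randn(n_cov + n_snp, n_cov + n_snp)
M = G.dot(G.T) + (n_cov + n_snp) * np.eye(n_cov + n_snp)
A, B, C = M[:n_cov, :n_cov], M[:n_cov, n_cov:], M[n_cov:, n_cov:]
y = rng.randn(n_cov + n_snp)
y1, y2 = y[:n_cov], y[n_cov:]

# Direct solve of the full system.
beta_direct = np.linalg.solve(M, y)

# Blockwise solve via the Schur complement S = C - B.T A^{-1} B
# (the role played by XsnpXsnp_ in the code above).
Ainv_B = np.linalg.solve(A, B)
Ainv_y1 = np.linalg.solve(A, y1)
S = C - B.T.dot(Ainv_B)
beta2 = np.linalg.solve(S, y2 - B.T.dot(Ainv_y1))   # SNP block
beta1 = Ainv_y1 - Ainv_B.dot(beta2)                 # updated covariate block
beta_blockwise = np.concatenate([beta1, beta2])

assert np.allclose(beta_direct, beta_blockwise)

Example #1 ends up assembling and factorizing the full Areml_all anyway; Example #2 below follows the Schur-complement path shown in this sketch.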
Example #2
    def LML_blockwise(self, snp, Asnp=None, *kw_args):
        """
        calculate LML
        The beta of the SNP tested is computed using blockwise matrix inversion.
        """
        self._gp._update_cache()

        if Asnp is None:
            nW_Asnp = self._gp.mean.Ystar().shape[1]
        else:
            nW_Asnp = Asnp.shape[1]

        #1. const term
        lml = self._gp.N * self._gp.P * np.log(2.0 * np.pi)

        #2. logdet term
        lml += np.sum(np.log(self._gp.cache['Sc2'])) * self._gp.N + np.log(
            self._gp.cache['s']).sum()

        #3. quadratic term
        #quad1 = (self._gp.mean.Zstar(identity_trick=identity_trick)*self._gp.mean.DLZ(identity_trick=identity_trick)).sum()

        XKY = self._gp.mean.compute_XKY(M=self._gp.mean.Yhat())
        beta = self._gp.mean.Areml_solve(XKY)
        var_total = (self._gp.mean.Yhat() * self._gp.mean.Ystar()).sum()
        var_expl = (XKY * beta).sum()

        #use blockwise matrix inversion
        #[  Areml,          XcovarXsnp
        #   XcovarXsnp.T    XsnpXsnp    ]

        XsnpXsnp = compute_X1KX2(Y=self._gp.mean.Ystar(),
                                 D=self._gp.mean.D,
                                 X1=snp,
                                 X2=snp,
                                 A1=Asnp,
                                 A2=Asnp)
        XcovarXsnp = np.zeros(
            (self._gp.mean.n_fixed_effs, nW_Asnp * snp.shape[1]))
        start = 0
        for term in xrange(self._gp.mean.n_terms):
            n_effs_term = self._gp.mean.Fstar()[term].shape[1]

            if self._gp.mean.identity_trick and self._gp.mean.A_identity[term]:
                Astar_term = None
                n_effs_term *= self._gp.P
            else:
                Astar_term = self._gp.mean.Astar()[term]
                n_effs_term *= Astar_term.shape[0]
            stop = start + n_effs_term
            block = compute_X1KX2(Y=self._gp.mean.Ystar(),
                                  D=self._gp.mean.D,
                                  X1=self._gp.mean.Fstar()[term],
                                  X2=snp,
                                  A1=Astar_term,
                                  A2=Asnp)
            XcovarXsnp[start:stop, :] = block
            start = stop
        AXcovarXsnp = self._gp.mean.Areml_solve(XcovarXsnp)
        XsnpXsnp_ = XsnpXsnp - XcovarXsnp.T.dot(AXcovarXsnp)

        #compute a
        snpKY = compute_XYA(DY=self._gp.mean.Yhat(), X=snp,
                            A=Asnp).ravel(order='F')

        XsnpXsnp_solver = psd_solve.psd_solver(XsnpXsnp_,
                                               lower=True,
                                               threshold=1e-10,
                                               check_finite=True,
                                               overwrite_a=False)
        #solve XsnpXsnp \ AXcovarXsnp*beta

        if 0:
            beta_snp1 = XsnpXsnp_solver.solve(snpKY, overwrite_b=False)
            beta_snp2 = XsnpXsnp_solver.solve(AXcovarXsnp.T.dot(XKY),
                                              overwrite_b=False)
            beta_snp = beta_snp1 - beta_snp2
        beta_snp = XsnpXsnp_solver.solve(snpKY - AXcovarXsnp.T.dot(XKY),
                                         overwrite_b=False)

        #solve XsnpXsnp \ a
        if 0:
            beta_up1 = AXcovarXsnp.dot(
                beta_snp2
            )  #AXcovarXsnp.dot(beta_snp)#self._gp.mean.Areml_solve(XcovarXsnp.dot(-beta_snp),identity_trick=identity_trick)#This is not correct
            beta_up2 = AXcovarXsnp.dot(beta_snp1)
            beta_up = beta_up1 - beta_up2
            beta_new = beta + beta_up
        beta_up = -AXcovarXsnp.dot(beta_snp)
        var_expl_update = (XKY * beta_up).sum()
        var_expl_snp = (snpKY * beta_snp).sum()

        var_expl_all = var_expl + var_expl_snp + var_expl_update
        beta_all = np.concatenate([beta + beta_up, beta_snp])
        var_res = var_total - var_expl_snp - var_expl_update - var_expl

        lml += var_res
        lml *= 0.5
        #if (var_expl_all)<var_expl-1e-4:
        if 0:  #debugging
            Areml1 = np.concatenate((self._gp.mean.Areml(), XcovarXsnp), 1)
            Areml2 = np.concatenate((XcovarXsnp.T, XsnpXsnp), 1)
            Areml_all = np.concatenate((Areml1, Areml2), 0)
            XKY_all = np.concatenate((XKY, snpKY), 0)

            Areml_all_solver = psd_solve.psd_solver(Areml_all,
                                                    lower=True,
                                                    threshold=1e-10,
                                                    check_finite=True,
                                                    overwrite_a=False)
            beta_all_ = Areml_all_solver.solve(XKY_all)
            Areml_all_inv = Areml_all_solver.solve(np.eye(XKY_all.shape[0]))
            Areml_inv = self._gp.mean.Areml_inv()
            Aremlinv_up = AXcovarXsnp.dot(XsnpXsnp_solver.solve(AXcovarXsnp.T))
            Areml_inv_part_1 = Areml_inv + Aremlinv_up
            Areml_inv_part_2 = XsnpXsnp_solver.solve(np.eye(XsnpXsnp.shape[0]))
            Areml_inv_part_3 = -XsnpXsnp_solver.solve(AXcovarXsnp.T)
            Areml_all_inv_1 = np.concatenate(
                (Areml_inv_part_1, Areml_inv_part_3.T), 1)
            Areml_all_inv_2 = np.concatenate(
                (Areml_inv_part_3, Areml_inv_part_2), 1)
            Areml_all_inv_ = np.concatenate((Areml_all_inv_1, Areml_all_inv_2),
                                            0)
            beta_all__ = np.dot(Areml_all_inv_, XKY_all)
            diff_areml = np.absolute(Areml_all_inv_ - Areml_all_inv).sum()
            diff_beta_ = np.absolute(beta_all_ - beta_all).sum()
            diff_beta__ = np.absolute(beta_all__ - beta_all).sum()
            beta_hat = self._gp.mean.beta_hat()  # assumed reference value for this check
            diff_beta_hat = np.absolute(beta - beta_hat).sum()
            print "var_expl = %.4f" % var_expl
            print "var_expl_update = %.4f" % var_expl_update
            print "var_expl_snp = %.4f", var_expl_snp
            print "absdiff Areml = %.5f", diff_areml
            print "absdiff beta_ = %.5f", diff_beta_
            print "absdiff beta__ = %.5f", diff_beta__
            print "absdiff beta_hat = %.5f", diff_beta_hat
            import ipdb
            ipdb.set_trace()

        return lml, beta_all
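
The "if 0:" debugging block in Example #2 checks the corresponding blockwise-inverse identity (Areml_inv_part_1/2/3) and the three-way split of the explained variance. Below is a self-contained toy check of both, under the same illustrative assumptions as the sketch after Example #1 (all sizes and names are made up for the example).

import numpy as np

# Same toy block system as in the sketch after Example #1.
rng = np.random.RandomState(0)
n_cov, n_snp = 5, 3
G = rng.randn(n_cov + n_snp, n_cov + n_snp)
M = G.dot(G.T) + (n_cov + n_snp) * np.eye(n_cov + n_snp)
A, B, C = M[:n_cov, :n_cov], M[:n_cov, n_cov:], M[n_cov:, n_cov:]
y = rng.randn(n_cov + n_snp)
y1, y2 = y[:n_cov], y[n_cov:]

Ainv = np.linalg.inv(A)
S = C - B.T.dot(Ainv).dot(B)            # Schur complement (XsnpXsnp_)
Sinv = np.linalg.inv(S)

# Blockwise inverse, assembled as in Areml_inv_part_1/2/3.
top_left = Ainv + Ainv.dot(B).dot(Sinv).dot(B.T).dot(Ainv)
top_right = -Ainv.dot(B).dot(Sinv)
Minv = np.concatenate((np.concatenate((top_left, top_right), 1),
                       np.concatenate((top_right.T, Sinv), 1)), 0)
assert np.allclose(Minv, np.linalg.inv(M))

# Three-way split of the quadratic form, matching
# var_expl + var_expl_update + var_expl_snp in Example #2.
beta = Ainv.dot(y1)                       # covariates-only beta
beta_snp = Sinv.dot(y2 - B.T.dot(beta))   # SNP block
beta_up = -Ainv.dot(B).dot(beta_snp)      # correction to the covariate block
var_expl = y1.dot(beta)
var_expl_update = y1.dot(beta_up)
var_expl_snp = y2.dot(beta_snp)
assert np.isclose(var_expl + var_expl_update + var_expl_snp,
                  y.dot(np.linalg.inv(M)).dot(y))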