def LML(self, params=None, *kw_args):
    """Assemble the LML objective from cached factorizations.

    If *params* is given the hyperparameters are updated first; the
    cache is refreshed in either case.  Returns
    0.5 * (const + logdet + quadratic [+ REML]) — sign convention as
    consumed by the surrounding optimizer.
    """
    if params is not None:
        self.setParams(params)
    self._update_cache()
    t0 = TIME.time()
    # constant term
    out = self.N * self.P * SP.log(2 * SP.pi)
    # log-determinant term, assembled from cached eigen/singular values
    out += SP.sum(SP.log(self.cache['s']))
    out += self.S * SP.sum(SP.log(self.cache['Se']))
    out += self.rank * SP.sum(SP.log(self.cache['Sg']))
    out += self.N * SP.sum(SP.log(self.cache['Sn']))
    # quadratic term
    out += SP.sum(self.cache['LZ'] ** 2)
    out -= SP.sum(self.cache['WLZ'] * self.cache['DWLZ'])
    # REML correction, only when fixed effects are present
    if self.F is not None:
        out += 2 * SP.log(SP.diag(self.cache['Areml_chol'])).sum()
    out *= 0.5
    smartSum(self.time, 'lml', TIME.time() - t0)
    smartSum(self.count, 'lml', 1)
    return out
def LML(self, params=None, *kw_args):
    """Assemble the LML objective from the cached Cholesky/eigen pieces.

    Optionally sets the hyperparameters first, then refreshes the cache.
    Returns 0.5 * (const + logdet + quadratic).
    """
    if params is not None:
        self.setParams(params)
    self._update_cache()
    t0 = TIME.time()
    # constant term
    out = self.N * self.P * SP.log(2 * SP.pi)
    # log-determinant term: noise eigenvalues plus Cholesky of B
    out += self.N * SP.sum(SP.log(self.cache['Sc2']))
    out += 2 * SP.log(SP.diag(self.cache['cholB'])).sum()
    # quadratic term
    out += SP.sum(self.cache['LY'] ** 2)
    out -= SP.sum(self.cache['WLY'] * self.cache['BiWLY'])
    out *= 0.5
    smartSum(self.time, 'lml', TIME.time() - t0)
    smartSum(self.count, 'lml', 1)
    return out
def LML(self, params=None, *kw_args):
    """Calculate the LML objective.

    If ``params`` is provided, the hyperparameters are set first; the
    internal cache is refreshed in either case.  The value is assembled
    as 0.5 * (const + logdet + quadratic [+ REML]) from cached
    factorizations.  NOTE(review): with these signs this reads as the
    *negative* log marginal likelihood up to convention — confirm
    against the optimizer that consumes it.
    """
    if params is not None:
        self.setParams(params)
    self._update_cache()
    start = TIME.time()
    #1. const term: N*P*log(2*pi)
    lml = self.N * self.P * SP.log(2 * SP.pi)
    #2. logdet term, assembled from cached eigen/singular values
    lml += SP.sum(SP.log(self.cache['s']))
    lml += self.S * SP.sum(SP.log(self.cache['Se']))
    lml += self.rank * SP.sum(SP.log(self.cache['Sg']))
    lml += self.N * SP.sum(SP.log(self.cache['Sn']))
    #3. quadratic term
    lml += SP.sum(self.cache['LZ'] * self.cache['LZ'])
    lml -= SP.sum(self.cache['WLZ'] * self.cache['DWLZ'])
    #4. reml term (only when fixed effects F are present)
    if self.F is not None:
        lml += 2 * SP.log(SP.diag(self.cache['Areml_chol'])).sum()
    lml *= 0.5
    smartSum(self.time, 'lml', TIME.time() - start)
    smartSum(self.count, 'lml', 1)
    return lml
def _update_cache(self):
    """Update cached row/column rotations and rotated phenotypes.

    Recomputes only what the dirty flags (Xr_has_changed,
    Cr/Cn.params_have_changed) invalidate, then clears the flags.
    """
    cov_params_have_changed = self.Cr.params_have_changed or self.Cn.params_have_changed

    if self.Xr_has_changed:
        start = TIME.time()
        """ Row SVD Bg + Noise """
        # row rotation from the SVD of Xr; squared singular values are
        # zero-padded to length N
        Urstar, S, V = NLA.svd(self.Xr)
        self.cache['Srstar'] = SP.concatenate(
            [S**2, SP.zeros(self.N - S.shape[0])])
        self.cache['Lr'] = Urstar.T
        self.mean.setRowRotation(Lr=self.cache['Lr'])

        smartSum(self.time, 'cache_XXchanged', TIME.time() - start)
        smartSum(self.count, 'cache_XXchanged', 1)

    if cov_params_have_changed:
        start = TIME.time()
        """ Col SVD Noise """
        # eigendecomposition of the (jittered) noise covariance
        S2, U2 = LA.eigh(self.Cn.K() + self.offset * SP.eye(self.P))
        self.cache['Sc2'] = S2
        # NOTE(review): US2 is computed but never used in this method
        US2 = SP.dot(U2, SP.diag(SP.sqrt(S2)))
        USi2 = SP.dot(U2, SP.diag(SP.sqrt(1. / S2)))
        """ Col SVD region """
        # SVD of the noise-whitened region loadings A
        A = SP.reshape(self.Cr.getParams(), (self.P, self.rank), order='F')
        Astar = SP.dot(USi2.T, A)
        Ucstar, S, V = NLA.svd(Astar)
        self.cache['Scstar'] = SP.concatenate(
            [S**2, SP.zeros(self.P - S.shape[0])])
        self.cache['Lc'] = SP.dot(Ucstar.T, USi2.T)
        """ pheno """
        self.mean.setColRotation(self.cache['Lc'])

    if cov_params_have_changed or self.Xr_has_changed:
        """ S """
        # diagonal of the rotated covariance and its inverse
        self.cache['s'] = SP.kron(self.cache['Scstar'], self.cache['Srstar']) + 1
        self.cache['d'] = 1. / self.cache['s']
        self.cache['D'] = SP.reshape(self.cache['d'], (self.N, self.P), order='F')
        """ pheno """
        # rotated phenotypes
        self.cache['LY'] = self.mean.evaluate()
        self.cache['DLY'] = self.cache['D'] * self.cache['LY']
        smartSum(self.time, 'cache_colSVDpRot', TIME.time() - start)
        smartSum(self.count, 'cache_colSVDpRot', 1)

    # clear dirty flags
    self.Y_has_changed = False
    self.Xr_has_changed = False
    self.Cr.params_have_changed = False
    self.Cn.params_have_changed = False
def _update_cache(self):
    """Refresh cached decompositions and rotated phenotypes.

    Only the pieces invalidated by the dirty flags (Xr_has_changed,
    Cr/Cn params_have_changed) are recomputed; the flags are cleared
    at the end.
    """
    covs_dirty = self.Cr.params_have_changed or self.Cn.params_have_changed

    if self.Xr_has_changed:
        start = TIME.time()
        # row rotation: SVD of the region design Xr, squared singular
        # values zero-padded to length N
        U_row, sv, _ = NLA.svd(self.Xr)
        self.cache['Srstar'] = SP.concatenate([sv ** 2,
                                               SP.zeros(self.N - sv.shape[0])])
        self.cache['Lr'] = U_row.T
        self.mean.setRowRotation(Lr=self.cache['Lr'])
        smartSum(self.time, 'cache_XXchanged', TIME.time() - start)
        smartSum(self.count, 'cache_XXchanged', 1)

    if covs_dirty:
        start = TIME.time()
        # eigendecomposition of the (jittered) noise covariance
        S2, U2 = LA.eigh(self.Cn.K() + self.offset * SP.eye(self.P))
        self.cache['Sc2'] = S2
        US2 = SP.dot(U2, SP.diag(SP.sqrt(S2)))  # kept for parity (unused here)
        USi2 = SP.dot(U2, SP.diag(SP.sqrt(1. / S2)))
        # SVD of the noise-whitened region loadings
        A = SP.reshape(self.Cr.getParams(), (self.P, self.rank), order='F')
        U_col, sv, _ = NLA.svd(SP.dot(USi2.T, A))
        self.cache['Scstar'] = SP.concatenate([sv ** 2,
                                               SP.zeros(self.P - sv.shape[0])])
        self.cache['Lc'] = SP.dot(U_col.T, USi2.T)
        # propagate the column rotation to the mean term
        self.mean.setColRotation(self.cache['Lc'])

    if covs_dirty or self.Xr_has_changed:
        # diagonal of the rotated covariance and its inverse
        self.cache['s'] = SP.kron(self.cache['Scstar'], self.cache['Srstar']) + 1
        self.cache['d'] = 1. / self.cache['s']
        self.cache['D'] = SP.reshape(self.cache['d'], (self.N, self.P), order='F')
        # rotated phenotypes
        self.cache['LY'] = self.mean.evaluate()
        self.cache['DLY'] = self.cache['D'] * self.cache['LY']
        smartSum(self.time, 'cache_colSVDpRot', TIME.time() - start)
        smartSum(self.count, 'cache_colSVDpRot', 1)

    self.Y_has_changed = False
    self.Xr_has_changed = False
    self.Cr.params_have_changed = False
    self.Cn.params_have_changed = False
def _update_cache(self):
    """Update cached rotations for the XX / Cg / Cn parameterization.

    Recomputes only what the dirty flags (XX_has_changed,
    Cg/Cn.params_have_changed) invalidate, then clears the flags.
    """
    cov_params_have_changed = self.Cg.params_have_changed or self.Cn.params_have_changed

    if self.XX_has_changed:
        start = TIME.time()
        """ Row SVD Bg + Noise """
        # row rotation from the eigendecomposition of XX
        self.cache['Srstar'],Urstar = LA.eigh(self.XX)
        self.cache['Lr'] = Urstar.T
        self.mean.setRowRotation(Lr=self.cache['Lr'])
        smartSum(self.time,'cache_XXchanged',TIME.time()-start)
        smartSum(self.count,'cache_XXchanged',1)

    if cov_params_have_changed:
        start = TIME.time()
        """ Col SVD Bg + Noise """
        # eigendecomposition of the (jittered) noise covariance
        S2,U2 = LA.eigh(self.Cn.K()+self.offset*SP.eye(self.P))
        self.cache['Sc2'] = S2
        # NOTE(review): US2 is computed but never used in this method
        US2 = SP.dot(U2,SP.diag(SP.sqrt(S2)))
        USi2 = SP.dot(U2,SP.diag(SP.sqrt(1./S2)))
        # whiten Cg by the noise factor, then eigendecompose
        Cstar = SP.dot(USi2.T,SP.dot(self.Cg.K(),USi2))
        self.cache['Scstar'],Ucstar = LA.eigh(Cstar)
        self.cache['Lc'] = SP.dot(Ucstar.T,USi2.T)
        """ pheno """
        self.mean.setColRotation(self.cache['Lc'])

    if cov_params_have_changed or self.XX_has_changed:
        """ S """
        # diagonal of the rotated covariance and its inverse
        self.cache['s'] = SP.kron(self.cache['Scstar'],self.cache['Srstar'])+1
        self.cache['d'] = 1./self.cache['s']
        self.cache['D'] = SP.reshape(self.cache['d'],(self.N,self.P), order='F')
        """ pheno """
        # rotated phenotypes
        self.cache['LY'] = self.mean.evaluate()
        self.cache['DLY'] = self.cache['D']*self.cache['LY']
        smartSum(self.time,'cache_colSVDpRot',TIME.time()-start)
        smartSum(self.count,'cache_colSVDpRot',1)

    # clear dirty flags
    self.XX_has_changed = False
    self.Cg.params_have_changed = False
    self.Cn.params_have_changed = False
def _LMLgrad_covar(self, covar, **kw_args):
    """Gradient of the LML w.r.t. the parameters of one covariance term.

    ``covar`` selects the term ('Cg' or 'Cn'; note the precompute below
    only handles these two, matching the original).  Returns a 1d array
    with one entry per parameter of the selected covariance.
    """
    # row-space diagonal and parameter count for the requested term
    if covar == 'Cg':
        row_diag = self.cache['Srstar']
        n_params = self.Cg.getNumberParams()
    elif covar == 'Cn':
        row_diag = SP.ones(self.N)
        n_params = self.Cn.getNumberParams()
    grad = SP.zeros(n_params)
    for i in range(n_params):
        # 0. rotate the i-th derivative of the column covariance
        t0 = TIME.time()
        dC = getattr(self, covar).Kgrad_param(i)
        LCL = SP.dot(self.cache['Lc'], SP.dot(dC, self.cache['Lc'].T))
        # 1. derivative of the log-determinant term
        t0 = TIME.time()
        grad[i] = SP.dot(self.cache['d'], SP.kron(LCL.diagonal(), row_diag))
        smartSum(self.time, 'lmlgrad_trace', TIME.time() - t0)
        smartSum(self.count, 'lmlgrad_trace', 1)
        # 2. derivative of the quadratic form
        t0 = TIME.time()
        KDLY = row_diag[:, SP.newaxis] * SP.dot(self.cache['DLY'], LCL.T)
        grad[i] -= (self.cache['DLY'] * KDLY).sum()
        smartSum(self.time, 'lmlgrad_quadform', TIME.time() - t0)
        smartSum(self.count, 'lmlgrad_quadform', 1)
        grad[i] *= 0.5
    return grad
def _LMLgrad_covar(self,covar,**kw_args):
    """Gradient of the LML w.r.t. the parameters of one covariance term.

    ``covar`` selects the term ('Cr', 'Cg' or 'Cn'); returns a 1d array
    with one entry per parameter of that covariance.
    """
    start = TIME.time()
    # number of hyperparameters of the selected covariance
    if covar=='Cr': n_params = self.Cr.getNumberParams()
    elif covar=='Cg': n_params = self.Cg.getNumberParams()
    elif covar=='Cn': n_params = self.Cn.getNumberParams()
    # pick the cached products associated with the row matrix of the
    # selected term (XrXr-based for Cr, XX-based for Cg, raw for Cn)
    # NOTE(review): RY, WrRY1 and WrRY2 are assigned in every branch but
    # never read below — possibly leftovers; confirm before removing.
    if covar=='Cr':
        trR = self.cache['trXrXr']
        RY = self.cache['XrXrY']
        RLY = self.cache['XrXrLY']
        WrRY1 = self.cache['XrXrXrY']
        WrRY2 = self.cache['XXrXrY']
        WrRLY1 = self.cache['XrXrXrLY']
        WrRLY2 = self.cache['XXrXrLY']
        XrRXr = self.cache['XrXrXrXr']
        XrRX = self.cache['XrXrXrX']
        XRX = self.cache['XXrXrX']
    elif covar=='Cg':
        trR = self.cache['trXX']
        RY = self.cache['XXY']
        RLY = self.cache['XXLY']
        WrRY1 = self.cache['XrXXY']
        WrRY2 = self.cache['XXXY']
        WrRLY1 = self.cache['XrXXLY']
        WrRLY2 = self.cache['XXXLY']
        XrRXr = self.cache['XrXXXr']
        XrRX = self.cache['XrXXX']
        XRX = self.cache['XXXX']
    else:
        trR = self.N
        RY = self.Y
        RLY = self.cache['LY']
        WrRY1 = self.cache['XrY']
        WrRY2 = self.cache['XY']
        WrRLY1 = self.cache['XrLY']
        WrRLY2 = self.cache['XLY']
        XrRXr = self.cache['XrXr']
        XrRX = self.cache['XXr'].T
        XRX = self.cache['XX']
    smartSum(self.time,'lmlgrad_trace2_rKDW_%s'%covar,TIME.time()-start)
    smartSum(self.count,'lmlgrad_trace2_rKDW_%s'%covar,1)
    # fill gradient vector
    RV = SP.zeros(n_params)
    for i in range(n_params):
        #0. calc LCL: rotate the i-th derivative of the column covariance
        if covar=='Cr': C = self.Cr.Kgrad_param(i)
        elif covar=='Cg': C = self.Cg.Kgrad_param(i)
        elif covar=='Cn': C = self.Cn.Kgrad_param(i)
        LCL = SP.dot(self.cache['Lc'],SP.dot(C,self.cache['Lc'].T))
        # projections of LCL onto the cached Estar / CstarH factors
        ELCL = SP.dot(self.cache['Estar'].T,LCL)
        ELCLE = SP.dot(ELCL,self.cache['Estar'])
        ELCLCsh = SP.dot(ELCL,self.cache['CstarH'])
        CshLCL = SP.dot(self.cache['CstarH'].T,LCL)
        CshLCLCsh = SP.dot(CshLCL,self.cache['CstarH'])
        # WCoRW: 2x2 block matrix assembled from Kronecker products
        WCoRW11 = SP.kron(ELCLE,XrRXr)
        WCoRW12 = SP.kron(ELCLCsh,XrRX)
        WCoRW22 = SP.kron(CshLCLCsh,XRX)
        WCoRW = SP.array(SP.bmat([[WCoRW11,WCoRW12],[WCoRW12.T,WCoRW22]]))
        # WCoRLY: stacked column-major vectorizations of the two parts
        WCoRLY1 = SP.dot(WrRLY1,ELCL.T)
        WCoRLY2 = SP.dot(WrRLY2,CshLCL.T)
        WCoRLY = SP.concatenate([SP.reshape(WCoRLY1,(WCoRLY1.size,1),order='F'),
                                 SP.reshape(WCoRLY2,(WCoRLY2.size,1),order='F')])
        # CoRLY
        CoRLY = SP.dot(RLY,LCL.T)
        #1. der of log det
        start = TIME.time()
        trC = LCL.diagonal().sum()
        RV[i] = trC*trR
        RV[i]-= SP.sum(self.cache['Bi']*WCoRW)
        smartSum(self.time,'lmlgrad_trace2_WDKDW_%s'%covar,TIME.time()-start)
        smartSum(self.count,'lmlgrad_trace2_WDKDW_%s'%covar,1)
        #2. der of quad form
        start = TIME.time()
        RV[i] -= SP.sum(self.cache['LY']*CoRLY)
        RV[i] -= SP.sum(self.cache['BiWLY']*SP.dot(WCoRW,self.cache['BiWLY']))
        RV[i] += 2*SP.sum(self.cache['BiWLY']*WCoRLY)
        smartSum(self.time,'lmlgrad_quadForm_%s'%covar,TIME.time()-start)
        smartSum(self.count,'lmlgrad_quadForm_%s'%covar,1)
        RV[i] *= 0.5
    return RV
def _LMLgrad_covar(self, covar, **kw_args):
    """Gradient of the LML w.r.t. the parameters of one covariance term.

    ``covar`` selects the term ('Cr' or 'Cn').  When fixed effects are
    present (``self.F is not None``) the gradient includes the REML
    correction via the derivative of Areml and of beta.  Returns a 1d
    array with one entry per parameter of the selected covariance.
    """
    # precompute the cached row-side products for the selected term
    if covar == 'Cr':
        trR = self.cache['trXrXr']
        RLZ = self.cache['XrXrLZ']
        SrDWLZ = self.cache['SgDWLZ']
        WrRLZ = self.cache['WrXrXrLZ']
        diagSr = self.cache['Sg']
        n_params = self.Cr.getNumberParams()
        if self.F is not None:
            SrDWLY = self.cache['SgDWLY']
            WrRLY = self.cache['WrXrXrLY']
            SrDWLV_t = self.cache['SgDWLV_t']
            WrRLF = self.cache['WrXrXrLrF']
            FRF = self.cache['FLrXrXrLrF']
            FRLrY = self.cache['FXrXrLrY']
    elif covar == 'Cn':
        trR = self.N
        RLZ = self.cache['LZ']
        SrDWLZ = self.cache['DWLZ']
        WrRLZ = self.cache['WrLZ']
        diagSr = SP.ones(self.S)
        n_params = self.Cn.getNumberParams()
        if self.F is not None:
            SrDWLY = self.cache['DWLY']
            WrRLY = self.cache['WrLY']
            # NOTE(review): SrDWLV is assigned only in this branch and
            # never read below — looks dead; confirm before removing.
            SrDWLV = self.cache['DWLV']
            WrRLF = self.cache['WrLrF']
            SrDWLV_t = self.cache['DWLV_t']
            FRF = self.cache['FF']
            FRLrY = self.cache['FY']
    # fill gradient vector
    RV = SP.zeros(n_params)
    for i in range(n_params):
        #0. calc LCL: rotate the i-th derivative of the column covariance
        start = TIME.time()
        if covar == 'Cr':
            C = self.Cr.Kgrad_param(i)
        elif covar == 'Cn':
            C = self.Cn.Kgrad_param(i)
        LCL = SP.dot(self.cache['Lc'], SP.dot(C, self.cache['Lc'].T))
        LLCLL = SP.dot(self.cache['Lc'].T, SP.dot(LCL, self.cache['Lc']))
        LCLW = SP.dot(LCL, self.cache['Wc'].T)
        WLCLW = SP.dot(self.cache['Wc'], LCLW)
        CoRLZ = SP.dot(RLZ, LCL.T)
        CoSrDWLZ = SP.dot(SrDWLZ, WLCLW.T)
        WCoRLZ = SP.dot(WrRLZ, LCLW)
        if self.F is not None:
            WcCLcA = SP.dot(SP.dot(self.cache['Wc'], LCL), self.cache['LcA'])
            CoSrDWLY = SP.dot(SrDWLY, WLCLW.T)
            DCoSrDWLY = self.cache['D'] * CoSrDWLY
            WCoRLY = SP.dot(WrRLY, LCLW)
            DWCoRLY = self.cache['D'] * WCoRLY
            #0a. grad of Areml
            # NOTE(review): the else-branch below is unreachable (if 1:)
            # and kept as an alternative tensordot formulation.
            if 1:
                Areml_grad = SP.dot(
                    SP.kron(WcCLcA, WrRLF).T, self.cache['DWLV'])
            else:
                Areml_grad = SP.tensordot(SP.tensordot(
                    WrRLF, self.cache['DWLV_t'], axes=(0, 0)),
                    WcCLcA, axes=(1, 0))
                # and then resize...
            Areml_grad += Areml_grad.T
            Areml_grad -= SP.kron(LLCLL, FRF)  #TODO: think about LLCLL
            CoSrDWLV_t = SP.tensordot(SrDWLV_t, WLCLW, axes=(1, 1))
            Areml_grad -= SP.tensordot(self.cache['DWLV_t'], CoSrDWLV_t,
                                       axes=([0, 1], [0, 2]))
            #0b. grad of beta
            B_grad1 = -SP.dot(FRLrY, LLCLL)
            B_grad1 -= SP.dot(SP.dot(self.cache['WrLrF'].T, DCoSrDWLY),
                              self.cache['WcLcA'])
            B_grad1 += SP.dot(SP.dot(WrRLF.T, self.cache['DWLY']), WcCLcA)
            B_grad1 += SP.dot(SP.dot(self.cache['WrLrF'].T, DWCoRLY),
                              self.cache['WcLcA'])
            b_grad = SP.reshape(B_grad1, (self.K * self.P, 1), order='F')
            b_grad -= SP.dot(Areml_grad, self.cache['b'])
            b_grad = SP.dot(self.cache['Areml_inv'], b_grad)
        #1. der of log det
        start = TIME.time()
        trC = LCL.diagonal().sum()
        RV[i] = trC * trR
        RV[i] -= SP.dot(self.cache['d'], SP.kron(WLCLW.diagonal(), diagSr))
        smartSum(self.time, 'lmlgrad_trace', TIME.time() - start)
        smartSum(self.count, 'lmlgrad_trace', 1)
        #2. der of quad form
        start = TIME.time()
        RV[i] -= SP.sum(self.cache['LZ'] * CoRLZ)
        RV[i] -= SP.sum(self.cache['DWLZ'] * CoSrDWLZ)
        RV[i] += 2 * SP.sum(self.cache['DWLZ'] * WCoRLZ)
        if self.F is not None:
            RV[i] -= 2 * SP.dot(self.cache['vecVKiZ'].T, b_grad)
        smartSum(self.time, 'lmlgrad_quadform', TIME.time() - start)
        smartSum(self.count, 'lmlgrad_quadform', 1)
        if self.F is not None:
            #3. reml term
            RV[i] += (self.cache['Areml_inv'] * Areml_grad).sum()
        RV[i] *= 0.5
    return RV
def _update_cache(self):
    """Update cached factorizations, rotated phenotypes and REML terms.

    Recomputation is driven by the dirty flags (Xr/F/Y_has_changed,
    Cr/Cn.params_have_changed); all flags are cleared at the end.
    """
    cov_params_have_changed = self.Cr.params_have_changed or self.Cn.params_have_changed

    if self.Xr_has_changed:
        start = TIME.time()
        """ Row SVD on small matrix """
        Ug, Sgh, Vg = NLA.svd(self.Xr, full_matrices=0)
        I = Sgh < self.tol
        if I.any():
            # drop near-zero singular directions and rebuild Xr
            warnings.warn(
                'Xr has dependent columns, dimensionality reduced')
            Sgh = Sgh[~I]
            Ug = Ug[:, ~I]
            Vg = SP.eye(Sgh.shape[0])
            Xr = Ug * Sgh[SP.newaxis, :]
            self.set_Xr(Xr)
        self.cache['Sg'] = Sgh**2
        self.cache['Wr'] = Ug.T
        self.cache['Vg'] = Vg
        self.cache['trXrXr'] = self.cache['Sg'].sum()

    if cov_params_have_changed:
        start = TIME.time()
        """ Col SVD on big matrix """
        # eigendecomposition of the (jittered) noise covariance; Lc is
        # its inverse square-root rotation
        self.cache['Sn'], Un = LA.eigh(self.Cn.K() +
                                       self.offset * SP.eye(self.P))
        self.cache['Lc'] = (self.cache['Sn']**(-0.5))[:, SP.newaxis] * Un.T
        E = SP.reshape(self.Cr.getParams(), (self.P, self.rank), order='F')
        Estar = SP.dot(self.cache['Lc'], E)
        Ue, Seh, Ve = NLA.svd(Estar, full_matrices=0)
        self.cache['Se'] = Seh**2
        self.cache['Wc'] = Ue.T

    if cov_params_have_changed or self.Xr_has_changed:
        """ S """
        # diagonal of the rotated covariance and its inverse
        self.cache['s'] = SP.kron(1. / self.cache['Se'],
                                  1. / self.cache['Sg']) + 1
        self.cache['d'] = 1. / self.cache['s']
        self.cache['D'] = SP.reshape(self.cache['d'], (self.S, self.rank),
                                     order='F')

    if self.Xr_has_changed or self.Y_has_changed:
        """ phenos transf """
        self.cache['WrLrY'] = SP.dot(self.cache['Wr'], self.Y)
        XrLrY = SP.dot(self.Xr.T, self.Y)
        self.cache['XrXrLrY'] = SP.dot(self.Xr, XrLrY)
        self.cache['WrXrXrLrY'] = (self.cache['Sg']**
                                   0.5)[:, SP.newaxis] * SP.dot(
                                       self.cache['Vg'], XrLrY)

    if (self.Xr_has_changed or self.F_has_changed) and self.F is not None:
        """ F transf """
        self.cache['FF'] = SP.dot(self.F.T, self.F)
        self.cache['WrLrF'] = SP.dot(self.cache['Wr'], self.F)
        XrLrF = SP.dot(self.Xr.T, self.F)
        self.cache['XrXrLrF'] = SP.dot(self.Xr, XrLrF)
        self.cache['FLrXrXrLrF'] = SP.dot(self.F.T, self.cache['XrXrLrF'])
        self.cache['WrXrXrLrF'] = (self.cache['Sg']**
                                   0.5)[:, SP.newaxis] * SP.dot(
                                       self.cache['Vg'], XrLrF)

    if (self.F_has_changed or self.Y_has_changed) and self.F is not None:
        self.cache['FY'] = SP.dot(self.F.T, self.Y)

    if (self.Xr_has_changed or self.F_has_changed or
            self.Y_has_changed) and self.F is not None:
        self.cache['FXrXrLrY'] = SP.dot(self.F.T, self.cache['XrXrLrY'])

    if cov_params_have_changed or self.Y_has_changed:
        """ phenos transf """
        # column-rotated phenotypes and their projections
        self.cache['LY'] = SP.dot(self.Y, self.cache['Lc'].T)
        self.cache['WrLY'] = SP.dot(self.cache['WrLrY'], self.cache['Lc'].T)
        self.cache['WLY'] = SP.dot(self.cache['WrLY'], self.cache['Wc'].T)
        self.cache['XrXrLY'] = SP.dot(self.cache['XrXrLrY'],
                                      self.cache['Lc'].T)
        self.cache['WrXrXrLY'] = SP.dot(self.cache['WrXrXrLrY'],
                                        self.cache['Lc'].T)

    if cov_params_have_changed and self.F is not None:
        """ A transf """
        # A for now is just I
        self.cache['LcA'] = self.cache['Lc']
        self.cache['Cni'] = SP.dot(self.cache['Lc'].T, self.cache['Lc'])
        self.cache['LcALcA'] = self.cache['Cni']
        self.cache['WcLcA'] = SP.dot(self.cache['Wc'], self.cache['LcA'])

    if cov_params_have_changed or self.Xr_has_changed or self.Y_has_changed:
        self.cache['DWLY'] = self.cache['D'] * self.cache['WLY']
        self.cache['SgDWLY'] = self.cache[
            'Sg'][:, SP.newaxis] * self.cache['DWLY']
        # NOTE(review): `start` is only set when Xr or the covariance
        # params changed; if only Y changed this raises NameError —
        # confirm whether that path can occur.
        smartSum(self.time, 'cache_colSVDpRot', TIME.time() - start)
        smartSum(self.count, 'cache_colSVDpRot', 1)

    if (cov_params_have_changed or self.Xr_has_changed or
            self.F_has_changed) and self.F is not None:
        # assemble the REML matrix Areml and its Cholesky-based inverse
        self.cache['WLV'] = SP.kron(self.cache['WcLcA'],
                                    self.cache['WrLrF'])
        self.cache['DWLV'] = self.cache['d'][:, SP.
                                             newaxis] * self.cache['WLV']
        self.cache['DWLV_t'] = SP.reshape(
            self.cache['DWLV'], (self.S, self.rank, self.P * self.K),
            order='F')
        self.cache['SgDWLV_t'] = self.cache[
            'Sg'][:, SP.newaxis, SP.newaxis] * self.cache['DWLV_t']
        self.cache['Areml'] = SP.kron(self.cache['LcALcA'],
                                      self.cache['FF'])
        self.cache['Areml'] -= SP.dot(self.cache['WLV'].T,
                                      self.cache['DWLV'])
        self.cache['Areml_chol'] = LA.cholesky(self.cache['Areml']).T
        # TODO: handle pseudo inverses
        self.cache['Areml_inv'] = LA.cho_solve(
            (self.cache['Areml_chol'], True), SP.eye(self.K * self.P))

    if (cov_params_have_changed or self.Xr_has_changed or
            self.Y_has_changed or self.F_has_changed) and self.F is not None:
        # GLS estimate of the fixed effects and the residual Z = Y - F*B
        VKiY = SP.dot(self.cache['FY'], self.cache['Cni'])
        #TODO: have not controlled factorization in the following line
        VKiY -= SP.dot(SP.dot(self.cache['WrLrF'].T, self.cache['DWLY']),
                       self.cache['WcLcA'])
        self.cache['b'] = SP.dot(
            self.cache['Areml_inv'],
            SP.reshape(VKiY, (VKiY.size, 1), order='F'))
        self.cache['B'] = SP.reshape(self.cache['b'], (self.K, self.P),
                                     order='F')
        self.cache['BLc'] = SP.dot(self.cache['B'], self.cache['Lc'].T)
        self.cache['BLcWc'] = SP.dot(self.cache['BLc'], self.cache['Wc'].T)
        self.cache['Z'] = self.Y - SP.dot(self.F, self.cache['B'])
        self.cache['FZ'] = self.cache['FY'] - SP.dot(
            self.cache['FF'], self.cache['B'])
        self.cache['LZ'] = self.cache['LY'] - SP.dot(
            self.F, self.cache['BLc'])
        self.cache['WrLZ'] = self.cache['WrLY'] - SP.dot(
            self.cache['WrLrF'], self.cache['BLc'])
        self.cache['WLZ'] = self.cache['WLY'] - SP.dot(
            self.cache['WrLrF'], self.cache['BLcWc'])
        self.cache['DWLZ'] = self.cache['D'] * self.cache['WLZ']
        self.cache['SgDWLZ'] = self.cache[
            'Sg'][:, SP.newaxis] * self.cache['DWLZ']
        self.cache['XrXrLZ'] = self.cache['XrXrLY'] - SP.dot(
            self.cache['XrXrLrF'], self.cache['BLc'])
        self.cache['WrXrXrLZ'] = self.cache['WrXrXrLY'] - SP.dot(
            self.cache['WrXrXrLrF'], self.cache['BLc'])
        VKiZ = SP.dot(self.cache['FZ'], self.cache['Cni'])
        VKiZ -= SP.dot(self.cache['WrLrF'].T,
                       SP.dot(self.cache['DWLZ'], self.cache['WcLcA']))
        self.cache['vecVKiZ'] = SP.reshape(VKiZ, (self.K * self.P, 1),
                                           order='F')

    if self.F is None:
        """ Then Z=Y """
        # without fixed effects the residual equals the phenotype
        self.cache['LZ'] = self.cache['LY']
        self.cache['WLZ'] = self.cache['WLY']
        self.cache['DWLZ'] = self.cache['DWLY']
        self.cache['XrXrLZ'] = self.cache['XrXrLY']
        self.cache['SgDWLZ'] = self.cache['SgDWLY']
        self.cache['WrXrXrLZ'] = self.cache['WrXrXrLY']
        self.cache['WrLZ'] = self.cache['WrLY']

    # clear dirty flags
    self.Y_has_changed = False
    self.F_has_changed = False
    self.Xr_has_changed = False
    self.Cr.params_have_changed = False
    self.Cn.params_have_changed = False
def _LMLgrad_covar(self,covar,**kw_args):
    """Gradient of the LML w.r.t. the parameters of one covariance term.

    ``covar`` selects the term ('Cr' or 'Cn').  With fixed effects
    (``self.F is not None``) the REML correction enters through the
    derivative of Areml and of beta.  Returns a 1d array with one entry
    per parameter of the selected covariance.
    """
    # precompute the cached row-side products for the selected term
    if covar=='Cr':
        trR = self.cache['trXrXr']
        RLZ = self.cache['XrXrLZ']
        SrDWLZ = self.cache['SgDWLZ']
        WrRLZ = self.cache['WrXrXrLZ']
        diagSr = self.cache['Sg']
        n_params = self.Cr.getNumberParams()
        if self.F is not None:
            SrDWLY = self.cache['SgDWLY']
            WrRLY = self.cache['WrXrXrLY']
            SrDWLV_t = self.cache['SgDWLV_t']
            WrRLF = self.cache['WrXrXrLrF']
            FRF = self.cache['FLrXrXrLrF']
            FRLrY = self.cache['FXrXrLrY']
    elif covar=='Cn':
        trR = self.N
        RLZ = self.cache['LZ']
        SrDWLZ = self.cache['DWLZ']
        WrRLZ = self.cache['WrLZ']
        diagSr = SP.ones(self.S)
        n_params = self.Cn.getNumberParams()
        if self.F is not None:
            SrDWLY = self.cache['DWLY']
            WrRLY = self.cache['WrLY']
            # NOTE(review): SrDWLV is assigned only here and never read
            # below — looks dead; confirm before removing.
            SrDWLV = self.cache['DWLV']
            WrRLF = self.cache['WrLrF']
            SrDWLV_t = self.cache['DWLV_t']
            FRF = self.cache['FF']
            FRLrY = self.cache['FY']
    # fill gradient vector
    RV = SP.zeros(n_params)
    for i in range(n_params):
        #0. calc LCL: rotate the i-th derivative of the column covariance
        start = TIME.time()
        if covar=='Cr':
            C = self.Cr.Kgrad_param(i)
        elif covar=='Cn':
            C = self.Cn.Kgrad_param(i)
        LCL = SP.dot(self.cache['Lc'],SP.dot(C,self.cache['Lc'].T))
        LLCLL = SP.dot(self.cache['Lc'].T,SP.dot(LCL,self.cache['Lc']))
        LCLW = SP.dot(LCL,self.cache['Wc'].T)
        WLCLW = SP.dot(self.cache['Wc'],LCLW)
        CoRLZ = SP.dot(RLZ,LCL.T)
        CoSrDWLZ = SP.dot(SrDWLZ,WLCLW.T)
        WCoRLZ = SP.dot(WrRLZ,LCLW)
        if self.F is not None:
            WcCLcA = SP.dot(SP.dot(self.cache['Wc'],LCL),self.cache['LcA'])
            CoSrDWLY = SP.dot(SrDWLY,WLCLW.T)
            DCoSrDWLY = self.cache['D']*CoSrDWLY
            WCoRLY = SP.dot(WrRLY,LCLW)
            DWCoRLY = self.cache['D']*WCoRLY
            #0a. grad of Areml
            # NOTE(review): the else-branch is unreachable (if 1:) and
            # kept as an alternative tensordot formulation.
            if 1:
                Areml_grad = SP.dot(SP.kron(WcCLcA,WrRLF).T,self.cache['DWLV'])
            else:
                Areml_grad = SP.tensordot(SP.tensordot(WrRLF,self.cache['DWLV_t'],axes=(0,0)),WcCLcA,axes=(1,0))
                # and then resize...
            Areml_grad+= Areml_grad.T
            Areml_grad-= SP.kron(LLCLL,FRF) #TODO: think about LLCLL
            CoSrDWLV_t = SP.tensordot(SrDWLV_t,WLCLW,axes=(1,1))
            Areml_grad-= SP.tensordot(self.cache['DWLV_t'],CoSrDWLV_t,axes=([0,1],[0,2]))
            #0b. grad of beta
            B_grad1 = -SP.dot(FRLrY,LLCLL)
            B_grad1-= SP.dot(SP.dot(self.cache['WrLrF'].T,DCoSrDWLY),self.cache['WcLcA'])
            B_grad1+= SP.dot(SP.dot(WrRLF.T,self.cache['DWLY']),WcCLcA)
            B_grad1+= SP.dot(SP.dot(self.cache['WrLrF'].T,DWCoRLY),self.cache['WcLcA'])
            b_grad = SP.reshape(B_grad1,(self.K*self.P,1),order='F')
            b_grad-= SP.dot(Areml_grad,self.cache['b'])
            b_grad = SP.dot(self.cache['Areml_inv'],b_grad)
        #1. der of log det
        start = TIME.time()
        trC = LCL.diagonal().sum()
        RV[i] = trC*trR
        RV[i]-= SP.dot(self.cache['d'],SP.kron(WLCLW.diagonal(),diagSr))
        smartSum(self.time,'lmlgrad_trace',TIME.time()-start)
        smartSum(self.count,'lmlgrad_trace',1)
        #2. der of quad form
        start = TIME.time()
        RV[i]-= SP.sum(self.cache['LZ']*CoRLZ)
        RV[i]-= SP.sum(self.cache['DWLZ']*CoSrDWLZ)
        RV[i]+= 2*SP.sum(self.cache['DWLZ']*WCoRLZ)
        if self.F is not None:
            RV[i]-= 2*SP.dot(self.cache['vecVKiZ'].T,b_grad)
        smartSum(self.time,'lmlgrad_quadform',TIME.time()-start)
        smartSum(self.count,'lmlgrad_quadform',1)
        if self.F is not None:
            #3. reml term
            RV[i] += (self.cache['Areml_inv']*Areml_grad).sum()
        RV[i] *= 0.5
    return RV
def _update_cache(self):
    """Update cached factorizations, rotated phenotypes and REML terms.

    Recomputation is driven by the dirty flags (Xr/F/Y_has_changed,
    Cr/Cn.params_have_changed); all flags are cleared at the end.
    """
    cov_params_have_changed = self.Cr.params_have_changed or self.Cn.params_have_changed

    if self.Xr_has_changed:
        start = TIME.time()
        """ Row SVD on small matrix """
        Ug,Sgh,Vg = NLA.svd(self.Xr,full_matrices=0)
        I = Sgh<self.tol
        if I.any():
            # drop near-zero singular directions and rebuild Xr
            warnings.warn('Xr has dependent columns, dimensionality reduced')
            Sgh = Sgh[~I]
            Ug = Ug[:,~I]
            Vg = SP.eye(Sgh.shape[0])
            Xr = Ug*Sgh[SP.newaxis,:]
            self.set_Xr(Xr)
        self.cache['Sg'] = Sgh**2
        self.cache['Wr'] = Ug.T
        self.cache['Vg'] = Vg
        self.cache['trXrXr'] = self.cache['Sg'].sum()

    if cov_params_have_changed:
        start = TIME.time()
        """ Col SVD on big matrix """
        # eigendecomposition of the (jittered) noise covariance; Lc is
        # its inverse square-root rotation
        self.cache['Sn'],Un = LA.eigh(self.Cn.K()+self.offset*SP.eye(self.P))
        self.cache['Lc'] = (self.cache['Sn']**(-0.5))[:,SP.newaxis]*Un.T
        E = SP.reshape(self.Cr.getParams(),(self.P,self.rank),order='F')
        Estar = SP.dot(self.cache['Lc'],E)
        Ue,Seh,Ve = NLA.svd(Estar,full_matrices=0)
        self.cache['Se'] = Seh**2
        self.cache['Wc'] = Ue.T

    if cov_params_have_changed or self.Xr_has_changed:
        """ S """
        # diagonal of the rotated covariance and its inverse
        self.cache['s'] = SP.kron(1./self.cache['Se'],1./self.cache['Sg'])+1
        self.cache['d'] = 1./self.cache['s']
        self.cache['D'] = SP.reshape(self.cache['d'],(self.S,self.rank), order='F')

    if self.Xr_has_changed or self.Y_has_changed:
        """ phenos transf """
        self.cache['WrLrY'] = SP.dot(self.cache['Wr'],self.Y)
        XrLrY = SP.dot(self.Xr.T,self.Y)
        self.cache['XrXrLrY'] = SP.dot(self.Xr,XrLrY)
        self.cache['WrXrXrLrY'] = (self.cache['Sg']**0.5)[:,SP.newaxis]*SP.dot(self.cache['Vg'],XrLrY)

    if (self.Xr_has_changed or self.F_has_changed) and self.F is not None:
        """ F transf """
        self.cache['FF'] = SP.dot(self.F.T,self.F)
        self.cache['WrLrF'] = SP.dot(self.cache['Wr'],self.F)
        XrLrF = SP.dot(self.Xr.T,self.F)
        self.cache['XrXrLrF'] = SP.dot(self.Xr,XrLrF)
        self.cache['FLrXrXrLrF'] = SP.dot(self.F.T,self.cache['XrXrLrF'])
        self.cache['WrXrXrLrF'] = (self.cache['Sg']**0.5)[:,SP.newaxis]*SP.dot(self.cache['Vg'],XrLrF)

    if (self.F_has_changed or self.Y_has_changed) and self.F is not None:
        self.cache['FY'] = SP.dot(self.F.T,self.Y)

    if (self.Xr_has_changed or self.F_has_changed or self.Y_has_changed) and self.F is not None:
        self.cache['FXrXrLrY'] = SP.dot(self.F.T,self.cache['XrXrLrY'])

    if cov_params_have_changed or self.Y_has_changed:
        """ phenos transf """
        # column-rotated phenotypes and their projections
        self.cache['LY'] = SP.dot(self.Y,self.cache['Lc'].T)
        self.cache['WrLY'] = SP.dot(self.cache['WrLrY'],self.cache['Lc'].T)
        self.cache['WLY'] = SP.dot(self.cache['WrLY'],self.cache['Wc'].T)
        self.cache['XrXrLY'] = SP.dot(self.cache['XrXrLrY'],self.cache['Lc'].T)
        self.cache['WrXrXrLY'] = SP.dot(self.cache['WrXrXrLrY'],self.cache['Lc'].T)

    if cov_params_have_changed and self.F is not None:
        """ A transf """
        # A for now is just I
        self.cache['LcA'] = self.cache['Lc']
        self.cache['Cni'] = SP.dot(self.cache['Lc'].T,self.cache['Lc'])
        self.cache['LcALcA'] = self.cache['Cni']
        self.cache['WcLcA'] = SP.dot(self.cache['Wc'],self.cache['LcA'])

    if cov_params_have_changed or self.Xr_has_changed or self.Y_has_changed:
        self.cache['DWLY'] = self.cache['D']*self.cache['WLY']
        self.cache['SgDWLY'] = self.cache['Sg'][:,SP.newaxis]*self.cache['DWLY']
        # NOTE(review): `start` is only set when Xr or the covariance
        # params changed; if only Y changed this raises NameError —
        # confirm whether that path can occur.
        smartSum(self.time,'cache_colSVDpRot',TIME.time()-start)
        smartSum(self.count,'cache_colSVDpRot',1)

    if (cov_params_have_changed or self.Xr_has_changed or self.F_has_changed) and self.F is not None:
        # assemble the REML matrix Areml and its Cholesky-based inverse
        self.cache['WLV'] = SP.kron(self.cache['WcLcA'],self.cache['WrLrF'])
        self.cache['DWLV'] = self.cache['d'][:,SP.newaxis]*self.cache['WLV']
        self.cache['DWLV_t'] = SP.reshape(self.cache['DWLV'],(self.S,self.rank,self.P*self.K),order='F')
        self.cache['SgDWLV_t'] = self.cache['Sg'][:,SP.newaxis,SP.newaxis]*self.cache['DWLV_t']
        self.cache['Areml'] = SP.kron(self.cache['LcALcA'],self.cache['FF'])
        self.cache['Areml']-= SP.dot(self.cache['WLV'].T,self.cache['DWLV'])
        self.cache['Areml_chol'] = LA.cholesky(self.cache['Areml']).T
        # TODO: handle pseudo inverses
        self.cache['Areml_inv'] = LA.cho_solve((self.cache['Areml_chol'],True),SP.eye(self.K*self.P))

    if (cov_params_have_changed or self.Xr_has_changed or self.Y_has_changed or self.F_has_changed) and self.F is not None:
        # GLS estimate of the fixed effects and the residual Z = Y - F*B
        VKiY = SP.dot(self.cache['FY'],self.cache['Cni'])
        #TODO: have not controlled factorization in the following line
        VKiY-= SP.dot(SP.dot(self.cache['WrLrF'].T,self.cache['DWLY']),self.cache['WcLcA'])
        self.cache['b'] = SP.dot(self.cache['Areml_inv'],SP.reshape(VKiY,(VKiY.size,1),order='F'))
        self.cache['B'] = SP.reshape(self.cache['b'],(self.K,self.P), order='F')
        self.cache['BLc'] = SP.dot(self.cache['B'],self.cache['Lc'].T)
        self.cache['BLcWc'] = SP.dot(self.cache['BLc'],self.cache['Wc'].T)
        self.cache['Z'] = self.Y-SP.dot(self.F,self.cache['B'])
        self.cache['FZ'] = self.cache['FY']-SP.dot(self.cache['FF'],self.cache['B'])
        self.cache['LZ'] = self.cache['LY']-SP.dot(self.F,self.cache['BLc'])
        self.cache['WrLZ'] = self.cache['WrLY']-SP.dot(self.cache['WrLrF'],self.cache['BLc'])
        self.cache['WLZ'] = self.cache['WLY']-SP.dot(self.cache['WrLrF'],self.cache['BLcWc'])
        self.cache['DWLZ'] = self.cache['D']*self.cache['WLZ']
        self.cache['SgDWLZ'] = self.cache['Sg'][:,SP.newaxis]*self.cache['DWLZ']
        self.cache['XrXrLZ'] = self.cache['XrXrLY']-SP.dot(self.cache['XrXrLrF'],self.cache['BLc'])
        self.cache['WrXrXrLZ'] = self.cache['WrXrXrLY']-SP.dot(self.cache['WrXrXrLrF'],self.cache['BLc'])
        VKiZ = SP.dot(self.cache['FZ'],self.cache['Cni'])
        VKiZ-= SP.dot(self.cache['WrLrF'].T,SP.dot(self.cache['DWLZ'],self.cache['WcLcA']))
        self.cache['vecVKiZ'] = SP.reshape(VKiZ,(self.K*self.P,1),order='F')

    if self.F is None:
        """ Then Z=Y """
        # without fixed effects the residual equals the phenotype
        self.cache['LZ'] = self.cache['LY']
        self.cache['WLZ'] = self.cache['WLY']
        self.cache['DWLZ'] = self.cache['DWLY']
        self.cache['XrXrLZ'] = self.cache['XrXrLY']
        self.cache['SgDWLZ'] = self.cache['SgDWLY']
        self.cache['WrXrXrLZ'] = self.cache['WrXrXrLY']
        self.cache['WrLZ'] = self.cache['WrLY']

    # clear dirty flags
    self.Y_has_changed = False
    self.F_has_changed = False
    self.Xr_has_changed = False
    self.Cr.params_have_changed = False
    self.Cn.params_have_changed = False
def _LMLgrad_covar(self,covar,**kw_args):
    """Gradient of the LML w.r.t. the parameters of one covariance term.

    ``covar`` selects the term ('Cr', 'Cg' or 'Cn'); returns a 1d array
    with one entry per parameter of that covariance.
    """
    start = TIME.time()
    # number of hyperparameters of the selected covariance
    if covar=='Cr': n_params = self.Cr.getNumberParams()
    elif covar=='Cg': n_params = self.Cg.getNumberParams()
    elif covar=='Cn': n_params = self.Cn.getNumberParams()
    # NOTE(review): KDW is allocated but never used below — dead?
    KDW = SP.zeros_like(self.cache['DW'])
    # apply the row matrix R of the selected term to the cached tensors
    # (LXr-based for Cr, Srstar diagonal for Cg, identity for Cn)
    if covar=='Cr':
        #_KDWt = NP.einsum('ij,ilk->jlk',self.cache['LXr'],self.cache['DWt'])
        #_KDWt = NP.einsum('ij,jlk->ilk',self.cache['LXr'],_KDWt)
        _KDWt = NP.tensordot(self.cache['LXr'],self.cache['DWt'],axes=(0,0))
        _KDWt = NP.tensordot(self.cache['LXr'],_KDWt,axes=(1,0))
        _KDLYpDLXBiz = SP.dot(self.cache['LXr'].T,self.cache['DLYpDLXBiz'])
        _KDLYpDLXBiz = SP.dot(self.cache['LXr'],_KDLYpDLXBiz)
        LRLdiag = (self.cache['LXr']**2).sum(1)
    elif covar=='Cg':
        _KDWt = self.cache['Srstar'][:,SP.newaxis,SP.newaxis]*self.cache['DWt']
        _KDLYpDLXBiz = self.cache['Srstar'][:,SP.newaxis]*self.cache['DLYpDLXBiz']
        LRLdiag = self.cache['Srstar']
    else:
        _KDWt = self.cache['DWt']
        _KDLYpDLXBiz = self.cache['DLYpDLXBiz']
        LRLdiag = SP.ones(self.N)
    smartSum(self.time,'lmlgrad_trace2_rKDW_%s'%covar,TIME.time()-start)
    smartSum(self.count,'lmlgrad_trace2_rKDW_%s'%covar,1)
    # fill gradient vector
    RV = SP.zeros(n_params)
    for i in range(n_params):
        #0. calc LCL: rotate the i-th derivative of the column covariance
        if covar=='Cr': C = self.Cr.Kgrad_param(i)
        elif covar=='Cg': C = self.Cg.Kgrad_param(i)
        elif covar=='Cn': C = self.Cn.Kgrad_param(i)
        LCL = SP.dot(self.cache['Lc'],SP.dot(C,self.cache['Lc'].T))
        #1. der of log det
        start = TIME.time()
        kronDiag = SP.kron(LCL.diagonal(),LRLdiag)
        RV[i] = SP.dot(self.cache['d'],kronDiag)
        smartSum(self.time,'lmlgrad_trace1_%s'%covar,TIME.time()-start)
        smartSum(self.count,'lmlgrad_trace1_%s'%covar,1)
        start = TIME.time()
        # apply the column derivative to the precomputed row product
        #KDWt = NP.einsum('ijk,jl->ilk',_KDWt,LCL)
        KDWt = NP.tensordot(_KDWt,LCL,axes=(1,0))
        smartSum(self.time,'lmlgrad_trace2_cKDW_%s'%covar,TIME.time()-start)
        smartSum(self.count,'lmlgrad_trace2_cKDW_%s'%covar,1)
        start = TIME.time()
        # build WDKDW and subtract its trace-like product with Bi
        #DKDWt = NP.einsum('ij,ijk->ijk',self.cache['D'],KDWt)
        #WDKDWt = NP.einsum('ijk,jl->ilk',DKDWt, self.cache['LAc'])
        #WDKDWt = NP.einsum('ij,ilk->jlk',self.cache['LXr'],WDKDWt)
        DKDWt = self.cache['D'][:,SP.newaxis,:]*KDWt
        WDKDWt = NP.tensordot(DKDWt,self.cache['LAc'],axes=(2,0))
        WDKDWt = NP.tensordot(self.cache['LXr'],WDKDWt,axes=(0,0))
        WDKDWt = NP.transpose(WDKDWt,(0,2,1))
        WDKDW = WDKDWt.reshape((self.rank*self.S,self.rank*self.S),order='F')
        smartSum(self.time,'lmlgrad_trace2_WDKDW_%s'%covar,TIME.time()-start)
        smartSum(self.count,'lmlgrad_trace2_WDKDW_%s'%covar,1)
        RV[i] -= (WDKDW*self.cache['Bi']).sum()
        #2. der of quad form
        start = TIME.time()
        KDLYpDLXBiz = SP.dot(_KDLYpDLXBiz,LCL.T)
        RV[i] -= (self.cache['DLYpDLXBiz']*KDLYpDLXBiz).sum()
        smartSum(self.time,'lmlgrad_quadForm_%s'%covar,TIME.time()-start)
        smartSum(self.count,'lmlgrad_quadForm_%s'%covar,1)
        RV[i] *= 0.5
    return RV
def _update_cache(self):
    """Update cached rotations, the low-rank matrix B and its inverse.

    Recomputation is driven by the dirty flags (XX/Xr/Y_has_changed,
    Cr/Cg/Cn.params_have_changed); all flags are cleared at the end.
    """
    cov_params_have_changed = self.Cr.params_have_changed or self.Cg.params_have_changed or self.Cn.params_have_changed

    if self.XX_has_changed:
        start = TIME.time()
        """ Row SVD Bg + Noise """
        # row rotation from the eigendecomposition of XX
        self.cache['Srstar'],Urstar = LA.eigh(self.XX)
        self.cache['Lr'] = Urstar.T
        self.mean.setRowRotation(Lr=self.cache['Lr'])
        smartSum(self.time,'cache_XXchanged',TIME.time()-start)
        smartSum(self.count,'cache_XXchanged',1)

    if self.Xr_has_changed or self.XX_has_changed:
        start = TIME.time()
        """ rotate Xr and XrXr """
        self.cache['LXr'] = SP.dot(self.cache['Lr'],self.Xr)
        smartSum(self.time,'cache_Xrchanged',TIME.time()-start)
        smartSum(self.count,'cache_Xrchanged',1)

    if cov_params_have_changed:
        start = TIME.time()
        """ Col SVD Bg + Noise """
        # eigendecomposition of the (jittered) noise covariance
        S2,U2 = LA.eigh(self.Cn.K()+self.offset*SP.eye(self.P))
        self.cache['Sc2'] = S2
        # NOTE(review): US2 is computed but never used in this method
        US2 = SP.dot(U2,SP.diag(SP.sqrt(S2)))
        USi2 = SP.dot(U2,SP.diag(SP.sqrt(1./S2)))
        # whiten Cg by the noise factor, then eigendecompose
        Cstar = SP.dot(USi2.T,SP.dot(self.Cg.K(),USi2))
        self.cache['Scstar'],Ucstar = LA.eigh(Cstar)
        self.cache['Lc'] = SP.dot(Ucstar.T,USi2.T)
        """ pheno """
        self.mean.setColRotation(self.cache['Lc'])
        """ region part """
        self.cache['A'] = SP.reshape(self.Cr.getParams(),(self.P,self.rank),order='F')
        self.cache['LAc'] = SP.dot(self.cache['Lc'],self.cache['A'])

    if cov_params_have_changed or self.XX_has_changed:
        """ S """
        # diagonal of the rotated covariance and its inverse
        self.cache['s'] = SP.kron(self.cache['Scstar'],self.cache['Srstar'])+1
        self.cache['d'] = 1./self.cache['s']
        self.cache['D'] = SP.reshape(self.cache['d'],(self.N,self.P), order='F')
        """ pheno """
        # rotated phenotypes
        self.cache['LY'] = self.mean.evaluate()
        self.cache['DLY'] = self.cache['D']*self.cache['LY']
        smartSum(self.time,'cache_colSVDpRot',TIME.time()-start)
        smartSum(self.count,'cache_colSVDpRot',1)

    if cov_params_have_changed or self.XX_has_changed or self.Xr_has_changed:
        """ calculate B = I + kron(LcA,LrXr).T*D*kron(kron(LcA,LrXr)) """
        start = TIME.time()
        W = SP.kron(self.cache['LAc'],self.cache['LXr'])
        self.cache['DW'] = W*self.cache['d'][:,SP.newaxis]
        self.cache['DWt'] = self.cache['DW'].reshape((self.N,self.P,self.rank*self.S),order='F')
        #B = NP.einsum('ijk,jl->ilk',self.cache['DWt'],self.cache['LAc'])
        #B = NP.einsum('ji,jlk->ilk',self.cache['LXr'],B)
        B = SP.tensordot(self.cache['DWt'],self.cache['LAc'],axes=(1,0))
        B = NP.transpose(B, (0, 2, 1))
        B = SP.tensordot(self.cache['LXr'],B,axes=(0,0))
        B = B.reshape((self.rank*self.S,self.rank*self.S),order='F')
        B+= SP.eye(self.rank*self.S)
        smartSum(self.time,'cache_calcB',TIME.time()-start)
        smartSum(self.count,'cache_calcB',1)
        """ invert B """
        start = TIME.time()
        self.cache['cholB'] = LA.cholesky(B).T
        self.cache['Bi'] = LA.cho_solve((self.cache['cholB'],True),SP.eye(self.S*self.rank))
        smartSum(self.time,'cache_invB',TIME.time()-start)
        smartSum(self.count,'cache_invB',1)
        """ pheno """
        start = TIME.time()
        # project the rotated phenotype through B^-1 and cache the
        # combined quantity DLY + D*LXr*BiZ*LAc'
        Z = SP.dot(self.cache['LXr'].T,SP.dot(self.cache['DLY'],self.cache['LAc']))
        self.cache['z'] = SP.reshape(Z,(self.S*self.rank), order='F')
        self.cache['Biz'] = LA.cho_solve((self.cache['cholB'],True),self.cache['z'])
        BiZ = SP.reshape(self.cache['Biz'],(self.S,self.rank), order='F')
        self.cache['DLYpDLXBiz'] = SP.dot(self.cache['LXr'],SP.dot(BiZ,self.cache['LAc'].T))
        self.cache['DLYpDLXBiz'] *= -self.cache['D']
        self.cache['DLYpDLXBiz'] += self.cache['DLY']
        smartSum(self.time,'cache_phenoCalc',TIME.time()-start)
        smartSum(self.count,'cache_phenoCalc',1)

    # clear dirty flags
    self.XX_has_changed = False
    self.Xr_has_changed = False
    self.Y_has_changed = False
    self.Cr.params_have_changed = False
    self.Cg.params_have_changed = False
    self.Cn.params_have_changed = False