def _update_indicator(self, K, L):
    """ update the indicator """
    _update = {'term': self.n_terms * SP.ones((K, L)).T.ravel(),
               'row': SP.kron(SP.arange(K)[:, SP.newaxis], SP.ones((1, L))).T.ravel(),
               'col': SP.kron(SP.ones((K, 1)), SP.arange(L)[SP.newaxis, :]).T.ravel()}
    for key in _update.keys():
        self.indicator[key] = SP.concatenate([self.indicator[key], _update[key]])
def _LMLgrad_s(self, hyperparams, debugging=False):
    """ evaluate gradients with respect to covariance matrix Sigma """
    try:
        KV = self.get_covariances(hyperparams, debugging=debugging)
    except LA.LinAlgError:
        LG.error('linalg exception in _LMLgrad_s')
        return {'X_s': SP.zeros(hyperparams['X_s'].shape)}
    Si = 1. / KV['Stilde_os']
    SS = SP.dot(unravel(Si, self.n, self.t).T, KV['Stilde_o'])
    USU = SP.dot(KV['USi_c'], KV['Utilde_s'])
    Yhat = unravel(Si * ravel(KV['UYtildeU_os']), self.n, self.t)
    RV = {}
    if 'X_s' in hyperparams:
        USUY = SP.dot(USU, Yhat.T)
        USUYSYUSU = SP.dot(USUY, (KV['Stilde_o'] * USUY).T)
        LMLgrad = SP.zeros((self.t, self.covar_s.n_dimensions))
        LMLgrad_det = SP.zeros((self.t, self.covar_s.n_dimensions))
        LMLgrad_quad = SP.zeros((self.t, self.covar_s.n_dimensions))
        for d in range(self.covar_s.n_dimensions):
            Kd_grad = self.covar_s.Kgrad_x(hyperparams['covar_s'], d)
            # gradient of the log determinant
            UsU = SP.dot(Kd_grad.T, USU) * USU
            LMLgrad_det[:, d] = SP.dot(UsU, SS.T)
            # gradient of the squared form
            LMLgrad_quad[:, d] = -(USUYSYUSU * Kd_grad).sum(0)
        LMLgrad = LMLgrad_det + LMLgrad_quad
        RV['X_s'] = LMLgrad
        if debugging:
            _LMLgrad = SP.zeros((self.t, self.covar_s.n_dimensions))
            for t in range(self.t):
                for d in range(self.covar_s.n_dimensions):
                    Kgrad_x = self.covar_s.Kgrad_x(hyperparams['covar_s'], d, t)
                    Kgrad_x = SP.kron(Kgrad_x, KV['K_o'])
                    _LMLgrad[t, d] = 0.5 * (KV['W'] * Kgrad_x).sum()
            assert SP.allclose(LMLgrad, _LMLgrad, rtol=1E-3, atol=1E-2), \
                'ouch, something is wrong: %.2f' % LA.norm(LMLgrad - _LMLgrad)
    if 'covar_s' in hyperparams:
        theta = SP.zeros(len(hyperparams['covar_s']))
        for i in range(len(theta)):
            Kgrad_s = self.covar_s.Kgrad_theta(hyperparams['covar_s'], i)
            UdKU = SP.dot(USU.T, SP.dot(Kgrad_s, USU))
            SYUdKU = SP.dot(UdKU, KV['Stilde_o'] * Yhat.T)
            LMLgrad_det = SP.sum(Si * SP.kron(SP.diag(UdKU), KV['Stilde_o']))
            LMLgrad_quad = -(Yhat.T * SYUdKU).sum()
            LMLgrad = 0.5 * (LMLgrad_det + LMLgrad_quad)
            theta[i] = LMLgrad
            if debugging:
                Kd = SP.kron(Kgrad_s, KV['K_o'])
                _LMLgrad = 0.5 * (KV['W'] * Kd).sum()
                assert SP.allclose(LMLgrad, _LMLgrad, rtol=1E-3, atol=1E-2), \
                    'ouch, something is wrong: %.2f' % LA.norm(LMLgrad - _LMLgrad)
        RV['covar_s'] = theta
    return RV
def LMLdebug(self):
    """ LML function for debug """
    assert self.N * self.P < 5000, 'gp2kronSum:: N*P>=5000'

    y = SP.reshape(self.Y, (self.N * self.P), order='F')
    V = SP.kron(SP.eye(self.P), self.F)

    XX = SP.dot(self.Xr, self.Xr.T)
    K = SP.kron(self.Cr.K(), XX)
    K += SP.kron(self.Cn.K() + self.offset * SP.eye(self.P), SP.eye(self.N))

    # inverse of K
    cholK = LA.cholesky(K)
    Ki = LA.cho_solve((cholK, False), SP.eye(self.N * self.P))

    # Areml and inverse
    Areml = SP.dot(V.T, SP.dot(Ki, V))
    cholAreml = LA.cholesky(Areml)
    Areml_i = LA.cho_solve((cholAreml, False), SP.eye(self.K * self.P))

    # effect sizes and z
    b = SP.dot(Areml_i, SP.dot(V.T, SP.dot(Ki, y)))
    z = y - SP.dot(V, b)
    Kiz = SP.dot(Ki, z)

    # lml
    lml = y.shape[0] * SP.log(2 * SP.pi)
    lml += 2 * SP.log(SP.diag(cholK)).sum()
    lml += 2 * SP.log(SP.diag(cholAreml)).sum()
    lml += SP.dot(z, Kiz)
    lml *= 0.5

    return lml
def _LMLgrad_covar_debug(self, covar):
    assert self.N * self.P < 2000, 'gp2kronSum:: N*P>=2000'

    y = SP.reshape(self.Y, (self.N * self.P), order='F')

    K = SP.kron(self.Cg.K(), self.XX)
    K += SP.kron(self.Cn.K() + self.offset * SP.eye(self.P), SP.eye(self.N))

    cholK = LA.cholesky(K).T
    Ki = LA.cho_solve((cholK, True), SP.eye(y.shape[0]))
    Kiy = LA.cho_solve((cholK, True), y)

    if covar == 'Cr':
        n_params = self.Cr.getNumberParams()
    elif covar == 'Cg':
        n_params = self.Cg.getNumberParams()
    elif covar == 'Cn':
        n_params = self.Cn.getNumberParams()

    RV = SP.zeros(n_params)
    for i in range(n_params):
        # 0. calc grad_i (note: no Kgrad branch is defined here for covar=='Cr')
        if covar == 'Cg':
            C = self.Cg.Kgrad_param(i)
            Kgrad = SP.kron(C, self.XX)
        elif covar == 'Cn':
            C = self.Cn.Kgrad_param(i)
            Kgrad = SP.kron(C, SP.eye(self.N))
        # 1. derivative of the log determinant
        RV[i] = 0.5 * (Ki * Kgrad).sum()
        # 2. derivative of the quadratic form
        RV[i] -= 0.5 * (Kiy * SP.dot(Kgrad, Kiy)).sum()
    return RV
def AlphaBetaCoeffs_old(n, a, b):
    " Construct the alpha and beta coefficient matrices. "
    Z = sp.matrix([[1, 0], [0, -1]])
    I = sp.identity(2)
    alpha = sp.zeros((2**n, 2**n))
    beta = sp.zeros((2**n, 2**n))
    m1 = []
    m2 = []
    for i in range(0, n):
        for m in range(0, n - 1):
            m1.append(I)
        m1.insert(i, Z)
        temp1 = m1[0]
        m1.pop(0)
        while (len(m1) != 0):
            temp1 = sp.kron(temp1, m1[0])
            m1.pop(0)
        alpha += temp1 * a[i]
        for j in range(i + 1, n):
            for m in range(0, n - 2):
                m2.append(I)
            m2.insert(i, Z)
            m2.insert(j, Z)
            temp2 = m2[0]
            m2.pop(0)
            while (len(m2) != 0):
                temp2 = sp.kron(temp2, m2[0])
                m2.pop(0)
            beta += (temp2) * b[i, j]
    return [alpha, beta]
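# Note (added illustration, not from the original source): the list juggling in
# AlphaBetaCoeffs_old builds kron(I, ..., Z at slot i, ..., I). A minimal
# equivalent sketch using functools.reduce; `embed_z` is a hypothetical helper.
import functools
import numpy as np

def embed_z(i, n):
    " Return the 2**n x 2**n operator with Pauli Z at site i and identity elsewhere. "
    Z = np.array([[1, 0], [0, -1]])
    ops = [Z if k == i else np.eye(2) for k in range(n)]
    return functools.reduce(np.kron, ops)

# e.g. for n=2 the operator at site 0 is kron(Z, I)
assert np.allclose(embed_z(0, 2), np.kron(np.array([[1, 0], [0, -1]]), np.eye(2)))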
def sqrtm3(X):
    M = sp.copy(X)
    m, fb, fe = block_structure(M)
    n = M.shape[0]
    # take the square root of each diagonal block
    for i in range(0, m):
        M[fb[i]:fe[i], fb[i]:fe[i]] = twobytworoot(M[fb[i]:fe[i], fb[i]:fe[i]])
    # sweep the superdiagonals block by block
    for j in range(1, m):
        for i in range(0, m - j):
            JJ = i + j
            Tnoto = M[fb[i]:fe[i], fb[JJ]:fe[JJ]]  # remove the copy later
            for k in range(i + 1, JJ):
                Tnoto -= (M[fb[i]:fe[i], fb[k]:fe[k]]).dot(M[fb[k]:fe[k], fb[JJ]:fe[JJ]])
            if (M[fb[i]:fe[i], fb[JJ]:fe[JJ]]).shape == (1, 1):
                # scalar case: Uij = Tnoto / (Uii + Ujj)
                M[fb[i]:fe[i], fb[JJ]:fe[JJ]] = Tnoto / (M[fb[i]:fe[i], fb[i]:fe[i]]
                                                         + M[fb[JJ]:fe[JJ], fb[JJ]:fe[JJ]])
            else:
                # block case: solve the Sylvester equation Uii*Uij + Uij*Ujj = Tnoto
                # via the Kronecker (vec-trick) formulation
                Uii = M[fb[i]:fe[i], fb[i]:fe[i]]
                Ujj = M[fb[JJ]:fe[JJ], fb[JJ]:fe[JJ]]
                shapeUii = Uii.shape[0]
                shapeUjj = Ujj.shape[0]
                Tnoto = Tnoto.reshape((shapeUii * shapeUjj), 1, order="F")
                M[fb[i]:fe[i], fb[JJ]:fe[JJ]] = \
                    linalg.solve(sp.kron(sp.eye(shapeUjj), Uii)
                                 + sp.kron(Ujj.T, sp.eye(shapeUii)),
                                 Tnoto).reshape(shapeUii, shapeUjj, order="F")
    return M
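# Note (added illustration, not from the original source): the else-branch of
# sqrtm3 solves the Sylvester equation Uii X + X Ujj = T through the vec trick
# vec(A X B) = kron(B.T, A) vec(X) with column-major vec. A self-contained
# check against scipy's direct solver:
import numpy as np
from scipy.linalg import solve_sylvester

rng = np.random.default_rng(0)
Uii = rng.standard_normal((2, 2)) + 2 * np.eye(2)  # shifted to be well conditioned
Ujj = rng.standard_normal((3, 3)) + 2 * np.eye(3)
T = rng.standard_normal((2, 3))
A = np.kron(np.eye(3), Uii) + np.kron(Ujj.T, np.eye(2))
X = np.linalg.solve(A, T.reshape(6, 1, order="F")).reshape(2, 3, order="F")
assert np.allclose(X, solve_sylvester(Uii, Ujj, T))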
def diag_Ctilde_o_Sr(self, i):
    if i < self.Cg.getNumberParams():
        r = sp.kron(sp.diag(self.LcGradCgLc(i)), self.Sr())
    else:
        _i = i - self.Cg.getNumberParams()
        r = sp.kron(sp.diag(self.LcGradCnLc(_i)), sp.ones(self.dim_r))
    return r
def varMLE(self):
    """ calculate inverse of fisher information """
    self._update_cache()
    Sr = {}
    Sr['Cg'] = self.cache['Srstar']
    Sr['Cn'] = SP.ones(self.N)
    n_params = self.Cg.getNumberParams() + self.Cn.getNumberParams()
    fisher = SP.zeros((n_params, n_params))
    header = SP.zeros((n_params, n_params), dtype='|S10')
    C1 = SP.zeros((self.P, self.P))
    C2 = SP.zeros((self.P, self.P))
    idx1 = 0
    for key1 in ['Cg', 'Cn']:
        for key1_p1 in range(self.P):
            for key1_p2 in range(key1_p1, self.P):
                C1[key1_p1, key1_p2] = C1[key1_p2, key1_p1] = 1
                LCL1 = SP.dot(self.cache['Lc'], SP.dot(C1, self.cache['Lc'].T))
                CSr1 = SP.kron(Sr[key1][:, SP.newaxis, SP.newaxis], LCL1[SP.newaxis, :])
                DCSr1 = self.cache['D'][:, :, SP.newaxis] * CSr1
                idx2 = 0
                for key2 in ['Cg', 'Cn']:
                    for key2_p1 in range(self.P):
                        for key2_p2 in range(key2_p1, self.P):
                            C2[key2_p1, key2_p2] = C2[key2_p2, key2_p1] = 1
                            LCL2 = SP.dot(self.cache['Lc'], SP.dot(C2, self.cache['Lc'].T))
                            CSr2 = SP.kron(Sr[key2][:, SP.newaxis, SP.newaxis],
                                           LCL2[SP.newaxis, :])
                            DCSr2 = self.cache['D'][:, :, SP.newaxis] * CSr2
                            fisher[idx1, idx2] = 0.5 * (DCSr1 * DCSr2).sum()
                            header[idx1, idx2] = '%s%d%d_%s%d%d' % (
                                key1, key1_p1, key1_p2, key2, key2_p1, key2_p2)
                            C2[key2_p1, key2_p2] = C2[key2_p2, key2_p1] = 0
                            idx2 += 1
                C1[key1_p1, key1_p2] = C1[key1_p2, key1_p1] = 0
                idx1 += 1
    RV = LA.inv(fisher)
    return RV, header
def K(self):
    if self.dim > _MAX_DIM:
        raise TooExpensiveOperationError(msg_too_expensive_dim(my_name(), _MAX_DIM))
    rv = sp.kron(self.Cg.K(), self.R) + sp.kron(self.Cn.K(), sp.eye(self.dim_r))
    return rv
def _LMLgrad_covar(self, hyperparams, debugging=False):
    """
    evaluates the gradient of the log marginal likelihood with
    respect to the hyperparameters of the covariance function
    """
    try:
        KV = self.get_covariances(hyperparams, debugging=debugging)
    except LA.LinAlgError:
        LG.error('linalg exception in _LMLgrad_covar')
        return {'covar_r': SP.zeros(len(hyperparams['covar_r'])),
                'covar_c': SP.zeros(len(hyperparams['covar_c']))}
    except ValueError:
        LG.error('value error in _LMLgrad_covar')
        return {'covar_r': SP.zeros(len(hyperparams['covar_r'])),
                'covar_c': SP.zeros(len(hyperparams['covar_c']))}

    RV = {}
    Si = unravel(1. / KV['S'], self.n, self.t)

    if 'covar_r' in hyperparams:
        theta = SP.zeros(len(hyperparams['covar_r']))
        for i in range(len(theta)):
            Kgrad_r = self.covar_r.Kgrad_theta(hyperparams['covar_r'], i)
            d = (KV['U_r'] * SP.dot(Kgrad_r, KV['U_r'])).sum(0)
            LMLgrad_det = SP.dot(d, SP.dot(Si, KV['S_c']))
            UdKU = SP.dot(KV['U_r'].T, SP.dot(Kgrad_r, KV['U_r']))
            SYUdKU = SP.dot(UdKU, (KV['Ytilde'] * SP.tile(KV['S_c'][SP.newaxis, :], (self.n, 1))))
            LMLgrad_quad = -(KV['Ytilde'] * SYUdKU).sum()
            LMLgrad = 0.5 * (LMLgrad_det + LMLgrad_quad)
            theta[i] = LMLgrad
            if debugging:
                Kd = SP.kron(KV['K_c'], Kgrad_r)
                _LMLgrad = 0.5 * (KV['W'] * Kd).sum()
                assert SP.allclose(LMLgrad, _LMLgrad), 'ouch, gradient is wrong for covar_r'
        RV['covar_r'] = theta

    if 'covar_c' in hyperparams:
        theta = SP.zeros(len(hyperparams['covar_c']))
        for i in range(len(theta)):
            Kgrad_c = self.covar_c.Kgrad_theta(hyperparams['covar_c'], i)
            d = (KV['U_c'] * SP.dot(Kgrad_c, KV['U_c'])).sum(0)
            LMLgrad_det = SP.dot(KV['S_r'], SP.dot(Si, d))
            UdKU = SP.dot(KV['U_c'].T, SP.dot(Kgrad_c, KV['U_c']))
            SYUdKU = SP.dot((KV['Ytilde'] * SP.tile(KV['S_r'][:, SP.newaxis], (1, self.t))), UdKU.T)
            LMLgrad_quad = -SP.sum(KV['Ytilde'] * SYUdKU)
            LMLgrad = 0.5 * (LMLgrad_det + LMLgrad_quad)
            theta[i] = LMLgrad
            if debugging:
                Kd = SP.kron(Kgrad_c, KV['K_r'])
                _LMLgrad = 0.5 * (KV['W'] * Kd).sum()
                assert SP.allclose(LMLgrad, _LMLgrad), 'ouch, gradient is wrong for covar_c'
        RV['covar_c'] = theta

    return RV
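# Note (added illustration, not from the original source): _LMLgrad_covar and
# its siblings rest on the identity that for symmetric K_r, K_c with
# eigendecompositions (U_r, S_r) and (U_c, S_c),
# kron(K_c, K_r) = (U_c kron U_r) diag(kron(S_c, S_r)) (U_c kron U_r)^T,
# which is what lets the code work with n x t arrays instead of nt x nt ones:
import numpy as np

rng = np.random.default_rng(1)
Kr = rng.standard_normal((3, 3)); Kr = Kr @ Kr.T
Kc = rng.standard_normal((2, 2)); Kc = Kc @ Kc.T
Sr, Ur = np.linalg.eigh(Kr)
Sc, Uc = np.linalg.eigh(Kc)
U = np.kron(Uc, Ur)
assert np.allclose(np.kron(Kc, Kr), U @ np.diag(np.kron(Sc, Sr)) @ U.T)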
def get_linds(N, eps):
    # Lindblad operators must have the same range as the Hamiltonian terms.
    # In this case they are nearest-neighbour.
    Sp1 = (sp.kron(Sp, sp.eye(2))).reshape(2, 2, 2, 2)
    Sm2 = (sp.kron(sp.eye(2), Sm)).reshape(2, 2, 2, 2)
    L1 = (1, sp.sqrt(eps) * Sp1)
    L2 = (N - 1, sp.sqrt(eps) * Sm2)
    return [L1, L2]
def _fit(self, type, vc=False):
    # 2. init
    if type == 'null':
        self.gp[type].covar.Cg.setCovariance(0.5 * self.covY)
        self.gp[type].covar.Cn.setCovariance(0.5 * self.covY)
    elif type == 'full':
        Cg0_K = self.gp['null'].covar.Cg.K()
        Cn0_K = self.gp['null'].covar.Cn.K()
        self.gp[type].covar.Cr.setCovariance((Cn0_K + Cg0_K) / 3.)
        self.gp[type].covar.Cg.setCovariance(2. * Cg0_K / 3.)
        self.gp[type].covar.Cn.setCovariance(2. * Cn0_K / 3.)
    elif type == 'block':
        Crf_K = self.gp['full'].covar.Cr.K()
        Cnf_K = self.gp['full'].covar.Cn.K()
        self.gp[type].covar.Cr.scale = sp.mean(Crf_K)
        self.gp[type].covar.Cn.setCovariance(Cnf_K)
    elif type == 'rank1':
        Crf_K = self.gp['full'].covar.Cr.K()
        Cnf_K = self.gp['full'].covar.Cn.K()
        self.gp[type].covar.Cr.setCovariance(Crf_K)
        self.gp[type].covar.Cn.setCovariance(Cnf_K)
    else:
        print('unknown model type: %s' % type)
    self.gp[type].optimize(factr=self.factr, verbose=False)
    RV = {'Cg': self.gp[type].covar.Cg.K(),
          'Cn': self.gp[type].covar.Cn.K(),
          'LML': sp.array([self.gp[type].LML()]),
          'LMLgrad': sp.array([sp.mean((self.gp[type].LML_grad()['covar'])**2)])}
    if type == 'null':
        RV['Cr'] = sp.zeros(RV['Cg'].shape)
    else:
        RV['Cr'] = self.gp[type].covar.Cr.K()
    if vc:
        # tr(P CoR) = tr(C)tr(R) - tr(Ones C) tr(Ones R) / float(NP)
        #           = tr(C)tr(R) - C.sum() * R.sum() / float(NP)
        trRr = (self.Xr**2).sum()
        var_r = sp.trace(RV['Cr']) * trRr / float(self.Y.size - 1)
        var_g = sp.trace(RV['Cg']) * self.trRg / float(self.Y.size - 1)
        var_n = sp.trace(RV['Cn']) * self.Y.shape[0]
        var_n -= RV['Cn'].sum() / float(RV['Cn'].shape[0])
        var_n /= float(self.Y.size - 1)
        RV['var'] = sp.array([var_r, var_g, var_n])
        if 0 and self.Y.size < 5000:
            # disabled consistency check against the naive Kronecker covariances
            pdb.set_trace()
            Kr = sp.kron(RV['Cr'], sp.dot(self.Xr, self.Xr.T))
            Kn = sp.kron(RV['Cn'], sp.eye(self.Y.shape[0]))
            _var_r = sp.trace(Kr - Kr.mean(0)) / float(self.Y.size - 1)
            _var_n = sp.trace(Kn - Kn.mean(0)) / float(self.Y.size - 1)
            _var = sp.array([_var_r, var_g, _var_n])
            print(((_var - RV['var'])**2).mean())
        if type == 'full':
            # calculate within-region variance components
            Cr_block = sp.mean(RV['Cr']) * sp.ones(RV['Cr'].shape)
            Cr_rank1 = lowrank_approx(RV['Cr'], rank=1)
            var_block = sp.trace(Cr_block) * trRr / float(self.Y.size - 1)
            var_rank1 = sp.trace(Cr_rank1) * trRr / float(self.Y.size - 1)
            RV['var_r'] = sp.array([var_block, var_rank1 - var_block, var_r - var_rank1])
    return RV
def test_inv(self):
    C = self.C
    L = sp.kron(C.Lc(), sp.eye(C.dim_r))
    W = sp.kron(C.Wc(), C.Wr())
    WdW = sp.dot(W.T, C.d()[:, sp.newaxis] * W)
    I_WdW = sp.eye(C.dim_c * C.dim_r) - WdW
    inv1 = sp.dot(L.T, sp.dot(I_WdW, L))
    inv2 = C.inv()
    np.testing.assert_array_almost_equal(inv1, inv2)
def distanz(x, y=None):
    r"""
    Calculate the pairwise distances between two sets of column vectors.

    Parameters
    ----------
    x : ndarray
        First set of column vectors
    y : ndarray
        Second set of column vectors

    Returns
    -------
    d : ndarray
        Distances between the columns of x and y

    Examples
    --------
    >>> import numpy as np
    >>> from pygsp import utils
    >>> x = np.random.rand(16)
    >>> y = np.random.rand(16)
    >>> distanz = utils.distanz(x, y)

    """
    try:
        x.shape[1]
    except IndexError:
        x = x.reshape(1, x.shape[0])

    if y is None:
        y = x
    else:
        try:
            y.shape[1]
        except IndexError:
            y = y.reshape(1, y.shape[0])

    rx, cx = x.shape
    ry, cy = y.shape

    # Size verification
    if rx != ry:
        raise ValueError("The sizes of x and y do not fit")

    xx = (x * x).sum(axis=0)
    yy = (y * y).sum(axis=0)
    xy = np.dot(x.T, y)

    d = abs(sp.kron(sp.ones((cy, 1)), xx).T +
            sp.kron(sp.ones((cx, 1)), yy) - 2 * xy)

    return np.sqrt(d)
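# Usage sketch for distanz (added illustration, assuming the module's np/sp
# imports): pairwise Euclidean distances between columns, checked naively.
import numpy as np

x = np.random.rand(2, 4)  # 4 points in R^2, one per column
y = np.random.rand(2, 5)
d = distanz(x, y)
naive = np.array([[np.linalg.norm(x[:, i] - y[:, j]) for j in range(5)]
                  for i in range(4)])
assert np.allclose(d, naive)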
def diag_Ctilde_o_Sr(self, i):
    np_r = self.Cr.getNumberParams()
    np_g = self.Cg.getNumberParams()
    if i < np_r:
        r = sp.kron(sp.diag(self.LcGradCrLc(i)), self.diagWrWr())
    elif i < (np_r + np_g):
        _i = i - np_r
        r = sp.kron(sp.diag(self.LcGradCgLc(_i)), self.Sr())
    else:
        _i = i - np_r - np_g
        r = sp.kron(sp.diag(self.LcGradCnLc(_i)), sp.ones(self.dim_r))
    return r
def get_full_op(op):
    fop = sp.zeros((2**N, 2**N), dtype=sp.complex128)
    for n in range(1, min(len(op), N + 1)):
        if op[n] is None:
            continue
        n_sites = len(op[n].shape) // 2  # integer division: op[n] has paired indices
        opn = op[n].reshape(2**n_sites, 2**n_sites)
        fop_n = sp.kron(sp.eye(2**(n - 1)),
                        sp.kron(opn, sp.eye(2**(N - n - n_sites + 1))))
        assert fop.shape == fop_n.shape
        fop += fop_n
    return fop
def IsingHamiltonian_old(n, h, J, g):
    ### Construct Hamiltonian ###
    Z = sp.matrix([[1, 0], [0, -1]])
    X = sp.matrix([[0, 1], [1, 0]])
    I = sp.identity(2)
    alpha = sp.zeros((2**n, 2**n))
    beta = sp.zeros((2**n, 2**n))
    delta = sp.zeros((2**n, 2**n))
    matrices = []

    # Calculate alpha
    for i in range(0, n):
        for m in range(0, n - 1):
            matrices.append(I)
        matrices.insert(i, Z)
        temp = matrices[0]
        matrices.pop(0)
        while (len(matrices) != 0):
            temp = sp.kron(temp, matrices[0])
            matrices.pop(0)
        alpha = alpha + temp * h[i]
    temp = 0

    # Calculate beta
    for i in range(0, n):
        for j in range(0, n):
            if (i != j):
                for m in range(0, n - 2):
                    matrices.append(I)
                matrices.insert(i, Z)
                matrices.insert(j, Z)
                temp = matrices[0]
                matrices.pop(0)
                while (len(matrices) != 0):
                    temp = sp.kron(temp, matrices[0])
                    matrices.pop(0)
                beta = beta + temp * J[i, j]
    beta = beta + g * sp.identity(2**n)
    temp = 0

    # Calculate delta
    for i in range(0, n):
        for m in range(0, n - 1):
            matrices.append(I)
        matrices.insert(i, X)
        temp = matrices[0]
        matrices.pop(0)
        while (len(matrices) != 0):
            temp = sp.kron(temp, matrices[0])
            matrices.pop(0)
        delta += temp
    return [alpha, beta, delta]
def GenerateLabels(n):
    " Get proper labeling for output states. "
    # Generate bitstrings
    bitstring = []
    for i in range(0, n + 1):
        bitstring.append(kbits(n, i))
    # Flatten
    bitstring = list(itertools.chain.from_iterable(bitstring))
    # Generate unit vectors
    statelist = []
    poslist = []
    pos = 0
    unit0 = sp.array([1, 0])
    unit1 = sp.array([0, 1])
    for i in range(len(bitstring)):
        # Construct unit vector corresponding to bitstring
        state = unit1 if (bitstring[i][0] == '1') else unit0
        for j in range(n - 1):
            state = sp.kron(state, unit1 if (bitstring[i][j + 1] == '1') else unit0)
        statelist.append(state)
        # Record orientation of unit vector (using position of 1 value)
        for j in range(2**n):
            if (statelist[-1][j]):
                pos = j
                break
        poslist.append(pos)
    # Sort the states
    sortperm = sp.array(poslist).argsort()
    bitstring = [bitstring[i] for i in sortperm]
    return bitstring
def LW(self):
    R = sp.zeros((self.mean.Y.size, self.mean.n_covs))
    istart = 0
    for ti in range(self.mean.n_terms):
        iend = istart + self.mean.F[ti].shape[1] * self.mean.A[ti].shape[0]
        R[:, istart:iend] = sp.kron(self.ALc()[ti].T, self.LrF()[ti])
        istart = iend  # advance to the next block of columns
    return R
def _LML_covar(self, hyperparams):
    # calculate marginal likelihood of kronecker GP
    # 1. get covariance structures needed:
    try:
        KV = self.get_covariances(hyperparams)
    except linalg.LinAlgError:
        LG.error("exception caught (%s)" % (str(hyperparams)))
        return 1E6
    # 2. build lml
    LML = 0
    LMLc = 0.5 * self.nd * SP.log(2.0 * SP.pi)  # constant part of negative lml
    # quadratic form
    Si = KV['Si']
    LMLq = 0.5 * SP.dot(KV['y_rot'].ravel(), KV['YSi'].ravel())
    # determinant stuff
    LMLd = -0.5 * SP.log(Si).sum()
    if VERBOSE:
        print("costly verbose debugging on")
        K = SP.kron(KV['Kr'], KV['Kc']) + SP.diag(KV['Knoise'])
        Ki = SP.linalg.inv(K)
        LMLq_ = 0.5 * SP.dot(SP.dot(self.y.ravel(), Ki), self.y.ravel())
        LMLd_ = 0.5 * 2 * SP.log(SP.linalg.cholesky(K).diagonal()).sum()
        check_dist(LMLq, LMLq_)
        check_dist(LMLd, LMLd_)
    return LMLc + LMLq + LMLd
def W(self):
    R = sp.zeros((self.Y.size, self.n_covs))
    istart = 0
    for ti in range(self.n_terms):
        iend = istart + self.F[ti].shape[1] * self.A[ti].shape[0]
        R[:, istart:iend] = sp.kron(self.A[ti].T, self.F[ti])
        istart = iend  # advance to the next block of columns
    return R
def L_basic_red(rate_mat, rate_dic, rowList, rowLen, pb=0.5, kex=300.0):
    # populate the Liouvillian with relaxation and chemical exchange terms
    a = np.zeros((1 + 2 * rowLen, 1 + 2 * rowLen), dtype=complex)
    # relaxation bits
    pa = 1.0 - pb
    a += rate_mat
    cnt = 0
    for i, prod in enumerate(prodList):
        if i in rowList:
            if prod == 'Iz':
                # these are for the return of magnetisation to equilibrium
                a[cnt, 0] -= rate_dic['Iz'] * pa
                a[cnt + rowLen, 0] -= rate_dic['Iz'] * pb
            elif prod == 'Rz':
                a[cnt, 0] -= rate_dic['Rz'] * pa
                a[cnt + rowLen, 0] -= rate_dic['Rz'] * pb
            elif prod == 'Sz':
                a[cnt, 0] -= rate_dic['Sz'] * pa
                a[cnt + rowLen, 0] -= rate_dic['Sz'] * pb
            cnt += 1
    # add exchange
    k_ge = kex * pb
    k_eg = kex * pa
    k_mat = np.array([[-k_ge, k_eg], [k_ge, -k_eg]])
    k_mat = kron(k_mat, np.eye(rowLen))
    a[1:2 * rowLen + 1, 1:2 * rowLen + 1] -= k_mat  # signs are important!
    return a
def gaussian_checkerboard_kernel(L, sigma=10):
    '''
    Computes a gaussian checkerboard kernel of shape (2L, 2L)

    The result is the Hadamard (element-wise) product of a centered 2D
    gaussian pdf with covariance `sigma * I`, evaluated on (-L:L)x(-L:L),
    with a checkerboard kernel. The resulting matrix is thus split into
    4 quadrants: 2 negative and 2 positive.

    INPUTS:
    - L : half length of the kernel
    - sigma : optional parameter: the variance of the gaussian part of the kernel

    OUTPUTS:
    - np.ndarray representing a gaussian checkerboard kernel of shape (2L, 2L)
    '''
    # get gaussian part
    x, y = np.mgrid[-L:L, -L:L]
    pos = np.empty(x.shape + (2, ))
    pos[:, :, 0] = x
    pos[:, :, 1] = y
    rv = st.multivariate_normal([0, 0], sigma * np.eye(2))
    pdf = rv.pdf(pos)

    # get checkerboard part
    checkerboard = kron(np.array([[1, -1], [-1, 1]]), np.ones(shape=(L, L)))

    # return
    return checkerboard * pdf
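# Usage sketch (added illustration, not from the original source): as
# implemented, mgrid[-L:L, -L:L] yields a 2L x 2L kernel whose quadrants
# alternate in sign.
ker = gaussian_checkerboard_kernel(L=8, sigma=10)
assert ker.shape == (16, 16)
assert ker[0, 0] > 0 and ker[0, 15] < 0  # opposite-sign quadrants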
def ALcCtildeLcA_o_FRF(self, i):
    if i < self.covar.Cr.getNumberParams():
        FRF = self.FGGF()
    else:
        FRF = self.FF()
    ALcCtildeALc = sp.dot(self.ALc(), self.CtildeLcA(i))
    return sp.kron(ALcCtildeALc, FRF)
def WcCtildeLcA_o_WrRF(self, i):
    if i < self.covar.Cr.getNumberParams():
        self.WrRF = self.WrGGF()
    else:
        self.WrRF = self.WrF()
    WcCtildeLcA = sp.dot(self.covar.Wc(), self.CtildeLcA(i))
    return sp.kron(WcCtildeLcA, self.WrRF)
def _LMLgrad_lik(self, hyperparams):
    """
    evaluates the gradient of the log marginal likelihood with
    respect to the hyperparameters of the likelihood function
    """
    try:
        KV = self.get_covariances(hyperparams)
    except LA.LinAlgError:
        LG.error('linalg exception in _LMLgrad_lik')
        return {'lik': SP.zeros(len(hyperparams['lik']))}
    except ValueError:
        LG.error('value error in _LMLgrad_lik')
        return {'lik': SP.zeros(len(hyperparams['lik']))}

    YtildeVec = ravel(KV['Ytilde'])
    Kd_diag = self.likelihood.Kdiag_grad_theta(hyperparams['lik'], self.n, 0)
    temp = SP.kron(SP.dot(KV['U_c'].T, SP.dot(KV['Binv'], SP.dot(KV['Binv'].T, KV['U_c']))),
                   SP.diag(Kd_diag))
    LMLgrad_det = SP.diag(SP.dot(SP.diag(1. / KV['S']), temp)).sum()
    # needs more optimization
    sigma_grad = 2 * SP.exp(2 * hyperparams['lik'])
    LMLgrad_quad = -(sigma_grad * YtildeVec *
                     ravel(SP.dot(SP.eye(self.n),
                                  SP.dot(KV['Ytilde'], KV['UBinvBinvU'].T)))).sum()
    LMLgrad = 0.5 * (LMLgrad_det + LMLgrad_quad)
    return {'lik': SP.array([LMLgrad])}
def fitKronApprox(a):
    Sbg = SP.zeros_like(S[0])
    Kbg = SP.zeros_like(K[0])
    for i in range(len(S)):
        Sbg += a[i] * S[i]
    for i in range(len(K)):
        Kbg += a[i + len(S)] * K[i]
    Gamma1 = SP.kron(Sbg, Kbg)
    return ((Gamma - Gamma1)**2).sum()
def L_basic(rate_mat, rate_dic, pb=0.5, kex=300.0):
    # populate the Liouvillian with relaxation and chemical exchange terms
    a = np.zeros((127, 127), dtype=complex)
    # relaxation bits
    pa = 1.0 - pb
    a += rate_mat
    for i, prod in enumerate(prodList):
        if prod == 'Iz':
            # these are for the return of magnetisation to equilibrium
            a[i, 0] -= rate_dic['Iz'] * pa
            a[i + 63, 0] -= rate_dic['Iz'] * pb
        elif prod == 'Rz':
            a[i, 0] -= rate_dic['Rz'] * pa
            a[i + 63, 0] -= rate_dic['Rz'] * pb
        elif prod == 'Sz':
            a[i, 0] -= rate_dic['Sz'] * pa
            a[i + 63, 0] -= rate_dic['Sz'] * pb
    # add exchange
    k_ge = kex * pb
    k_eg = kex * pa
    k_mat = np.array([[-k_ge, k_eg], [k_ge, -k_eg]])
    k_mat = kron(k_mat, np.eye(63))
    a[1:127, 1:127] -= k_mat  # signs are important!
    return a
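# Note (added illustration, not from the original source): the exchange block
# kron(k_mat, eye(n)) couples each basis operator in state A to its partner in
# state B; its columns sum to zero, so exchange alone conserves total
# population. Quick check:
import numpy as np

kex, pb = 300.0, 0.5
k_ge, k_eg = kex * pb, kex * (1.0 - pb)
k_mat = np.array([[-k_ge, k_eg], [k_ge, -k_eg]])
assert np.allclose(np.kron(k_mat, np.eye(63)).sum(axis=0), 0.0)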
def getVarianceKron(C, R, verbose=False):
    """ get variance scaling of kron(C,R) """
    n_K = len(C) * len(R)
    c = SP.kron(SP.diag(C), SP.diag(R)).sum() - 1. / n_K * SP.dot(
        R.T, SP.dot(SP.ones((R.shape[0], C.shape[0])), C)).sum()
    scalar = (n_K - 1) / c
    return 1.0 / scalar
def HamiltonianGen_old(n, a, b, d):
    """ Generate default Hamiltonian coefficients. """
    ### Construct Hamiltonian ###
    Z = sp.matrix([[1, 0], [0, -1]])
    X = sp.matrix([[0, 1], [1, 0]])
    I = sp.identity(2)
    alpha = sp.zeros((2**n, 2**n))
    beta = sp.zeros((2**n, 2**n))
    delta = sp.zeros((2**n, 2**n))
    matrices = []

    # Calculate alpha
    for i in range(0, n):
        for m in range(0, n - 1):
            matrices.append(I)
        matrices.insert(i, Z)
        temp = matrices[0]
        matrices.pop(0)
        while (len(matrices) != 0):
            temp = sp.kron(temp, matrices[0])
            matrices.pop(0)
        alpha = alpha + temp * a[i]
    temp = 0

    # Calculate beta
    for i in range(0, n):
        for j in range(i + 1, n):
            if (i != j):
                for m in range(0, n - 2):
                    matrices.append(I)
                matrices.insert(i, Z)
                matrices.insert(j, Z)
                temp = matrices[0]
                matrices.pop(0)
                while (len(matrices) != 0):
                    temp = sp.kron(temp, matrices[0])
                    matrices.pop(0)
                beta = beta + temp * b[i, j]
    temp = 0

    # Calculate delta
    for i in range(0, n):
        for m in range(0, n - 1):
            matrices.append(I)
        matrices.insert(i, X)
        temp = matrices[0]
        matrices.pop(0)
        while (len(matrices) != 0):
            temp = sp.kron(temp, matrices[0])
            matrices.pop(0)
        delta += temp * d[i]
    return [alpha, beta, delta]
def _multiplex(self, bottom_gate, bottom_qubit_index, list_of_angles):
    """
    Internal recursive method to create gates to perform rotations on the
    imaginary qubits: works by rotating LSB (and hence ALL imaginary
    qubits) by combo angle and then flipping sign (by flipping the bit,
    hence moving the complex amplitudes) of half the imaginary qubits
    (CNOT) followed by another combo angle on LSB, therefore executing
    conditional (on MSB) rotations, thereby disentangling LSB.
    """
    list_len = len(list_of_angles)
    target_qubit = self.nth_qubit_from_least_sig_qubit(bottom_qubit_index)

    # Case of no multiplexing = base case for recursion
    if list_len == 1:
        return bottom_gate(list_of_angles[0], target_qubit)

    local_num_qubits = int(math.log2(list_len)) + 1
    control_qubit = self.nth_qubit_from_least_sig_qubit(
        local_num_qubits - 1 + bottom_qubit_index)

    # calc angle weights, assuming recursion (that is, the lower-level
    # requested angles have been correctly implemented by recursion)
    angle_weight = scipy.kron([[0.5, 0.5], [0.5, -0.5]],
                              numpy.identity(2 ** (local_num_qubits - 2)))

    # calc the combo angles
    list_of_angles = (angle_weight * numpy.matrix(
        list_of_angles).transpose()).reshape(-1).tolist()[0]

    combine_composite_gates = CompositeGate(
        "multiplex" + local_num_qubits.__str__(), [], self.arg)

    # recursive step on half the angles fulfilling the above assumption
    combine_composite_gates._attach(
        self._multiplex(bottom_gate, bottom_qubit_index,
                        list_of_angles[0:(list_len // 2)]))

    # combine_composite_gates.cx(control_qubit, target_qubit) -> does not
    # work as expected because checks circuit
    # so attach CNOT as follows, thereby flipping the LSB qubit
    combine_composite_gates._attach(CnotGate(control_qubit, target_qubit))

    # implement extra efficiency from the paper of cancelling adjacent
    # CNOTs (by leaving out last CNOT and reversing (NOT inverting) the
    # second lower-level multiplex)
    sub_gate = self._multiplex(
        bottom_gate, bottom_qubit_index, list_of_angles[(list_len // 2):])
    if isinstance(sub_gate, CompositeGate):
        combine_composite_gates._attach(sub_gate.reverse())
    else:
        combine_composite_gates._attach(sub_gate)

    # outer multiplex keeps final CNOT, because there is no adjacent CNOT
    # to cancel with
    if self.num_qubits == local_num_qubits + bottom_qubit_index:
        combine_composite_gates._attach(CnotGate(control_qubit, target_qubit))

    return combine_composite_gates
def _multiplex(self, target_gate, list_of_angles):
    """
    Return a recursive implementation of a multiplexor circuit, where each
    instruction itself has a decomposition based on smaller multiplexors.

    The LSB is the multiplexor "data" and the other bits are multiplexor
    "select".

    Args:
        target_gate (Gate): Ry or Rz gate to apply to target qubit,
            multiplexed over all other "select" qubits
        list_of_angles (list[float]): list of rotation angles to apply Ry
            and Rz

    Returns:
        DAGCircuit: the circuit implementing the multiplexor's action
    """
    list_len = len(list_of_angles)
    local_num_qubits = int(math.log2(list_len)) + 1

    q = QuantumRegister(local_num_qubits)
    circuit = QuantumCircuit(q)
    circuit.name = "multiplex" + local_num_qubits.__str__()

    lsb = q[0]
    msb = q[local_num_qubits - 1]

    # case of no multiplexing: base case for recursion
    if local_num_qubits == 1:
        circuit.append(target_gate(list_of_angles[0]), [q[0]])
        return circuit

    # calc angle weights, assuming recursion (that is, the lower-level
    # requested angles have been correctly implemented by recursion)
    angle_weight = scipy.kron([[0.5, 0.5], [0.5, -0.5]],
                              np.identity(2 ** (local_num_qubits - 2)))

    # calc the combo angles
    list_of_angles = angle_weight.dot(np.array(list_of_angles)).tolist()

    # recursive step on half the angles fulfilling the above assumption
    multiplex_1 = self._multiplex(target_gate, list_of_angles[0:(list_len // 2)])
    circuit.append(multiplex_1.to_instruction(), q[0:-1])

    # attach CNOT as follows, thereby flipping the LSB qubit
    circuit.append(CnotGate(), [msb, lsb])

    # implement extra efficiency from the paper of cancelling adjacent
    # CNOTs (by leaving out last CNOT and reversing (NOT inverting) the
    # second lower-level multiplex)
    multiplex_2 = self._multiplex(target_gate, list_of_angles[(list_len // 2):])
    if list_len > 1:
        circuit.append(multiplex_2.to_instruction().mirror(), q[0:-1])
    else:
        circuit.append(multiplex_2.to_instruction(), q[0:-1])

    # attach a final CNOT
    circuit.append(CnotGate(), [msb, lsb])

    return circuit
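# Note (added illustration, not from the original source): the angle_weight
# matrix kron([[0.5, 0.5], [0.5, -0.5]], I) maps the target rotation angles
# into sum/difference combo angles; it squares to I/2, so applying it twice
# and doubling recovers the originals:
import numpy as np

angles = np.array([0.1, 0.2, 0.3, 0.4])  # 4 angles -> 2 select qubits
W = np.kron([[0.5, 0.5], [0.5, -0.5]], np.identity(2))
combo = W.dot(angles)
assert np.allclose(2.0 * W.dot(combo), angles)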
def make_E_2s(A1, A2, B1, B2, op):
    res = sp.zeros((A1.shape[1]**2, A2.shape[2]**2), dtype=A1.dtype)
    for s in range(A1.shape[0]):
        for t in range(A2.shape[0]):
            for u in range(A1.shape[0]):
                for v in range(A2.shape[0]):
                    res += sp.kron(A1[s].dot(A2[t]),
                                   B1[u].dot(B2[v]).conj()) * op[u, v, s, t]
    return res
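# Note (added illustration, assuming the module's sp import): with the
# identity operator, op[u, v, s, t] = delta_us * delta_vt, the two-site
# transfer operator factorises into single-site ones via the mixed-product
# property (A kron B)(C kron D) = (AC) kron (BD):
import numpy as np

rng = np.random.default_rng(3)
d, D = 2, 3
A1 = rng.standard_normal((d, D, D))
A2 = rng.standard_normal((d, D, D))
op = np.einsum('us,vt->uvst', np.eye(d), np.eye(d))
E2 = make_E_2s(A1, A2, A1, A2, op)
E_a = sum(np.kron(A1[s], A1[s].conj()) for s in range(d))
E_b = sum(np.kron(A2[t], A2[t].conj()) for t in range(d))
assert np.allclose(E2, E_a @ E_b)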
def LW(self):
    R = sp.zeros((self.mean.Y.size, self.mean.n_covs))
    istart = 0
    for ti in range(self.mean.n_terms):
        iend = istart + self.mean.F[ti].shape[1] * self.mean.A[ti].shape[0]
        R[:, istart:iend] = sp.kron(self.ALc()[ti].T, self.LrF()[ti])
        istart = iend  # not `istart += iend`, which would misplace blocks beyond two terms
    return R
def getGradient(self, j):
    """ get rotated gradient for fixed effect i """
    self._rotate()
    i = int(self.indicator['term'][j])
    r = int(self.indicator['row'][j])
    c = int(self.indicator['col'][j])
    rv = -SP.kron(self.Fr[i][:, [r]], self.Ar[i][[c], :])
    return rv
def predict(self, hyperparams, Xstar_r, compute_cov=False, debugging=False):
    """
    predict on Xstar
    """
    self._update_inputs(hyperparams)
    KV = self.get_covariances(hyperparams, debugging=debugging)

    self.covar_r.Xcross = Xstar_r

    Kstar_r = self.covar_r.Kcross(hyperparams['covar_r'])
    Kstar_c = self.covar_c.K(hyperparams['covar_c'])
    KinvY = SP.dot(KV['U_r'], SP.dot(KV['Ytilde'], KV['U_c'].T))
    Ystar = SP.dot(Kstar_r.T, SP.dot(KinvY, Kstar_c))
    Ystar = unravel(Ystar, self.covar_r.n_cross, self.t)

    if debugging:
        Kstar = SP.kron(Kstar_c, Kstar_r)
        Ynaive = SP.dot(Kstar.T, KV['alpha'])
        Ynaive = unravel(Ynaive, self.covar_r.n_cross, self.t)
        assert SP.allclose(Ystar, Ynaive), 'ouch, prediction does not work out'

    Ystar_covar = []
    if compute_cov:
        CU = fast_dot(Kstar_c, KV['U_c'])
        s_rev = 1. / KV['S']
        Ystar_covar = SP.zeros([Xstar_r.shape[0], self.Y.shape[1]])
        printProgressBar(0, Xstar_r.shape[0], prefix='Computing prediction variance:',
                         suffix='Complete', length=20)
        for i in range(Xstar_r.shape[0]):
            R_star_star = self.covar_r.K(hyperparams['covar_r'],
                                         SP.expand_dims(Xstar_r[i, :], axis=0))
            self.covar_r.Xcross = SP.expand_dims(Xstar_r[i, :], axis=0)
            R_tr_star = self.covar_r.Kcross(hyperparams['covar_r'])
            RU = SP.dot(R_tr_star.T, KV['U_r'])
            q = SP.kron(SP.diag(Kstar_c), R_star_star)
            t = SP.zeros([self.t])
            for j in range(self.t):
                temp = SP.kron(CU[j, :], RU)
                t[j, ] = SP.sum((s_rev * temp).T * temp.T, axis=0)
            Ystar_covar[i, :] = q - t
            if (i + 1) % max(1, Xstar_r.shape[0] // 10) == 0:
                printProgressBar(i + 1, Xstar_r.shape[0],
                                 prefix='Computing prediction variance:',
                                 suffix='Complete', length=20)
        self.covar_r.Xcross = Xstar_r
    return Ystar, Ystar_covar
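# Note (added illustration, not from the original source): the variance loop
# above relies on diag(kron(A, B)) = kron(diag(A), diag(B)), which avoids ever
# forming the full N*t x N*t predictive covariance:
import numpy as np

A = np.random.rand(3, 3)
B = np.random.rand(2, 2)
assert np.allclose(np.diag(np.kron(A, B)), np.kron(np.diag(A), np.diag(B)))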
def getK_verbose(KV, noise=True):
    if 'K' not in KV.keys():
        K = SP.kron(KV['Kr'], KV['Kc'])
        if noise:
            K += SP.diag(KV['Knoise'])
        Ki = SP.linalg.inv(K)
        KV['K'] = K
        KV['Ki'] = Ki
    return [KV['K'], KV['Ki']]
def W(self):
    R = sp.zeros((self.Y.size, self.n_covs))
    istart = 0
    for ti in range(self.n_terms):
        iend = istart + self.F[ti].shape[1] * self.A[ti].shape[0]
        R[:, istart:iend] = sp.kron(self.A[ti].T, self.F[ti])
        istart = iend  # advance to the next block of columns
    if self._miss:
        R = R[self._veIok, :]
    return R
def K_grad_i(self, i):
    nCg = self.Cg.getNumberParams()

    if i >= self.getNumberParams():
        raise ValueError("Trying to retrieve the gradient over a "
                         "parameter that is inactive.")

    if self.dim > _MAX_DIM:
        raise TooExpensiveOperationError(msg_too_expensive_dim(my_name(), _MAX_DIM))

    i = self._actindex2index(i)

    if i < nCg:
        rv = sp.kron(self.Cg.K_grad_i(i), self.R)
    else:
        _i = i - nCg
        rv = sp.kron(self.Cn.K_grad_i(_i), sp.eye(self.dim_r))
    return rv
def aveCC2F(grid):
    "Construct the averaging operator from cell centers to faces."
    if grid.ndim == 1:
        aveCC2F = av_extrap(grid.shape[0])
    elif grid.ndim == 2:
        aveCC2F = sp.vstack(
            (sp.kron(speye(grid.shape[1]), av_extrap(grid.shape[0])),
             sp.kron(av_extrap(grid.shape[1]), speye(grid.shape[0]))),
            format="csr")
    elif grid.ndim == 3:
        aveCC2F = sp.vstack(
            (kron3(speye(grid.shape[2]), speye(grid.shape[1]), av_extrap(grid.shape[0])),
             kron3(speye(grid.shape[2]), av_extrap(grid.shape[1]), speye(grid.shape[0])),
             kron3(av_extrap(grid.shape[2]), speye(grid.shape[1]), speye(grid.shape[0]))),
            format="csr")
    return aveCC2F
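# Note (added illustration, not from the original source): aveCC2F depends on
# the module helpers speye, av_extrap and kron3 (kron3(A, B, C) =
# kron(A, kron(B, C))). A self-contained 2-D sketch of the same pattern with
# scipy.sparse, where `av1d` is a hypothetical stand-in for av_extrap:
import scipy.sparse as sparse

def av1d(n):
    " 1-D cell-center -> face averaging with constant extrapolation at the ends. "
    A = sparse.diags([0.5, 0.5], [-1, 0], shape=(n + 1, n)).tolil()
    A[0, 0] = 1.0
    A[-1, -1] = 1.0
    return A.tocsr()

nx, ny = 4, 3
ave = sparse.vstack((sparse.kron(sparse.eye(ny), av1d(nx)),   # x faces
                     sparse.kron(av1d(ny), sparse.eye(nx))),  # y faces
                    format="csr")
assert ave.shape == ((nx + 1) * ny + (ny + 1) * nx, nx * ny)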
def predict(self, hyperparams, xstar_r=None, xstar_c=None, var=False, hyperparams_star=None):
    """
    Predict mean and variance for given inputs

    **Parameters:**

    hyperparams : {}
        hyperparameters in logSpace

    xstar : [double]
        prediction inputs

    var : boolean
        return predicted variance

    interval_indices : [ int || bool ]
        Either scipy array-like of boolean indicators,
        or scipy array-like of integer indices, denoting
        which x indices to predict from data.

    hyperparams_star :
        optional alternative hyperparameters for cross covariance

    output : output dimension for prediction (0)
    """
    if var:
        print("predictive variance not supported yet")
    if hyperparams_star is None:
        hyperparams_star = hyperparams
    # 1. get covariance structure
    KV = self.get_covariances(hyperparams)
    # cross covariance:
    Kr_star = self.covar_r.K(hyperparams_star['covar_r'], self.x_r, xstar_r)
    Kc_star = self.covar_c.K(hyperparams_star['covar_c'], self.x_c, xstar_c)
    Kr_starU = SP.dot(Kr_star, KV['Ur'])
    Kc_starU = SP.dot(Kc_star, KV['Uc'])
    mu = kronravel(Kr_starU, Kc_starU, KV['YSi'])
    if VERBOSE:
        # trivial (naive) computations for checking
        [K, Ki] = getK_verbose(KV)
        Kstar = SP.kron(Kr_star, Kc_star)
        yt = SP.dot(Ki, self.y.ravel())
        mu_slow = SP.dot(Kstar, yt)
        check_dist(mu.ravel(), mu_slow)
    return mu
def fullrho(qu, squ):
    print("#Starting full density matrix simulation!")
    # Build state from pure states received from the MPS processes
    rho = sp.zeros((2**N, 2**N), dtype=sp.complex128)
    psis = [None] * N_samp
    for n in range(N_samp):
        pnum, psi = squ.get()
        psis[pnum] = psi
        rho += sp.outer(psi, psi.conj())
        squ.task_done()
    rho /= sp.trace(rho)

    Hfull = get_full_op(get_ham(N, lam))

    linds = get_linds(N, eps)
    linds = [(n, L.reshape(tuple([sp.prod(L.shape[:sp.ndim(L) // 2])] * 2)))
             for (n, L) in linds]
    linds_full = [sp.kron(sp.eye(2**(n - 1)),
                          sp.kron(L, sp.eye(2**(N - n + 1) // L.shape[0])))
                  for (n, L) in linds]
    for L in linds_full:
        assert L.shape == Hfull.shape

    Qfull = -1.j * Hfull - 0.5 * sp.sum([L.conj().T.dot(L) for L in linds_full], axis=0)

    szs = [None] + [sp.kron(sp.kron(sp.eye(2**(n - 1)), Sz), sp.eye(2**(N - n)))
                    for n in range(1, N + 1)]

    for i in range(N_steps + 1):
        rho /= sp.trace(rho)
        esz = []
        for n in range(1, N + 1):
            esz.append(sp.trace(szs[n].dot(rho)).real)

        if i % res_every == 0:
            if qu is None:
                print(esz)
            else:
                qu.put([-1, i, esz])
                qu.put([-2, i, [sp.NaN] * N])  # reserved for a second "exact" result

        # Do Euler steps, approximately integrating the Lindblad master equation
        rho += dt * (Qfull.dot(rho) + rho.dot(Qfull.conj().T)
                     + sum([L.dot(rho).dot(L.conj().T) for L in linds_full]))
def christof_trick(A1, F1, D, A2=None, F2=None):
    if A2 is None:
        A2 = A1
    if F2 is None:
        F2 = F1
    out = sp.zeros([A1.shape[1] * F1.shape[1], A2.shape[1] * F2.shape[1]])
    for c in range(A1.shape[0]):
        Oc = sp.dot(A1[[c], :].T, A2[[c], :])
        Or = sp.dot(F1.T, D[:, [c]] * F2)
        out += sp.kron(Oc, Or)
    return out
def Trans_prof(self, rho, phi, z):
    # note: the Laguerre-Gauss normalisation uses the Kronecker delta
    # d(0, m), not a Kronecker product, so test m == 0 explicitly
    delta_0m = 1.0 if self._m == 0 else 0.0
    coef = sp.sqrt(2 * math.factorial(self._p) /
                   ((1 + delta_0m) * sp.pi * math.factorial(self._m + self._p)))
    gouy = sp.exp(1j * (2 * self._p + self._m + 1) * self.Gouy(z)) / self.Spot_size(z)
    expon = sp.exp(-1j * self.k * rho**2 / (2 * self.R_curve(z))
                   - rho**2 / self.Spot_size(z)**2 + 1j * self._m * phi)
    return coef * gouy * (sp.sqrt(2) * rho / self.Spot_size(z))**self._m * \
        genlaguerre(self._p, self._m)(rho) * expon
def LMLdebug(self):
    """ LML function for debug """
    assert self.N * self.P < 2000, 'gp2kronSum:: N*P>=2000'

    y = SP.reshape(self.Y, (self.N * self.P), order='F')

    K = SP.kron(self.Cg.K(), self.XX)
    K += SP.kron(self.Cn.K() + self.offset * SP.eye(self.P), SP.eye(self.N))

    cholK = LA.cholesky(K)
    Kiy = LA.cho_solve((cholK, False), y)

    lml = y.shape[0] * SP.log(2 * SP.pi)
    lml += 2 * SP.log(SP.diag(cholK)).sum()
    lml += SP.dot(y, Kiy)
    lml *= 0.5

    return lml
def _update_cache(self):
    """ Update cache """
    cov_params_have_changed = self.Cr.params_have_changed or self.Cn.params_have_changed

    if self.Xr_has_changed:
        start = TIME.time()
        """ Row SVD Bg + Noise """
        Urstar, S, V = NLA.svd(self.Xr)
        self.cache['Srstar'] = SP.concatenate([S**2, SP.zeros(self.N - S.shape[0])])
        self.cache['Lr'] = Urstar.T
        self.mean.setRowRotation(Lr=self.cache['Lr'])

        smartSum(self.time, 'cache_XXchanged', TIME.time() - start)
        smartSum(self.count, 'cache_XXchanged', 1)

    if cov_params_have_changed:
        start = TIME.time()
        """ Col SVD Noise """
        S2, U2 = LA.eigh(self.Cn.K() + self.offset * SP.eye(self.P))
        self.cache['Sc2'] = S2
        US2 = SP.dot(U2, SP.diag(SP.sqrt(S2)))
        USi2 = SP.dot(U2, SP.diag(SP.sqrt(1. / S2)))

        """ Col SVD region """
        A = SP.reshape(self.Cr.getParams(), (self.P, self.rank), order='F')
        Astar = SP.dot(USi2.T, A)
        Ucstar, S, V = NLA.svd(Astar)
        self.cache['Scstar'] = SP.concatenate([S**2, SP.zeros(self.P - S.shape[0])])
        self.cache['Lc'] = SP.dot(Ucstar.T, USi2.T)

        """ pheno """
        self.mean.setColRotation(self.cache['Lc'])

    if cov_params_have_changed or self.Xr_has_changed:
        """ S """
        self.cache['s'] = SP.kron(self.cache['Scstar'], self.cache['Srstar']) + 1
        self.cache['d'] = 1. / self.cache['s']
        self.cache['D'] = SP.reshape(self.cache['d'], (self.N, self.P), order='F')

        """ pheno """
        self.cache['LY'] = self.mean.evaluate()
        self.cache['DLY'] = self.cache['D'] * self.cache['LY']

        smartSum(self.time, 'cache_colSVDpRot', TIME.time() - start)
        smartSum(self.count, 'cache_colSVDpRot', 1)

    self.Y_has_changed = False
    self.Xr_has_changed = False
    self.Cr.params_have_changed = False
    self.Cn.params_have_changed = False
def data_simulation(n_samples, n_dimensions, n_tasks, n_latent, train_portion):
    # true parameters
    true_param = dict()
    true_param['X_c'] = SP.random.randn(n_tasks, n_latent)
    true_param['X_s'] = SP.random.randn(n_tasks, n_latent)
    X = SP.random.randn(n_samples, n_dimensions)  # / SP.sqrt(n_dimensions)
    R = SP.dot(X, X.T)
    true_param['C'] = SP.dot(true_param['X_c'], true_param['X_c'].T)
    true_param['Sigma'] = SP.dot(true_param['X_s'], true_param['X_s'].T)
    K = SP.kron(true_param['C'], R) + SP.kron(true_param['Sigma'], SP.eye(n_samples))
    y = SP.random.multivariate_normal(SP.zeros(n_tasks * n_samples), K)
    Y = SP.reshape(y, (n_samples, n_tasks), order='F')
    temp = SP.random.permutation(n_samples)
    idx_train = temp[0:int(train_portion * n_samples)]
    idx_test = temp[int(train_portion * n_samples):]
    X_train = X[idx_train, :]
    X_test = X[idx_test, :]
    Y_train = Y[idx_train, :]
    Y_test = Y[idx_test, :]
    return X_train, X_test, Y_train, Y_test, true_param
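# Note (added illustration, not from the original source): the order='F'
# reshape above matches the column-major vec convention, under which
# kron(C, R) vec(X) = vec(R X C^T), i.e. vec(Y) ~ N(0, kron(C, R)) gives Y a
# row covariance R and a column covariance C:
import numpy as np

rng = np.random.default_rng(2)
R = rng.standard_normal((4, 4))
C = rng.standard_normal((3, 3))
X = rng.standard_normal((4, 3))
lhs = np.kron(C, R).dot(X.reshape(-1, order='F'))
rhs = (R @ X @ C.T).reshape(-1, order='F')
assert np.allclose(lhs, rhs)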
def genNoise(self, vTot=0.4, vCommon=0.2):
    vSpecific = vTot - vCommon
    # common
    Yc = SP.kron(SP.random.randn(self.N, 1), SP.random.randn(1, self.P))
    Yc *= SP.sqrt(vCommon / Yc.var(0).mean())
    # independent
    Yi = SP.random.randn(self.N, self.P)
    Yi *= SP.sqrt(vSpecific / Yi.var(0).mean())
    return Yc, Yi