def __call__(self, mat, src, psi):
    """Solve mat(psi) = src with the conjugate-gradient algorithm.

    mat is applied as mat(in, out).  psi holds the initial guess on entry
    and is updated in place towards the solution.  The squared residual of
    each iteration is appended to self.history.  Convergence criterion:
    |r|^2 <= self.eps^2 * |src|^2 within self.maxiter iterations.
    """
    assert (src != psi)
    self.history = []
    verbose = g.default.is_verbose("cg")
    t0 = g.time()
    # work vectors: search direction p, matrix-vector product mmp, residual r
    p, mmp, r = g.copy(src), g.copy(src), g.copy(src)
    # NOTE(review): guess, and the initial d and b below, are computed but never used
    guess = g.norm2(psi)
    mat(psi, mmp)  # in, out
    d = g.innerProduct(psi, mmp).real
    b = g.norm2(mmp)
    # initial residual r = src - M psi; first search direction p = r
    r @= src - mmp
    p @= r
    a = g.norm2(p)
    cp = a  # cp tracks |r|^2 of the current iterate
    ssq = g.norm2(src)
    rsq = self.eps**2. * ssq  # stopping threshold on |r|^2
    for k in range(1, self.maxiter + 1):
        c = cp
        mat(p, mmp)
        dc = g.innerProduct(p, mmp)
        d = dc.real
        a = c / d  # step length alpha = <r,r> / <p,Mp>
        # fused update: r <- r - a*Mp, returning the new |r|^2
        cp = g.axpy_norm2(r, -a, mmp, r)
        b = cp / c  # beta for the new search direction
        psi += a * p
        p @= b * p + r
        self.history.append(cp)
        if verbose:
            g.message("res^2[ %d ] = %g" % (k, cp))
        if cp <= rsq:
            if verbose:
                t1 = g.time()
                g.message("Converged in %g s" % (t1 - t0))
            break
def __call__(self, mat, src, psi):
    """Solve mat(psi) = src with the BiCGSTAB algorithm.

    mat is applied as mat(in, out).  psi holds the initial guess on entry
    and is updated in place.  Convergence criterion:
    |r|^2 <= self.eps^2 * |src|^2 within self.maxiter iterations.
    """
    verbose = g.default.is_verbose("bicgstab")
    # NOTE(review): uses bare time() while sibling solvers use g.time();
    # assumes `time` is imported at module level — confirm
    t0 = time()
    r, rhat, p, s = g.copy(src), g.copy(src), g.copy(src), g.copy(src)
    mmpsi, mmp, mms = g.copy(src), g.copy(src), g.copy(src)
    rho, rhoprev, alpha, omega = 1., 1., 1., 1.
    mat(psi, mmpsi)
    r @= src - mmpsi  # initial residual
    rhat @= r         # shadow residual, kept fixed throughout
    p @= r
    mmp @= r
    ssq = g.norm2(src)
    rsq = self.eps**2. * ssq  # stopping threshold on |r|^2
    for k in range(self.maxiter):
        rhoprev = rho
        rho = g.innerProduct(rhat, r).real
        beta = (rho / rhoprev) * (alpha / omega)
        p @= r + beta * p - beta * omega * mmp
        mat(p, mmp)
        alpha = rho / g.innerProduct(rhat, mmp).real
        s @= r - alpha * mmp
        mat(s, mms)
        # fused <Ms, s> and |Ms|^2
        ip, mms2 = g.innerProductNorm2(mms, s)
        if mms2 == 0.:
            # NOTE(review): skips the whole update; if mms2 stays zero the
            # loop makes no progress until maxiter — confirm intent
            continue
        omega = ip.real / mms2
        psi += alpha * p + omega * s
        # fused update: r <- s - omega*Ms, returning the new |r|^2
        r2 = g.axpy_norm2(r, -omega, mms, s)
        if verbose:
            g.message("res^2[ %d ] = %g" % (k, r2))
        if r2 <= rsq:
            if verbose:
                t1 = time()
                g.message("Converged in %g s" % (t1 - t0))
            break
def log(i, convergence_threshold=0.5):
    """Matrix logarithm of the lattice field i via a Mercator series.

    Writes i = n*(1 + x) with a scalar n chosen to minimize |x|^2, then
    log(i) = log(n) + log(1 + x), expanding log(1 + x) = sum_j (-1)^(j+1) x^j / j.
    The computation is carried out in double precision and converted back to
    the precision of i at the end.

    Raises AssertionError if the normalized deviation n2 is not below
    convergence_threshold (the series would not converge).
    """
    # i = n*(1 + x), log(i) = log(n) + log(1+x)
    # x = i/n - 1, |x|^2 = <i/n - 1, i/n - 1> = |i|^2/n^2 + |1|^2 - (<i,1> + <1,i>)/n
    # d/dn |x|^2 = -2 |i|^2/n^3 + (<i,1> + <1,i>)/n^2 = 0 -> 2|i|^2 == n (<i,1> + <1,i>)
    if i.grid.precision != gpt.double:
        x = gpt.convert(i, gpt.double)
    else:
        x = gpt.copy(i)
    I = numpy.identity(x.otype.shape[0], x.grid.precision.complex_dtype)
    lI = gpt.lattice(x)
    lI[:] = I
    # optimal scale n from the minimization above
    n = gpt.norm2(x) / gpt.innerProduct(x, lI).real
    x /= n
    x -= lI
    n2 = gpt.norm2(x)**0.5 / x.grid.gsites
    # Check convergence BEFORE choosing the series order: for n2 == 1 the
    # expression below would divide by zero, and for n2 > 1 it would produce
    # a negative order.  (Previously the assert came after this line.)
    assert n2 < convergence_threshold
    # heuristic truncation order: smaller n2 -> faster convergence -> fewer terms
    order = 8 * int(16 / (-numpy.log10(n2)))
    o = gpt.copy(x)
    xn = gpt.copy(x)
    for j in range(2, order + 1):
        xn @= xn * x
        o -= xn * ((-1.0)**j / j)
    o += lI * numpy.log(n)
    # convert back to the caller's precision if needed
    if i.grid.precision != gpt.double:
        r = gpt.lattice(i)
        gpt.convert(r, o)
        o = r
    return o
def approx(dst, src):
    """Mode-space approximation: dst = sum_i f_evals[i] * left[i] * <right[i]|src>."""
    assert src != dst
    verbose = g.default.is_verbose("modes")
    t_start = g.time()
    dst[:] = 0
    # accumulate the contribution of each (left, right) mode pair
    for weight, l_mode, r_mode in zip(f_evals, left, right):
        dst += weight * l_mode * g.innerProduct(r_mode, src)
    if verbose:
        t_stop = g.time()
        g.message("Approximation by %d modes took %g s" % (len(left), t_stop - t_start))
def __call__(self, matrix, src, dst):
    """Deflate src against the stored eigenpairs, then invoke the wrapped inverter."""
    verbose = g.default.is_verbose("deflate")
    # |dst> = sum_n 1/ev[n] |n><n|src>
    t_begin = g.time()
    dst[:] = 0
    for eigenvalue, mode in zip(self.ev, self.evec):
        dst += mode * g.innerProduct(mode, src) / eigenvalue
    t_end = g.time()
    if verbose:
        g.message("Deflated in %g s" % (t_end - t_begin))
    return self.inverter(matrix, src, dst)
def evals(matrix, evec, check_eps2=None):
    """Return the Rayleigh quotient <v|M|v> / <v|v> for each vector in evec.

    matrix is applied as matrix(v, tmp) with tmp receiving M|v>.  If
    check_eps2 is given, |M v - l v|^2 is reported for every vector and an
    AssertionError is raised when it exceeds check_eps2.
    """
    assert len(evec) > 0
    tmp = g.lattice(evec[0])
    ev = []
    for i, v in enumerate(evec):
        matrix(v, tmp)
        # M |v> = l |v>  ->  l = <v|M|v> / <v|v>
        l = g.innerProduct(v, tmp).real / g.norm2(v)
        ev.append(l)
        # fix: `if not check_eps2 is None` -> idiomatic `is not None`
        if check_eps2 is not None:
            eps2 = g.norm2(tmp - l * v)
            g.message("eval[ %d ] = %g, eps^2 = %g" % (i, l, eps2))
            if eps2 > check_eps2:
                # keep AssertionError semantics callers may rely on
                assert False
    return ev
def __call__(self, matrix, src, dst):
    """Coarse-grid deflation: project src onto the coarse grid, deflate there
    against the stored coarse eigenpairs, promote the result back, and hand
    off to the wrapped inverter."""
    verbose = g.default.is_verbose("deflate")
    # |dst> = sum_n 1/ev[n] |n><n|src>
    t_a = g.time()
    g.block.project(self.csrc, src, self.basis)
    t_b = g.time()
    self.cdst[:] = 0
    for eigenvalue, mode in zip(self.fev, self.cevec):
        self.cdst += mode * g.innerProduct(mode, self.csrc) / eigenvalue
    t_c = g.time()
    g.block.promote(self.cdst, dst, self.basis)
    t_d = g.time()
    if verbose:
        g.message(
            "Coarse-grid deflated in %g s (project %g s, coarse deflate %g s, promote %g s)"
            % (t_d - t_a, t_b - t_a, t_c - t_b, t_d - t_c))
    return self.inverter(matrix, src, dst)
def approx(dst, src):
    """Coarse-grid mode approximation: restrict src, apply
    sum_i f_evals[i] * left[i] * <right[i]|.> on the coarse grid, prolongate back."""
    assert src != dst
    verbose = g.default.is_verbose("modes")
    t_start = g.time()
    # restrict the fine source to the coarse grid
    src_coarse = g.lattice(right[0])
    g.block.project(src_coarse, src, right_basis)
    # accumulate the mode expansion on the coarse grid
    dst_coarse = g.lattice(left[0])
    dst_coarse[:] = 0
    for weight, l_mode, r_mode in zip(f_evals, left, right):
        dst_coarse += weight * l_mode * g.innerProduct(r_mode, src_coarse)
    # prolongate the result back to the fine grid
    g.block.promote(dst_coarse, dst, left_basis)
    if verbose:
        t_stop = g.time()
        g.message("Approximation by %d coarse modes took %g s" % (len(left), t_stop - t_start))
def evals(matrix, evec, params):
    """Rayleigh quotients <v|M|v> / <v|v> for every params["skip"]-th vector of evec.

    matrix is applied as matrix(tmp, v) with tmp receiving the output.  If
    params["real"] is set, only the real part of each quotient is kept.  When
    params["check_eps2"] is not None, |M v - l v|^2 is reported and
    EvalsNotConverged is raised if it exceeds that bound.
    """
    check_eps2 = params["check_eps2"]
    stride = params["skip"]
    assert len(evec) > 0
    work = g.lattice(evec[0])
    spectrum = []
    for i in range(0, len(evec), stride):
        vec = evec[i]
        matrix(work, vec)
        # M |v> = l |v> -> <v|M|v> / <v|v>
        l = g.innerProduct(vec, work) / g.norm2(vec)
        l = l.real if params["real"] else l
        spectrum.append(l)
        if check_eps2 is not None:
            eps2 = g.norm2(work - l * vec)
            g.message(f"eval[ {i} ] = {l}, eps^2 = {eps2}")
            if eps2 > check_eps2:
                raise EvalsNotConverged()
    return spectrum
def orthogonalize(w, basis, ips=None, nblock=4):
    """Orthogonalize w in place against basis (classical Gram-Schmidt, blocked).

    Inner products are computed nblock at a time with a single global sum per
    block to reduce communication; the trailing (n mod nblock) vectors are
    handled one at a time.  If ips is given, the computed inner products
    <basis[j]|w> are stored at ips[j].
    """
    n = len(basis)
    if n == 0:
        return
    grid = basis[0].grid
    # per-block buffer of local (rank-level) inner products
    lip = numpy.array([0.0] * nblock, dtype=numpy.complex128)
    i = 0
    t_rankInnerProduct = 0.0
    t_globalSum = 0.0
    t_linearCombination = 0.0
    # blocked passes over full groups of nblock basis vectors
    while i + nblock <= n:
        t_rankInnerProduct -= gpt.time()
        for j in range(nblock):
            lip[j] = gpt.rankInnerProduct(basis[i + j], w)
        t_rankInnerProduct += gpt.time()
        # one communication for the whole block
        t_globalSum -= gpt.time()
        grid.globalsum(lip)
        t_globalSum += gpt.time()
        if ips is not None:
            for j in range(nblock):
                ips[i + j] = lip[j]
        # subtract all nblock projections in one expression
        expr = w - lip[0] * basis[i + 0]
        for j in range(1, nblock):
            expr -= lip[j] * basis[i + j]
        t_linearCombination -= gpt.time()
        w @= expr
        t_linearCombination += gpt.time()
        i += nblock
    # NOTE(review): this timing message is emitted unconditionally on every
    # call (even when no blocked pass ran) — likely debug output; consider
    # guarding with a verbosity flag
    gpt.message(
        "Timing Ortho: %g rankInnerProduct, %g globalsum, %g lc"
        % (t_rankInnerProduct, t_globalSum, t_linearCombination)
    )
    # remainder: one vector at a time (one global sum each)
    while i < n:
        ip = gpt.innerProduct(basis[i], w)
        w -= ip * basis[i]
        if ips is not None:
            ips[i] = ip
        i += 1
def __call__(self, mat, src, ckpt=None):
    """Block implicitly-restarted Lanczos driver.

    Builds a block-Krylov basis in steps of Nu vectors via self.blockStep,
    restarts up to self.params["maxiter"] times, tests convergence against
    self.params["resid"], and returns (evec[0:Nstop], ev2_copy[0:Nstop]).
    mat is the operator; ckpt an optional checkpointer for restarts.
    """
    # verbosity
    verbose = g.default.is_verbose("irl")
    # checkpointer
    if ckpt is None:
        ckpt = g.checkpointer_none()
    ckpt.grid = src.grid
    self.ckpt = ckpt
    # first approximate largest eigenvalue (used to normalize the residual test)
    pit = g.algorithms.eigen.power_iteration(eps=0.05, maxiter=10, real=True)
    lambda_max = pit(mat, src)[0]
    # parameters
    Nm = self.params["Nm"]
    Nu = self.params["Nu"]
    Nk = self.params["Nk"]
    Nstop = self.params["Nstop"]
    Np = Nm-Nk
    MaxIter=self.params["maxiter"]
    # NOTE(review): true division makes Np a float; it is only used below
    # through int(Np/Nu) — confirm the intended per-restart block count
    Np /= MaxIter
    assert Nm >= Nk and Nstop <= Nk
    # NOTE(review): debug print bypasses g.message
    print ( 'Nm=',Nm,'Nu=',Nu,'Nk=',Nk )
    # tensors: lmd/lme hold the block-tridiagonal alpha/beta couplings
    dtype = np.float64
    ctype = np.complex128
    lme = np.zeros((Nu,Nm), ctype)
    lmd = np.zeros((Nu,Nm), ctype)
    lme2 = np.zeros((Nu,Nm), ctype)
    lmd2 = np.empty((Nu,Nm), ctype)
    Qt = np.zeros((Nm,Nm),ctype)
    Q = np.zeros((Nm,Nm),ctype)  # NOTE(review): Q appears unused
    # NOTE(review): ev is allocated but never written before it is read in
    # the convergence test below — confirm
    ev = np.empty((Nm,), dtype)
    ev2_copy = np.empty((Nm,), dtype)
    # fields
    f = g.lattice(src)
    v = g.lattice(src)
    evec = [g.lattice(src) for i in range(Nm)]
    w = [g.lattice(src) for i in range(Nu)]
    w_copy = [g.lattice(src) for i in range(Nu)]
    # advice memory storage
    if not self.params["advise"] is None:
        g.advise(evec, self.params["advise"])
    # scalars
    k1 = 1
    k2 = Nk
    beta_k = 0.0
    rng=g.random("test")
    # set initial block of Nu random, mutually orthonormal vectors
    for i in range(Nu):
        rng.zn(w[i])
        if i > 0:
            g.orthogonalize(w[i],evec[0:i])
        evec[i]=g.copy(w[i])
        evec[i] *= 1.0/ g.norm2(evec[i]) ** 0.5
        g.message("norm(evec[%d]=%e "%(i,g.norm2(evec[i])))
        # debug: report residual non-orthogonality above 1e-6
        if i > 0:
            for j in range(i):
                ip=g.innerProduct(evec[j],w[i])
                if np.abs(ip) >1e-6:
                    g.message("inner(evec[%d],w[%d])=%e %e"% (j,i,ip.real,ip.imag))
    # initial Nk steps, Nu vectors at a time
    Nblock_k = int(Nk/Nu)
    for b in range(Nblock_k):
        self.blockStep(mat, lmd, lme, evec, w, w_copy, Nm, b,Nu)
    Nblock_p = int(Np/Nu)
    # restarting loop
    for it in range(MaxIter):
        if verbose:
            g.message("Restart iteration %d" % it)
        Nblock_l = Nblock_k + it*Nblock_p
        Nblock_r = Nblock_l + Nblock_p
        Nl = Nblock_l*Nu
        Nr = Nblock_r*Nu
        # NOTE(review): ev2 is undefined on the first restart iteration, so
        # this line raises NameError; it is immediately shadowed by the
        # np.empty below and was presumably meant to be removed
        ev2.resize(Nr)
        ev2 = np.empty((Nr,), dtype)
        # extend the basis by Nblock_p further block steps
        for b in range(Nblock_l, Nblock_r):
            self.blockStep(mat, lmd, lme, evec, w, w_copy, Nm, b,Nu)
        # copy couplings into scratch before the destructive diagonalization
        for u in range(Nu):
            for k in range(Nr):
                lmd2[u,k]=lmd[u,k]
                lme2[u,k]=lme[u,k]
        Qt = np.identity(Nr, ctype)
        # diagonalize
        t0 = g.time()
        self.diagonalize(ev2,lmd2,lme2,Nu,Nr,Qt)
        t1 = g.time()
        if verbose:
            g.message("Diagonalization took %g s" % (t1 - t0))
        # sort Ritz values descending (ev2_copy keeps the unsorted order matching Qt)
        ev2_copy = ev2.copy()
        ev2 = list(reversed(sorted(ev2)))
        for i in range(Nr):
            g.message("Rval[%d]= %e"%(i,ev2[i]))
        # convergence test
        if it >= self.params["Nminres"]:
            if verbose:
                g.message("Rotation to test convergence")
            # diagonalize
            # NOTE(review): ev was never filled, so this overwrites ev2 with
            # uninitialized data before diagonalize rewrites it — confirm
            for k in range(Nr):
                ev2[k] = ev[k]
            for u in range(Nu):
                for k in range(Nr):
                    lmd2[u,k]=lmd[u,k]
                    lme2[u,k]=lme[u,k]
            # NOTE(review): identity is Nm x Nm here but diagonalize is
            # called with dimension Nr — confirm shapes agree
            Qt = np.identity(Nm, ctype)
            t0 = g.time()
            self.diagonalize(ev2,lmd2,lme2,Nu,Nr,Qt)
            t1 = g.time()
            if verbose:
                g.message("Diagonalization took %g s" % (t1 - t0))
            B = g.copy(evec[0])
            allconv = True
            if beta_k >= self.params["betastp"]:
                # test Ritz vectors j = Nstop-1, Nstop-2, Nstop-4, ... (doubling stride)
                jj = 1
                while jj <= Nstop:
                    j = Nstop - jj
                    g.linear_combination(B, evec[0:Nr], Qt[j, 0:Nr])
                    g.message("norm=%e"%(g.norm2(B)))
                    B *= 1.0 / g.norm2(B) ** 0.5
                    if not ckpt.load(v):
                        # NOTE(review): argument order (v, B) differs from the
                        # mat(in, out) convention used elsewhere — confirm
                        mat(v, B)
                        ckpt.save(v)
                    ev_test = g.innerProduct(B, v).real
                    eps2 = g.norm2(v - ev_test * B) / lambda_max ** 2.0
                    if verbose:
                        g.message(
                            "%-65s %-45s %-50s"
                            % (
                                "ev[ %d ] = %s" % (j, ev2_copy[j]),
                                "<B|M|B> = %s" % (ev_test),
                                "|M B - ev B|^2 / ev_max^2 = %s" % (eps2),
                            )
                        )
                    if eps2 > self.params["resid"]:
                        allconv = False
                    if jj == Nstop:
                        break
                    jj = min([Nstop, 2 * jj])
            if allconv:
                if verbose:
                    g.message("Converged in %d iterations" % it)
                break
    # final rotation of the converged subspace into the leading Nstop slots
    t0 = g.time()
    g.rotate(evec, Qt, 0, Nstop, 0, Nk)
    t1 = g.time()
    if verbose:
        g.message("Final basis rotation took %g s" % (t1 - t0))
    return (evec[0:Nstop], ev2_copy[0:Nstop])
"order": 10, }) # implicitly restarted lanczos irl = g.algorithms.iterative.irl({ "Nk": 60, "Nstop": 60, "Nm": 80, "resid": 1e-8, "betastp": 0.0, "maxiter": 20, "Nminres": 7, # "maxapply" : 100 }) # start vector start = g.vspincolor(w.F_grid_eo) start[:] = g.vspincolor([[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]) # generate eigenvectors evec, ev = irl(c(w.NDagN), start) # , g.checkpointer("checkpoint") # memory info g.meminfo() # print eigenvalues of NDagN as well for i, v in enumerate(evec): w.NDagN(v, start) l = g.innerProduct(v, start).real g.message("%d %g %g %g" % (i, l, ev[i], c(l)))
def blockStep(self, mat, lmd, lme, evec, w, w_copy, Nm, b, Nu):
    """One block-Lanczos step for block b of width Nu.

    Applies mat to evec[L:R] (L = b*Nu, R = (b+1)*Nu) into w, subtracts the
    couplings to the previous and current block, re-orthogonalizes, and
    stores the alpha/beta couplings into lmd/lme.  The orthonormalized w
    becomes the next block evec[R:R+Nu].  Interleaved g.message calls are
    debug output reporting residual non-orthogonality above 1e-6.
    """
    assert (b+1)*Nu <= Nm
    verbose = g.default.is_verbose("irl")
    ckpt = self.ckpt
    alph = 0.0
    beta = 0.0
    L= b*Nu
    R= (b+1)*Nu
    # apply the operator to each vector of the current block (checkpointed)
    for k in range (L,R):
        if self.params["mem_report"]:
            g.mem_report(details=False)
        # compute
        t0 = g.time()
        if not ckpt.load(w[k-L]):
            # NOTE(review): argument order (out, in) here — confirm against
            # the mat(in, out) convention used by the single-vector solvers
            mat(w[k-L], evec[k])  # mat(v, B)
            ckpt.save(w[k-L])
        t1 = g.time()
        # allow to restrict maximal number of applications within run
        self.napply += 1
        if "maxapply" in self.params:
            if self.napply == self.params["maxapply"]:
                if verbose:
                    g.message("Maximal number of matrix applications reached")
                sys.exit(0)
    # debug: check orthogonality within the current block
    for u in range (Nu):
        for k in range (u,Nu):
            ip=g.innerProduct(evec[L+k],evec[L+u])
            if np.abs(ip) >1e-6:
                g.message("inner(evec[%d],evec[%d])=%e %e"% (L+k,L+u,ip.real,ip.imag))
    if b > 0:
        # subtract the coupling to the previous block: w -= lme^dagger * evec
        for u in range (Nu):
            for k in range (L-Nu+u,L):
                w[u] -= np.conjugate(lme[u,k]) * evec[k]
            for k in range (L-Nu+u,L):
                ip=g.innerProduct(evec[k],w[u])
                # if g.norm2(ip)>1e-6:
                if np.abs(ip) >1e-6:
                    g.message("inner(evec[%d],w[%d])=%e %e"% (k,u,ip.real,ip.imag))
    else:
        for u in range (Nu):
            g.message("norm(evec[%d])=%e"%(u,g.norm2(evec[u])))
    # block-diagonal couplings alpha: lmd[u][k] = <evec[k]|w[u]>, filled
    # Hermitian (upper triangle computed, lower from conjugation)
    for u in range (Nu):
        for k in range (L+u,R):
            lmd[u][k] = g.innerProduct(evec[k],w[u])
            lmd[k-L][L+u]=np.conjugate(lmd[u][k])
        # diagonal entries are real by construction
        lmd[u][L+u]=np.real(lmd[u][L+u])
    # subtract the coupling to the current block
    for u in range (Nu):
        for k in range (L,R):
            w[u] -= lmd[u][k]*evec[k]
        for k in range (L,R):
            ip=g.innerProduct(evec[k],w[u])
            if np.abs(ip) >1e-6:
                g.message("inner(evec[%d],w[%d])=%e %e"% (k,u,ip.real,ip.imag))
        # keep the un-orthogonalized residual for the beta computation below
        w_copy[u] = g.copy(w[u]);
    # reset the beta couplings of this block
    for u in range (Nu):
        for k in range (L,R):
            lme[u][k]=0.;
    # re-orthogonalize against the full basis built so far and normalize
    for u in range (Nu):
        g.orthogonalize(w[u],evec[0:R])
        w[u] *= 1.0 / g.norm2(w[u]) ** 0.5
        for k in range (R):
            ip=g.innerProduct(evec[k],w[u])
            if np.abs(ip) >1e-6:
                g.message("inner(evec[%d],w[%d])=%e %e"% (k,u,ip.real,ip.imag))
    # second orthogonalization pass (repeated Gram-Schmidt for stability)
    for u in range (Nu):
        g.orthogonalize(w[u],evec[0:R])
        w[u] *= 1.0 / g.norm2(w[u]) ** 0.5
        for k in range (R):
            ip=g.innerProduct(evec[k],w[u])
            if np.abs(ip) >1e-6:
                g.message("inner(evec[%d],w[%d])=%e %e"% (k,u,ip.real,ip.imag))
    # orthonormalize the new block internally
    for u in range (0,Nu):
        if u >0:
            g.orthogonalize(w[u],w[0:u])
        w[u] *= 1.0 / g.norm2(w[u]) ** 0.5
        for k in range (u):
            ip=g.innerProduct(w[k],w[u])
            if np.abs(ip) >1e-6:
                g.message("inner(w[%d],w[%d])=%e %e"% (k,u,ip.real,ip.imag))
        ip=g.innerProduct(w[u],w[u])
        g.message("inner(w[%d],w[%d])=%e %e"% (u,u,ip.real,ip.imag))
    # beta couplings: overlap of the orthonormal block with the raw residual
    for u in range (Nu):
        for v in range (u,Nu):
            lme[u][L+v] = g.innerProduct(w[u],w_copy[v])
        lme[u][L+u] = np.real(lme[u][L+u])
    t3 = g.time()
    for u in range (Nu):
        for k in range (L+u,R):
            g.message( " In block %d, beta[%d][%d]=%e %e" %( b, u, k-b*Nu,lme[u][k].real,lme[u][k].imag ) )
    # ckpt.save([w, alph, beta])
    # append the new block to the basis unless this was the last block
    if b < (Nm/Nu - 1):
        for u in range (Nu):
            evec[R+u] = g.copy(w[u])
            ip=g.innerProduct(evec[R+u],evec[R+u])
            if np.abs(ip) >1e-6:
                g.message("inner(evec[%d],evec[%d])=%e %e"% (R+u,R+u,ip.real,ip.imag))
def __call__(self, mat, src, ckpt=None):
    """Implicitly restarted Lanczos (IRL) eigensolver.

    Builds an Nm-step Lanczos factorization of mat, repeatedly compresses it
    back to Nk steps via implicitly shifted QR using the unwanted Ritz values
    as shifts, and restarts up to self.params["maxiter"] times.  Returns
    (evec[0:Nstop], ev2_copy[0:Nstop]).  mat is applied as mat(in, out);
    ckpt is an optional checkpointer for resuming interrupted runs.
    """
    # verbosity
    verbose = g.default.is_verbose("irl")
    # checkpointer
    if ckpt is None:
        ckpt = g.checkpointer_none()
    ckpt.grid = src.grid
    self.ckpt = ckpt
    # first approximate largest eigenvalue (normalizes the residual test)
    pit = g.algorithms.iterative.power_iteration({
        "eps": 0.05,
        "maxiter": 10
    })
    lambda_max = pit(mat, src)[0]
    # parameters
    Nm = self.params["Nm"]
    Nk = self.params["Nk"]
    Nstop = self.params["Nstop"]
    assert (Nm >= Nk and Nstop <= Nk)
    # tensors: lme holds the off-diagonal, ev the diagonal of the tridiagonal matrix
    dtype = np.float64
    lme = np.empty((Nm, ), dtype)
    lme2 = np.empty((Nm, ), dtype)
    ev = np.empty((Nm, ), dtype)
    ev2 = np.empty((Nm, ), dtype)
    ev2_copy = np.empty((Nm, ), dtype)
    # fields
    f = g.lattice(src)
    v = g.lattice(src)
    evec = [g.lattice(src) for i in range(Nm)]
    # scalars
    k1 = 1
    k2 = Nk
    Nconv = 0
    beta_k = 0.0
    # set initial vector
    evec[0] @= src / g.norm2(src)**0.5
    # initial Nk steps
    for k in range(Nk):
        self.step(mat, ev, lme, evec, f, Nm, k)
    # restarting loop
    for it in range(self.params["maxiter"]):
        if verbose:
            g.message("Restart iteration %d" % it)
        # extend the factorization from Nk to Nm steps
        for k in range(Nk, Nm):
            self.step(mat, ev, lme, evec, f, Nm, k)
        f *= lme[Nm - 1]
        # eigenvalues: copy the tridiagonal data for destructive diagonalization
        for k in range(Nm):
            ev2[k] = ev[k + k1 - 1]
            lme2[k] = lme[k + k1 - 1]
        # diagonalize
        t0 = g.time()
        Qt = np.identity(Nm, dtype)
        self.diagonalize(ev2, lme2, Nm, Qt)
        t1 = g.time()
        if verbose:
            g.message("Diagonalization took %g s" % (t1 - t0))
        # sort descending; ev2_copy keeps the unsorted order matching Qt
        ev2_copy = ev2.copy()
        ev2 = list(reversed(sorted(ev2)))
        # implicitly shifted QR transformations using the Np smallest Ritz
        # values ev2[k2:Nm] as shifts
        Qt = np.identity(Nm, dtype)
        t0 = g.time()
        for ip in range(k2, Nm):
            g.qr_decomp(ev, lme, Nm, Nm, Qt, ev2[ip], k1, Nm)
        t1 = g.time()
        if verbose:
            g.message("QR took %g s" % (t1 - t0))
        # rotate the basis by the accumulated QR transformation
        t0 = g.time()
        g.rotate(evec, Qt, k1 - 1, k2 + 1, 0, Nm)
        t1 = g.time()
        if verbose:
            g.message("Basis rotation took %g s" % (t1 - t0))
        # compression back to a length-k2 factorization: rebuild the residual
        f *= Qt[k2 - 1, Nm - 1]
        f += lme[k2 - 1] * evec[k2]
        beta_k = g.norm2(f)**0.5
        betar = 1.0 / beta_k
        evec[k2] @= betar * f
        lme[k2 - 1] = beta_k
        if verbose:
            g.message("beta_k = ", beta_k)
        # convergence test
        if it >= self.params["Nminres"]:
            if verbose:
                g.message("Rotation to test convergence")
            # diagonalize the compressed factorization
            for k in range(Nm):
                ev2[k] = ev[k]
                lme2[k] = lme[k]
            Qt = np.identity(Nm, dtype)
            t0 = g.time()
            self.diagonalize(ev2, lme2, Nk, Qt)
            t1 = g.time()
            if verbose:
                g.message("Diagonalization took %g s" % (t1 - t0))
            B = g.copy(evec[0])
            allconv = True
            if beta_k >= self.params["betastp"]:
                # test Ritz vectors j = Nstop-1, Nstop-2, Nstop-4, ... (doubling stride)
                jj = 1
                while jj <= Nstop:
                    j = Nstop - jj
                    g.linear_combination(B, evec[0:Nk], Qt[j, 0:Nk])
                    B *= 1.0 / g.norm2(B)**0.5
                    if not ckpt.load(v):
                        mat(B, v)  # in, out
                        ckpt.save(v)
                    ev_test = g.innerProduct(B, v).real
                    eps2 = g.norm2(v - ev_test * B) / lambda_max**2.0
                    if verbose:
                        g.message(
                            "%-65s %-45s %-50s" %
                            ("ev[ %d ] = %s" % (j, ev2_copy[j]),
                             "<B|M|B> = %s" % (ev_test),
                             "|M B - ev B|^2 / ev_max^2 = %s" % (eps2)))
                    if eps2 > self.params["resid"]:
                        allconv = False
                    if jj == Nstop:
                        break
                    jj = min([Nstop, 2 * jj])
            if allconv:
                if verbose:
                    g.message("Converged in %d iterations" % it)
                break
    # final rotation of the converged Ritz vectors into the leading Nstop slots
    t0 = g.time()
    g.rotate(evec, Qt, 0, Nstop, 0, Nk)
    t1 = g.time()
    if verbose:
        g.message("Final basis rotation took %g s" % (t1 - t0))
    return (evec[0:Nstop], ev2_copy[0:Nstop])
def step(self, mat, lmd, lme, evec, w, Nm, k):
    """One Lanczos step: w = M evec[k] orthogonalized against the basis.

    Stores alpha = <evec[k]|M|evec[k]> into lmd[k] and the residual norm
    beta into lme[k]; the normalized residual becomes evec[k+1].  Results
    may be loaded from / saved to the checkpointer self.ckpt.
    """
    assert (k < Nm)
    verbose = g.default.is_verbose("irl")
    ckpt = self.ckpt
    alph = 0.0
    beta = 0.0
    evec_k = evec[k]
    results = [w, alph, beta]
    if ckpt.load(results):
        # use checkpoint; assumes ckpt.load fills `results` in place — confirm
        w, alph, beta = results
        if verbose:
            g.message("%-65s %-45s" % ("alpha[ %d ] = %s" % (k, alph),
                                       "beta[ %d ] = %s" % (k, beta)))
    else:
        # compute w = M evec[k]  (mat is applied as mat(in, out))
        t0 = g.time()
        mat(evec_k, w)
        t1 = g.time()
        # allow to restrict maximal number of applications within run
        self.napply += 1
        if "maxapply" in self.params:
            if self.napply == self.params["maxapply"]:
                if verbose:
                    g.message(
                        "Maximal number of matrix applications reached")
                sys.exit(0)
        # three-term recurrence: subtract beta_{k-1} * evec[k-1]
        if k > 0:
            w -= lme[k - 1] * evec[k - 1]
        zalph = g.innerProduct(evec_k, w)
        alph = zalph.real
        w -= alph * evec_k
        beta = g.norm2(w)**0.5
        w /= beta
        t2 = g.time()
        # full re-orthogonalization against all previous vectors
        if k > 0:
            g.orthogonalize(w, evec[0:k])
        t3 = g.time()
        ckpt.save([w, alph, beta])
        if verbose:
            g.message("%-65s %-45s %-50s" %
                      ("alpha[ %d ] = %s" % (k, zalph),
                       "beta[ %d ] = %s" % (k, beta),
                       " timing: %g s (matrix), %g s (ortho)" %
                       (t1 - t0, t3 - t2)))
    lmd[k] = alph
    lme[k] = beta
    if k < Nm - 1:
        evec[k + 1] @= w
#!/usr/bin/env python3 # # Authors: Christoph Lehner 2020 # # Desc.: Illustrate core concepts and features # import gpt as g import numpy as np import sys # load configuration fine_grid = g.grid([8, 8, 8, 16], g.single) # basis n = 31 basis = [g.vcomplex(fine_grid, 30) for i in range(n)] rng = g.random("block_seed_string_13") rng.cnormal(basis) # gram-schmidt for i in range(n): basis[i] /= g.norm2(basis[i]) ** 0.5 g.orthogonalize(basis[i], basis[:i]) for j in range(i): eps = g.innerProduct(basis[j], basis[i]) g.message(" <%d|%d> =" % (j, i), eps) assert abs(eps) < 1e-6
def orthogonalize(w, basis, ips=None):
    """Orthogonalize w in place against basis (modified Gram-Schmidt).

    If ips is given, the computed inner products <basis[j]|w> are stored
    at ips[j].
    """
    record = ips is not None
    for index, direction in enumerate(basis):
        coeff = gpt.innerProduct(direction, w)
        w -= coeff * direction
        if record:
            ips[index] = coeff