def field_strength(U, mu, nu):
    """Clover-type field strength F_{mu,nu} built from the gauge links U."""
    assert mu != nu
    # Difference of the upper and lower staples in the (mu, nu) plane.
    staple_up = (
        g.cshift(U[nu], mu, 1) * g.adj(g.cshift(U[mu], nu, 1)) * g.adj(U[nu])
    )
    staple_down = g.cshift(
        g.adj(g.cshift(U[nu], mu, 1)) * g.adj(U[mu]) * U[nu], nu, -1
    )
    v = g.eval(staple_up - staple_down)
    # Combine the two plaquette orientations attached to the site.
    F = g.eval(U[mu] * v + g.cshift(v * U[mu], mu, -1))
    # Anti-hermitian part with the conventional 1/8 clover normalization.
    F @= 0.125 * (F - g.adj(F))
    return F
def __call__(self, link, staple, mask): verbose = g.default.is_verbose( "local_metropolis" ) # need verbosity categories [ performance, progress ] project_method = self.params["project_method"] step_size = self.params["step_size"] number_accept = 0 possible_accept = 0 t = g.timer("local_metropolis") t("action") action = g.component.real(-g.trace(link * g.adj(staple)) * mask) t("lattice") V = g.lattice(link) V_eye = g.identity(link) t("random") self.rng.normal_element(V, scale=step_size) t("update") V = g.where(mask, V, V_eye) link_prime = g.eval(V * link) action_prime = g.component.real(-g.trace(link_prime * g.adj(staple)) * mask) dp = g.component.exp(action - action_prime) rn = g.lattice(dp) t("random") self.rng.uniform_real(rn) t("random") accept = dp > rn accept *= mask number_accept += g.norm2(accept) possible_accept += g.norm2(mask) link @= g.where(accept, link_prime, link) t() g.project(link, project_method) # g.message(t) if verbose: g.message( f"Local metropolis acceptance rate: {number_accept / possible_accept}" )
def inv(dst_outer, src_outer):
    """
    Apply the combined inner/outer domain-wall inversion to src_outer.

    NOTE(review): this looks like a MADWF-style algorithm — an inner solve
    on a reduced fifth dimension corrects the outer Pauli-Villars-
    preconditioned solve — confirm against the surrounding module.
    """
    # fifth-dimension extent of the inner (cheap) operator
    Ls_inner = dwf_inner.F_grid.fdimensions[0]
    zero4d = g.lattice(dwf_outer.U_grid, src_outer.otype)
    zero4d[:] = 0
    # split the outer PV-preconditioned source into 4d slices
    c_s = sep(g.adj(P) * inv_dwf_outer_pv * src_outer)
    # inner solve acting only on the first slice, padded with zeros to Ls_inner
    y0prime = sep(
        g.adj(P)
        * inv_dwf_inner
        * dwf_inner_pv
        * P
        * mrg([c_s[0]] + [zero4d] * (Ls_inner - 1))
    )[0]
    # recombine: corrected first slice plus outer-operator update of the rest
    dst_outer @= P * mrg(
        [y0prime]
        + sep(
            g.adj(P)
            * inv_dwf_outer_pv
            * dwf_outer
            * P
            * mrg([g(-y0prime)] + c_s[1:])
        )[1:]
    )
def project(self, U, method):
    """Project U back towards the group, to first order in the unitarity defect."""
    if method in ("defect_right", "defect"):
        one = gpt.identity(U)
        # defect eps = (adj(U) U - 1) / 2; correct by multiplying (1 - eps) on the right
        defect = gpt.eval(0.5 * gpt.adj(U) * U - 0.5 * one)
        U @= U * (one - defect)
    elif method == "defect_left":
        one = gpt.identity(U)
        # defect eps = (U adj(U) - 1) / 2; correct from the left
        defect = gpt.eval(0.5 * U * gpt.adj(U) - 0.5 * one)
        U @= (one - defect) * U
    else:
        raise Exception("Unknown projection method")
def plaquette(U):
    """Average plaquette Re tr P_{mu,nu}(x), normalized to 1 for unit links."""
    # P_{mu,nu}(x) = U[mu](x) U[nu](x+mu) adj(U[mu](x+nu)) adj(U[nu](x))
    vol = float(U[0].grid.fsites)
    n_dirs = len(U)
    n_colors = U[0].otype.shape[0]
    total = 0.0
    for mu in range(1, n_dirs):
        for nu in range(mu):
            loop = (
                U[mu]
                * g.cshift(U[nu], mu, 1)
                * g.adj(g.cshift(U[mu], nu, 1))
                * g.adj(U[nu])
            )
            total += g.sum(g.trace(loop))
    # factor 2 accounts for summing only over ordered (mu, nu) pairs
    return 2.0 * total.real / vol / n_dirs / (n_dirs - 1) / n_colors
def __call__(self, links, site_fields=None):
    """
    Evaluate all registered gauge paths on the given links.

    Yields one evaluated lattice per path, followed by
    (index, buffer) pairs for each registered site field.

    Parameters:
        links: list of gauge link fields, one per dimension.
        site_fields: optional list of additional site fields; must match
            the number registered at construction.
    """
    # Fix: mutable default argument ([]) replaced by a None sentinel.
    if site_fields is None:
        site_fields = []
    assert len(site_fields) == self.n_site_fields
    assert len(links) == self.dim
    buffers = self.cshifts(links + site_fields)
    for p in self.paths:
        # current displacement from the path origin, per direction
        d = [0] * self.dim
        r = None
        for mu, distance in p.path:
            for step in range(abs(distance)):
                if distance > 0:
                    # forward link at the current site, then advance
                    factor = buffers[self.link_indices[mu][tuple(d)]]
                    d[mu] += 1
                else:
                    # step back first, then use the daggered link
                    d[mu] -= 1
                    factor = g.adj(buffers[self.link_indices[mu][tuple(d)]])
                r = factor if r is None else r * factor
        assert r is not None
        yield g.eval(r)
    for i in range(self.n_site_fields):
        site_fields_indices_i = self.site_fields_indices[i]
        for sfi in site_fields_indices_i:
            yield (sfi, buffers[site_fields_indices_i[sfi]])
def is_element(self, U):
    """Check that U is unitary with unit determinant, within precision tolerance."""
    identity_m = gpt.identity(U)
    identity_s = gpt.identity(gpt.complex(U.grid))
    # relative deviation from unitarity
    err2 = gpt.norm2(U * gpt.adj(U) - identity_m) / gpt.norm2(identity_m)
    # relative deviation of det(U) from one
    err2 += gpt.norm2(gpt.matrix.det(U) - identity_s) / gpt.norm2(identity_s)
    # consider additional determinant check
    return err2**0.5 < U.grid.precision.eps * 10.0
def check_unitarity(U, eps_ref):
    """Assert that U adj(U) equals the identity up to relative deviation eps_ref."""
    identity = g.lattice(U)
    identity[:] = np.eye(U.otype.shape[0], dtype=U.grid.precision.complex_dtype)
    eps = (g.norm2(U * g.adj(U) - identity) / g.norm2(identity)) ** 0.5
    g.message(f"Test unitarity: {eps}")
    assert eps < eps_ref
    # also run the object-type's own group-membership check
    U.otype.is_element(U)
def __init__(self, U, boundary_phases):
    """
    Build forward/backward covariant-shift operators from gauge links U.

    If boundary_phases is given, the link at the last slice in each
    direction is multiplied by the corresponding phase (twisted /
    (anti-)periodic boundary conditions).
    """
    self.nd = len(U)
    # copy so the caller's links are not modified by the phase application
    self.U = [gpt.copy(u) for u in U]
    self.L = U[0].grid.fdimensions
    if boundary_phases is not None:
        for mu in range(self.nd):
            # index tuple selecting only the last hyperplane in direction mu
            last_slice = tuple(
                [
                    self.L[mu] - 1 if mu == nu else slice(None, None, None)
                    for nu in range(self.nd)
                ]
            )
            self.U[mu][last_slice] = self.U[mu][last_slice] * boundary_phases[mu]

    # now take boundary_phase from params and apply here
    # cache daggered links for the backward shift
    self.Udag = [gpt.eval(gpt.adj(u)) for u in self.U]

    def _forward(mu):
        # factory binds mu by value to avoid late-binding closure bugs
        def wrap(dst, src):
            dst @= self.U[mu] * gpt.cshift(src, mu, +1)

        return wrap

    def _backward(mu):
        def wrap(dst, src):
            dst @= gpt.cshift(self.Udag[mu] * src, mu, -1)

        return wrap

    # backward shift is the exact inverse of the forward shift (links are unitary
    # up to the applied phases), hence it serves as inv_mat
    self.forward = [
        gpt.matrix_operator(mat=_forward(mu), inv_mat=_backward(mu))
        for mu in range(self.nd)
    ]
    self.backward = [o.inv() for o in self.forward]
def perform(self, root):
    """
    Contract half-perambulators into meson-field correlators and write them out.

    Loads propagator blocks of size sloppy_per_job, forms
    <np,sp| D^{-1} Gamma D^{-1} |n,s> for the gamma structures in `indices`,
    and writes time-sliced correlators to {root}/{self.name}/head.dat.
    """
    global basis_size, sloppy_per_job, T, current_config
    # re-load the gauge configuration only when it changed
    if current_config is not None and current_config.conf_file != self.conf_file:
        current_config = None
    if current_config is None:
        current_config = config(self.conf_file)

    output_correlator = g.corr_io.writer(f"{root}/{self.name}/head.dat")

    # <np,sp| D^{-1} Gamma D^{-1} |n,s> = < (D^{-1})^\dagger |np,sp> | Gamma | D^{-1} |n,s > >
    # = < Gamma5 D^{-1} Gamma5 |np,sp> | Gamma | D^{-1} |n,s > >
    # = < D^{-1} |np,sp> | Gamma5 Gamma | D^{-1} |n,s > > gamma5_sign[sp]
    gamma5_sign = [1.0, 1.0, -1.0, -1.0]
    # gamma-matrix indices of the insertions computed here
    indices = [0, 1, 2, 5]
    prec = {"sloppy": 0, "exact": 1}[self.solver]

    for i0 in range(0, basis_size, sloppy_per_job):
        # propagators for the i-block of the basis
        half_peramb_i = {}
        for l in g.load(
            f"{root}/{self.conf}/pm_{self.solver}_t{self.t}_i{i0}/propagators"
        ):
            for x in l:
                half_peramb_i[x] = l[x]
        for j0 in range(0, basis_size, sloppy_per_job):
            if j0 == i0:
                # diagonal block: reuse the already-loaded propagators
                half_peramb_j = half_peramb_i
            else:
                half_peramb_j = {}
                for l in g.load(
                    f"{root}/{self.conf}/pm_{self.solver}_t{self.t}_i{j0}/propagators"
                ):
                    for x in l:
                        half_peramb_j[x] = l[x]
            for i in range(i0, i0 + sloppy_per_job):
                for spin in range(4):
                    g.message(i, spin)
                    hp_i = half_peramb_i[f"t{self.t}s{spin}c{i}_{self.solver}"]
                    for mu in indices:
                        hp_i_gamma = g(g.gamma[5] * g.gamma[mu] * hp_i)
                        for spin_prime in range(4):
                            # contract against the whole j-block at once
                            slc_j = [
                                g(
                                    gamma5_sign[spin_prime]
                                    * g.adj(
                                        half_peramb_j[
                                            f"t{self.t}s{spin_prime}c{j}_{self.solver}"
                                        ]
                                    )
                                    * hp_i_gamma
                                )
                                for j in range(j0, j0 + sloppy_per_job)
                            ]
                            # time-slice sum over direction 3
                            slc = g.slice(slc_j, 3)
                            for j in range(j0, j0 + sloppy_per_job):
                                output_correlator.write(
                                    f"output/G{mu}_prec{prec}/n_{j}_{i}_s_{spin_prime}_{spin}_t_{self.t}",
                                    slc[j - j0],
                                )
    output_correlator.close()
def correlate(a, b, dims=None):
    """
    FFT-based correlation, c[x] = (1/vol) sum_y a[y]*adj(b[y+x]),
    optionally restricted to the given dimensions.
    """
    fft = gpt.fft(dims=dims)
    if dims is None:
        volume = a.grid.fsites
    else:
        volume = numpy.prod([a.grid.gdimensions[d] for d in dims])
    # the volume factor compensates the FFT normalization convention
    return fft(gpt(float(volume) * fft(a) * gpt.adj(fft(b))))
def scaled_project(self, scale, real):
    """Return scale * value, optionally projected onto its real/hermitian part."""
    if g.util.is_num(self.value):
        return scale * (self.value.real if real else self.value)
    if not real:
        return g(self.value * scale)
    # hermitian projection: (value + adj(value)) / 2, folded into the scale
    return g((g.adj(self.value) + self.value) * (scale / 2.0))
def staple(U, mu):
    """Daggered sum of all staples attached to direction mu."""
    total = g.lattice(U[0])
    total[:] = 0
    for nu in range(len(U)):
        if nu == mu:
            continue
        total += g.qcd.gauge.staple(U, mu, nu)
    return g(g.adj(total))
def conserved_vector_current(self, psi, psi_bar, mu, psi_bar_flavor=None):
    """
    Point-split conserved vector current in direction mu between psi_bar and psi.

    If psi_bar_flavor is None the same fermion action is used on both sides.
    """
    # only implemented for the isotropic action
    assert self.params["xi_0"] == 1.0 and self.params["nu"] == 1.0
    psi_shift = self.covariant_shift()
    if psi_bar_flavor is None:
        psi_bar_flavor = self
    psi_bar_shift = psi_bar_flavor.covariant_shift()
    # point-split current: (gamma_mu - 1) forward term plus (gamma_mu + 1) term
    # with the covariant shift acting on the barred field
    return gpt(
        +0.5
        * psi_bar
        * (gpt.gamma[mu].tensor() - gpt.gamma["I"].tensor())
        * psi_shift.forward[mu]
        * psi
        + 0.5
        * gpt.adj(psi_bar_shift.forward[mu](gpt.adj(psi_bar)))
        * (gpt.gamma[mu].tensor() + gpt.gamma["I"].tensor())
        * psi
    )
def traceless_hermitian(src):
    """Project src onto its traceless hermitian part; lists are mapped elementwise."""
    if isinstance(src, list):
        return [traceless_hermitian(element) for element in src]
    src = g.eval(src)
    n_colors = src.otype.shape[0]
    # hermitian part, then subtract the trace portion
    hermitian = g(0.5 * src + 0.5 * g.adj(src))
    hermitian -= g.identity(src) * g.trace(hermitian) / n_colors
    return hermitian
def coordinates(self, l, c=None):
    """
    Convert between a Lie-algebra element l and its coordinates in the
    generator basis: extract coordinates when c is None, otherwise
    reconstruct l from the coefficients c.
    """
    assert l.otype.__name__ == self.__name__
    gen = self.generators(l.grid.precision.complex_dtype)
    if c is not None:
        # rebuild l = sum_a c[a] * T_a
        l[:] = 0
        for coeff, T_a in zip(c, gen):
            l += coeff * T_a
        return
    # project onto each generator: c_a = tr(adj(l) T_a)
    return [gpt.eval(gpt.trace(gpt.adj(l) * T_a)) for T_a in gen]
def fundamental_to_adjoint(U_a, U_f):
    """Fill the adjoint-representation link U_a from the fundamental link U_f."""
    grid = U_f.grid
    gen = U_f.otype.cartesian().generators(grid.precision.complex_dtype)
    n = len(gen)
    # (U_a)_{ab} = 2 tr(T_a U_f T_b adj(U_f))
    components = {
        (a, b): gpt.eval(2.0 * gpt.trace(gen[a] * U_f * gen[b] * gpt.adj(U_f)))
        for a in range(n)
        for b in range(n)
    }
    gpt.merge_color(U_a, components)
def coordinates(self, l, c=None):
    """
    Convert between an element l and its coordinates c in a generator basis
    whose first half pairs with the real part and second half with the
    imaginary part of l.
    """
    assert l.otype.__name__ == self.__name__
    gen = self.generators(l.grid.precision.complex_dtype)
    if c is not None:
        # rebuild l from the coefficients
        l[:] = 0
        for coeff, T_a in zip(c, gen):
            l += coeff * T_a
        return
    nhalf = len(gen) // 2
    l_real = gpt.component.real(l)
    l_imag = gpt.component.imag(l)
    coords = [gpt.eval(gpt.trace(gpt.adj(l_real) * T_a)) for T_a in gen[0:nhalf]]
    coords += [gpt.eval(gpt.trace(gpt.adj(l_imag) * T_a)) for T_a in gen[0:nhalf]]
    return coords
def correlate_test_4d(a, b, x):
    """Reference 4d correlation: c[x] = (1/vol) sum_y a[y]*adj(b[y+x])."""
    shifted = g(g.adj(b))
    dims = a.grid.gdimensions
    volume = dims[0] * dims[1] * dims[2] * dims[3]
    for mu in range(4):
        # see core test: dst = g.cshift(src, 0, 1) -> dst[x] = src[x+1]
        shifted = g.cshift(shifted, mu, x[mu])
    return g.sum(a * shifted) / volume
def divergence(f, current):
    """Norm^2 of the backward divergence of the given current built from propagator f."""
    div = g.lattice(f)
    div[:] = 0
    # barred propagator: gamma5 adj(f) gamma5
    fbar = g(g.gamma[5] * g.adj(f) * g.gamma[5])
    for mu in range(4):
        j_mu = current(f, fbar, mu)
        # backward finite difference of the current
        div += j_mu - g.cshift(j_mu, mu, -1)
    return g.norm2(div)
def __init__(self, U, params):
    """Initialize from gauge field U and either a 'mass' or a 'kappa' parameter."""
    if "mass" in params:
        # mass and kappa are mutually exclusive
        assert "kappa" not in params
        # kappa = 1 / (2 (m + 4)); keep the original evaluation order bit-for-bit
        self.kappa = 1.0 / (params["mass"] + 4.0) / 2.0
    else:
        self.kappa = params["kappa"]
    self.U = U
    # cache daggered links
    self.Udag = [g.eval(g.adj(u)) for u in U]
def innerProduct(a, b):
    """Inner product <a, b>; tensors contract directly, lattices go through cgpt."""
    if type(a) == gpt.tensor and type(b) == gpt.tensor:
        return gpt.adj(a) * b
    a = gpt.eval(a)
    b = gpt.eval(b)
    assert len(a.otype.v_idx) == len(b.otype.v_idx)
    # accumulate over all virtual-index components
    return sum(
        cgpt.lattice_innerProduct(a.v_obj[i], b.v_obj[i]) for i in a.otype.v_idx
    )
def stout_general(U, params):
    """One generic stout smearing step: U_mu -> exp(proj_TA(C_mu adj(U_mu))) U_mu."""
    C = g.qcd.gauge.staple_sum(U, params)
    smeared = []
    for mu in range(len(U)):
        # traceless anti-hermitian projection of the staple times daggered link
        omega = g.qcd.gauge.project.traceless_anti_hermitian(C[mu] * g.adj(U[mu]))
        smeared.append(g(g.matrix.exp(omega) * U[mu]))
    return smeared
def Udelta_average(U):
    """Compute < tr(Udelta adj(Udelta)) > per site, normalized by 36."""
    volume = float(U[0].grid.fsites)
    Udelta = g.lattice(U[0].grid, U[0].otype)
    Udelta[:] = 0.0
    # sum the elementary body-diagonal paths over all orderings of the spatial axes
    for i, j, k in permutations([0, 1, 2]):
        step_j = g.cshift(U[j], i, 1)
        step_k = g.cshift(g.cshift(U[k], i, 1), j, 1)
        Udelta += U[i] * step_j * step_k
    return g.sum(g.trace(Udelta * g.adj(Udelta))).real / volume / 36.0
def __call__(self, phi):
    """
    Evaluate the scalar phi^4-type action summed over all fields in phi.

    Per field p: hopping term -2 kappa Re<J, adj(p)> with J the sum of
    forward neighbors, plus |p|^2, plus the quartic term when self.l != 0.
    """
    J = None
    act = 0.0
    for p in g.core.util.to_list(phi):
        if J is None:
            J = g.lattice(p)
        # reset the neighbor sum for each field
        # NOTE(review): reconstructed indentation places this outside the
        # `if` so J is cleared per field — confirm against upstream source
        J[:] = 0
        for mu in range(p.grid.nd):
            J += g.cshift(p, mu, 1)
        # hopping (kinetic) term
        act += -2.0 * self.kappa * g.inner_product(J, g.adj(p)).real
        p2 = g.norm2(p)
        act += p2
        if self.l != 0.0:
            # quartic self-interaction: lambda (|p|^4 - 2 |p|^2 + V)
            p4 = g.norm2(p * g.adj(p))
            act += self.l * (p4 - 2.0 * p2 + p.grid.fsites)
    return act
def innerProductNorm2(a, b):
    """Return the pair (<a, b>, |a|^2) computed in one pass over the lattice data."""
    if type(a) == gpt.tensor and type(b) == gpt.tensor:
        return gpt.adj(a) * b, a.norm2()
    a = gpt.eval(a)
    b = gpt.eval(b)
    assert len(a.otype.v_idx) == len(b.otype.v_idx)
    # each entry is a (inner_product, norm2) pair for one virtual-index component
    pairs = [
        cgpt.lattice_innerProductNorm2(a.v_obj[i], b.v_obj[i])
        for i in a.otype.v_idx
    ]
    ip_total = sum(p[0] for p in pairs)
    n2_total = sum(p[1] for p in pairs)
    return ip_total, n2_total
def verify_matrix_element(mat, dst, src, tag):
    """
    Verify consistency of a matrix_operator's adjoint and inverse via
    matrix elements <dst| mat |src>; returns X = <dst, mat src>.
    """
    src_prime = g.eval(mat * src)
    # keep dst on the same checkerboard as the operator output
    dst.checkerboard(src_prime.checkerboard())
    X = g.inner_product(dst, src_prime)
    eps_ref = src.grid.precision.eps * 50.0
    if mat.adj_mat is not None:
        # <dst, M src> must equal conj(<src, adj(M) dst>)
        X_from_adj = g.inner_product(src, g.adj(mat) * dst).conjugate()
        eps = abs(X - X_from_adj) / abs(X)
        g.message(f"Test adj({tag}): {eps}")
        assert eps < eps_ref
    if mat.inv_mat is not None:
        # M inv(M) must reproduce src
        eps = (g.norm2(src - mat * g.inv(mat) * src) / g.norm2(src)) ** 0.5
        g.message(f"Test inv({tag}): {eps}")
        assert eps < eps_ref
        # NOTE(review): this adjoint-of-inverse test also needs adj_mat;
        # reconstructed as part of the inv_mat branch — confirm upstream
        Y = g.inner_product(dst, g.inv(g.adj(mat)) * src)
        Y_from_adj = g.inner_product(src, g.inv(mat) * dst).conjugate()
        eps = abs(Y - Y_from_adj) / abs(Y)
        g.message(f"Test adj(inv({tag})): {eps}")
        assert eps < eps_ref
    return X
def project(self, U, method):
    """
    Project U back onto SU(Nc) to first order: first normalize the
    determinant, then remove the unitarity defect from the chosen side.
    """
    if method in ("defect_right", "defect"):
        # det(V) = 1 + Tr(eps) + O(eps^2); divide out det(U)^(1/Nc)
        U *= gpt.component.pow(-1.0 / self.Nc)(gpt.matrix.det(U))
        # V = V0 (1 + eps), eps hermitian, V0 unitary:
        # dag(V) V - 1 = 2 eps + O(eps^2)
        # so multiply from the right with 1 - eps = 1 - (dag(V) V - 1)/2
        one = gpt.identity(U)
        eps = gpt.eval(0.5 * gpt.adj(U) * U - 0.5 * one)
        U @= U * (one - eps)
    elif method == "defect_left":
        U *= gpt.component.pow(-1.0 / self.Nc)(gpt.matrix.det(U))
        # V = (1 + eps) V0: V dag(V) - 1 = 2 eps + O(eps^2)
        # so multiply from the left with 1 - eps
        one = gpt.identity(U)
        eps = gpt.eval(0.5 * U * gpt.adj(U) - 0.5 * one)
        U @= (one - eps) * U
    else:
        raise Exception("Unknown projection method")
def conserved_vector_current(self, psi_left, psi_right, mu, psi_left_flavor=None):
    """
    Point-split conserved vector current in direction mu between psi_left
    and psi_right; the bar of psi_left is formed internally via gamma5.

    If psi_left_flavor is None the same fermion action is used on both sides.
    """
    # only implemented for the isotropic action
    assert self.params["xi_0"] == 1.0 and self.params["nu"] == 1.0
    psi_right_shift = self.covariant_shift()
    if psi_left_flavor is None:
        psi_left_flavor = self
    psi_left_shift = psi_left_flavor.covariant_shift()
    # not defined for a daggered operator
    assert not self.daggered
    # psi_left_bar = gamma5 adj(psi_left) gamma5
    psi_left_bar = gpt(gpt.gamma[5] * gpt.adj(psi_left) * gpt.gamma[5])
    return gpt(
        +0.5
        * psi_left_bar
        * (gpt.gamma[mu].tensor() - gpt.gamma["I"].tensor())
        * psi_right_shift.forward[mu]
        * psi_right
        + 0.5
        * gpt.adj(psi_left_shift.forward[mu](gpt.adj(psi_left_bar)))
        * (gpt.gamma[mu].tensor() + gpt.gamma["I"].tensor())
        * psi_right
    )
def __call__(self, fields):
    """Apply one stout smearing step to the gauge links; extra fields pass through."""
    nd = fields[0].grid.nd
    U = fields[0:nd]
    C = g.qcd.gauge.staple_sum(U, rho=get_rho(U, self.params))
    # U_mu -> exp(proj_TA(C_mu adj(U_mu))) U_mu
    smeared = [
        g(
            g.matrix.exp(
                g.qcd.gauge.project.traceless_anti_hermitian(C[mu] * g.adj(U[mu]))
            )
            * U[mu]
        )
        for mu in range(nd)
    ]
    return smeared + fields[nd:]