def chroma_sigma_star(prop_1, prop_2, prop_3, spm, polm, diquark):
    """
    Sigma* (decuplet) two-point contraction, presumably following Chroma's
    conventions (name suggests it) -- TODO confirm against Chroma source.

    Parameters:
        prop_1, prop_2, prop_3: quark propagators
        spm:  spin (interpolator) matrix inserted between propagators
        polm: polarization/projection matrix applied before tracing
        diquark: precomputed diquark contraction (see commented line below
                 for how it was originally built)

    Returns the fully evaluated contraction (sum of three diquark terms).
    """
    # original construction of the diquark argument, kept for reference:
    # diquark = quark_contract_13(gpt.eval(prop_1 * spm), gpt.eval(spm * prop_3))

    # term 1: spin-traced diquark closed with prop_2
    contraction = gpt.trace(gpt.eval(polm * prop_2 * gpt.spin_trace(diquark)))

    # term 2: contract prop_2 against (spm prop_3 spm) via quark_contract_24
    diquark2 = quark_contract_24(prop_2, gpt.eval(spm * prop_3 * spm))
    contraction += gpt.trace(gpt.eval(prop_1 * polm * diquark2))

    # term 3: reuse diquark2 storage for the quark_contract_13 combination
    diquark2 @= quark_contract_13(prop_1, gpt.eval(spm * prop_3))
    contraction += gpt.trace(gpt.eval(polm * prop_2 * spm * diquark2))
    return gpt.eval(contraction)
def __call__(self, link, staple, mask): verbose = g.default.is_verbose( "metropolis" ) # need verbosity categories [ performance, progress ] project_method = self.params["project_method"] step_size = self.params["step_size"] number_accept = 0 possible_accept = 0 t = g.timer("metropolis") t("action") action = g.component.real(g.eval(-g.trace(link * g.adj(staple)) * mask)) t("lattice") V = g.lattice(link) V_eye = g.identity(link) t("random") self.rng.element(V, scale=step_size, normal=True) t("update") V = g.where(mask, V, V_eye) link_prime = g.eval(V * link) action_prime = g.component.real( g.eval(-g.trace(link_prime * g.adj(staple)) * mask)) dp = g.component.exp(g.eval(action - action_prime)) rn = g.lattice(dp) t("random") self.rng.uniform_real(rn) t("random") accept = dp > rn accept *= mask number_accept += g.norm2(accept) possible_accept += g.norm2(mask) link @= g.where(accept, link_prime, link) t() g.project(link, project_method) # g.message(t) if verbose: g.message( f"Metropolis acceptance rate: {number_accept / possible_accept}" )
def baryon_decuplet_base_contraction(prop_1, prop_2, diquarks, pol_matrix):
    """
    Base contraction for decuplet baryon two-point functions.

    Parameters:
        prop_1, prop_2: quark propagators
        diquarks:       list of (at least) three precomputed diquark
                        contractions entering the different Wick terms
        pol_matrix:     polarization/projection matrix

    Returns the weighted sum of the five Wick contraction terms
    (first four terms carry an overall factor 2).
    """
    assert isinstance(diquarks, list)
    # terms built from diquarks[0]: spin-traced and direct insertion
    contraction = gpt.trace(
        gpt.eval(pol_matrix * gpt.color_trace(prop_2 * gpt.spin_trace(diquarks[0])))
        + gpt.eval(pol_matrix * gpt.color_trace(prop_2 * diquarks[0]))
    )
    contraction += gpt.eval(gpt.trace(pol_matrix * gpt.color_trace(prop_2 * diquarks[1])))
    contraction += gpt.eval(gpt.trace(pol_matrix * gpt.color_trace(prop_1 * diquarks[2])))
    # the four terms above enter with relative weight 2
    contraction *= 2
    contraction += gpt.eval(gpt.trace(pol_matrix * gpt.color_trace(prop_1 * gpt.spin_trace(diquarks[2]))))
    return contraction
def fundamental_to_adjoint(U_a, U_f):
    """
    Fill U_a with the adjoint-representation image of the fundamental
    gauge field U_f, component-wise via
        (U_a)_{ab} = 2 tr[ T_a U_f T_b U_f^dag ]
    using the generators of the cartesian (algebra) type of U_f.
    """
    grid = U_f.grid
    T = U_f.otype.cartesian().generators(grid.precision.complex_dtype)
    n = len(T)
    components = {
        (a, b): gpt.eval(2.0 * gpt.trace(T[a] * U_f * T[b] * gpt.adj(U_f)))
        for a in range(n)
        for b in range(n)
    }
    gpt.merge_color(U_a, components)
def traceless_hermitian(src):
    """
    Return the traceless hermitian part of src:
        H = (src + src^dag)/2 - (tr H / N) * 1
    Lists are handled recursively, element by element.
    """
    if isinstance(src, list):
        return [traceless_hermitian(element) for element in src]
    src = g.eval(src)
    n = src.otype.shape[0]
    hermitian = g(0.5 * src + 0.5 * g.adj(src))
    hermitian -= g.identity(src) * g.trace(hermitian) / n
    return hermitian
def energy_density(U, field=False):
    """
    Gluonic energy density built from the clover field strength:
        E = - sum_{mu > nu} tr[ F_{mu nu} F_{mu nu} ]
    With field=True a lattice field is accumulated instead of the
    volume average (choice of accumulator).
    """
    accumulator = accumulator_field if field else accumulator_average
    res = accumulator(U[0])
    for mu in range(len(U)):
        for nu in range(mu):
            F = field_strength(U, mu, nu)
            res += g.trace(F * F)
    return res.scaled_real(-1.0)
def coordinates(self, l, c=None):
    """
    Two-way map between an algebra element l and its coordinates.

    c is None:  return the coordinate list, splitting l into real and
                imaginary parts and projecting each onto the first half
                of the generators (this otype carries a doubled basis).
    otherwise:  overwrite l in place with sum_a c[a] * T_a.
    """
    assert l.otype.__name__ == self.__name__
    gen = self.generators(l.grid.precision.complex_dtype)
    if c is not None:
        # assemble l from the given coordinates
        l[:] = 0
        for coeff, Ta in zip(c, gen):
            l += coeff * Ta
        return
    nhalf = len(gen) // 2
    coords = []
    for component in (gpt.component.real(l), gpt.component.imag(l)):
        coords += [gpt.eval(gpt.trace(gpt.adj(component) * Ta)) for Ta in gen[0:nhalf]]
    return coords
def coordinates(self, l, c=None):
    """
    Two-way map between an algebra element l and its coordinates.

    c is None:  return [ tr(l^dag T_a) for each generator T_a ].
    otherwise:  overwrite l in place with sum_a c[a] * T_a.
    """
    assert l.otype.__name__ == self.__name__
    gen = self.generators(l.grid.precision.complex_dtype)
    if c is not None:
        l[:] = 0
        for coeff, Ta in zip(c, gen):
            l += coeff * Ta
        return
    return [gpt.eval(gpt.trace(gpt.adj(l) * Ta)) for Ta in gen]
def plaquette(U):
    """
    Mean plaquette
        < Re tr[ U_mu(x) U_nu(x+mu) U_mu(x+nu)^dag U_nu(x)^dag ] >
    normalized to 1 for unit links.

    Generalized from the previous hard-coded 4 dimensions / 3 colors:
    the number of dimensions is taken from len(U) and the matrix
    dimension from the link otype, matching the generalized plaquette
    implementation used elsewhere in this codebase.  For Nd=4, Nc=3
    the result is identical to the old 2*tr/vol/4/3/3 normalization.
    """
    tr = 0.0
    vol = float(U[0].grid.gsites)
    Nd = len(U)
    ndim = U[0].otype.shape[0]
    for mu in range(Nd):
        for nu in range(mu):
            tr += g.sum(
                g.trace(
                    U[mu]
                    * g.cshift(U[nu], mu, 1)
                    * g.adj(g.cshift(U[mu], nu, 1))
                    * g.adj(U[nu])
                )
            )
    # 2/(Nd*(Nd-1)) counts the mu>nu planes; /ndim normalizes the trace
    return 2.0 * tr.real / vol / Nd / (Nd - 1) / ndim
def polyakov_loop(U, mu):
    """
    Volume-averaged Polyakov loop in direction mu:
        < tr prod_j U_mu(m, j) > / Nc
    built by repeatedly shifting the partial product and multiplying
    the local link in front.
    """
    vol = float(U[0].grid.fsites)
    Nc = U[0].otype.Nc
    loop = g.copy(U[mu])
    extent = U[0].grid.fdimensions[mu]
    for _ in range(1, extent):
        loop = g.eval(U[mu] * g.cshift(loop, mu, 1))
    return g.sum(g.trace(loop)) / Nc / vol
def project_onto_suN(dest, u_unprojected, params):
    """
    Iteratively project `dest` onto SU(3) such that it maximizes
    Re tr[dest * u_unprojected], using repeated project_to_suN_step calls.

    params: dict with keys "max_iteration" and "accuracy" (relative
    change of the normalized trace used as convergence criterion).
    Raises RuntimeError if the iteration does not converge.
    Mutates `dest` in place and reports timing when the
    "project_onto_suN" verbosity category is enabled.
    """
    t_total = -gpt.time()
    t_trace, t_projectstep = 0.0, 0.0
    vol = dest.grid.fsites
    t_trace -= gpt.time()
    # normalization: volume times 3 (Nc for SU(3); the error message below
    # confirms this routine is SU(3)-specific).  Note the factor cancels in
    # the relative epsilon, it only affects the reported trace scale.
    old_trace = gpt.sum(gpt.trace(dest * u_unprojected)).real / (vol * 3)
    t_trace += gpt.time()
    for _ in range(params["max_iteration"]):
        # perform a single projection step
        t_projectstep -= gpt.time()
        project_to_suN_step(dest, u_unprojected)
        t_projectstep += gpt.time()

        # calculate new trace
        t_trace -= gpt.time()
        new_trace = gpt.sum(gpt.trace(dest * u_unprojected)).real / (vol * 3)
        t_trace += gpt.time()

        epsilon = np.abs((new_trace - old_trace) / old_trace)
        gpt.message(f"APE iter {_}, epsilon: {epsilon}")
        if epsilon < params["accuracy"]:
            break
        old_trace = new_trace
    else:
        # for/else: only reached when the loop ran out of iterations
        raise RuntimeError("Projection to SU(3) did not converge.")
    t_total += gpt.time()

    if gpt.default.is_verbose("project_onto_suN"):
        t_profiled = t_trace + t_projectstep
        t_unprofiled = t_total - t_profiled
        gpt.message("project_onto_suN: total", t_total, "s")
        gpt.message("project_onto_suN: t_trace", t_trace, "s",
                    round(100 * t_trace / t_total, 1), "%")
        gpt.message("project_onto_suN: t_projectstep", t_projectstep, "s",
                    round(100 * t_projectstep / t_total, 1), "%")
        gpt.message("project_onto_suN: unprofiled", t_unprofiled, "s",
                    round(100 * t_unprofiled / t_total, 1), "%")
def Udelta_average(U):
    r"""
    Compute < tr[ Udelta * Udelta^\dagger ] > where Udelta sums, over all
    permutations (i, j, k) of the three spatial directions, the product of
    links along the path i -> j -> k across the unit cube.
    Normalization: volume and 36 = 6 permutations squared.
    """
    volume = float(U[0].grid.fsites)
    Udelta = g.lattice(U[0].grid, U[0].otype)
    Udelta[:] = 0.0
    for i, j, k in permutations([0, 1, 2]):
        Udelta += (
            U[i]
            * g.cshift(U[j], i, 1)
            * g.cshift(g.cshift(U[k], i, 1), j, 1)
        )
    return g.sum(g.trace(Udelta * g.adj(Udelta))).real / volume / 36.0
def coordinates(self, l, c=None):
    """
    Two-way map between an algebra element l and its coordinates.

    c is None:  return [ tr(l T_a) / tr(T_a T_a) for each generator ],
                i.e. projections normalized by the generator norms.
    otherwise:  overwrite l in place with sum_a c[a] * T_a.
    """
    assert l.otype.__name__ == self.__name__
    gen = self.generators(l.grid.precision.complex_dtype)
    if c is not None:
        l[:] = 0
        for coeff, Ta in zip(c, gen):
            l += coeff * Ta
        return
    norms = [numpy.trace(Ta.array @ Ta.array) for Ta in gen]
    return [gpt.eval(gpt.trace(l * Ta) / n) for n, Ta in zip(norms, gen)]
def plaquette(U):
    """
    Mean plaquette
        < Re tr[ U_mu(x) U_nu(x+mu) U_mu(x+nu)^dag U_nu(x)^dag ] >
    over all mu > nu planes, normalized to 1 for unit links
    (2/(Nd*(Nd-1)) plane count, 1/ndim trace normalization).
    """
    vol = float(U[0].grid.fsites)
    Nd = len(U)
    ndim = U[0].otype.shape[0]
    total = 0.0
    for mu in range(Nd):
        for nu in range(mu):
            loop = (
                U[mu]
                * g.cshift(U[nu], mu, 1)
                * g.adj(g.cshift(U[mu], nu, 1))
                * g.adj(U[nu])
            )
            total += g.sum(g.trace(loop))
    return 2.0 * total.real / vol / Nd / (Nd - 1) / ndim
def check_representation(U, eps_ref):
    """
    Verify that U belongs to the group whose algebra is spanned by the
    generators declared on its otype.

    Checks (a) generator normalization 2 tr[T_a T_b] = delta_ab and
    (b) that log(U)/i lies in the span of the generators: after removing
    all generator components the residual norm must vanish.
    """
    T = U.otype.generators(U.grid.precision.complex_dtype)
    n = len(T)
    # generator normalization
    for a in range(n):
        for b in range(n):
            overlap = 2.0 * g.trace(T[a] * T[b])
            expected = 1.0 if a == b else 0.0
            assert abs(overlap - expected) < eps_ref
    # project log(U)/i onto the generator basis; remainder must vanish
    algebra = g.matrix.log(U)
    algebra /= 1j
    n0 = g.norm2(algebra)
    for Ta in T:
        algebra -= Ta * g.trace(algebra * Ta) * 2.0
    eps = (g.norm2(algebra) / n0) ** 0.5
    g.message(f"Test representation: {eps}")
    assert eps < eps_ref
def fundamental_to_adjoint(U_a, U_f):
    """
    Convert a fundamental-representation gauge field to the adjoint
    representation.  For now only SU(2) is supported.

    Input:  fundamental gauge field U_f
    Output: adjoint gauge field U_a (filled in place), with components
            (U_a)_{ab} = 2 tr[ T_a U_f T_b U_f^dag ].
    """
    grid = U_f.grid
    generators = U_f.otype.generators(grid.precision.complex_dtype)
    n = len(generators)
    components = {}
    for row in range(n):
        Ta = generators[row]
        for col in range(n):
            Tb = generators[col]
            components[row, col] = g.eval(2.0 * g.trace(Ta * U_f * Tb * g.adj(U_f)))
    g.merge_color(U_a, components)
def contract(pos, prop, tag, may_save_prop=True):
    """
    Save the propagator (optionally) and write all configured two-point
    correlators for a source at position `pos`.

    Relies on enclosing-scope state: `params`, `output`,
    `output_correlator`, `operators`, `correlators` -- defined elsewhere
    in this file (not visible here).
    """
    t0 = pos[3]  # source time slice; correlators are rolled so t=0 is the source
    prop_tag = "%s/%s" % (tag, str(pos))

    # save propagators
    if params["save_propagators"] and may_save_prop:
        output.write({prop_tag: prop})
        output.flush()

    # create and save correlators
    for op_snk, op_src in correlators:
        G_snk = operators[op_snk]
        G_src = operators[op_src]
        # gamma5-hermiticity is used to build the backward propagator
        corr = g.slice(
            g.trace(G_src * g.gamma[5] * g.adj(prop) * g.gamma[5] * G_snk * prop), 3)
        # roll so the source time slice comes first
        corr = corr[t0:] + corr[:t0]
        corr_tag = "%s/snk%s-src%s" % (prop_tag, op_snk, op_src)
        output_correlator.write(corr_tag, corr)
        g.message("Correlator %s\n" % corr_tag, corr)
def __call__(self, link, staple, mask):
    """
    SU(2)-subgroup heat-bath update of `link` against `staple`, following
    the Kennedy/Pendleton accept-reject construction, applied to each SU(2)
    subgroup of the link's group in turn.  `mask` selects active sites.
    Mutates `link` in place and projects it back to the group at the end.
    """
    verbose = g.default.is_verbose(
        "su2_heat_bath"
    )  # need verbosity categories [ performance, progress ]
    project_method = self.params["project_method"]
    # params
    niter = self.params["niter"]
    # temporaries
    grid = link.grid
    u2 = g.lattice(grid, g.ot_matrix_su_n_fundamental_group(2))
    u2_eye = g.identity(u2)
    one = g.identity(g.complex(grid))
    zero = g.complex(grid)
    zero[:] = 0
    eps = g.complex(grid)
    eps[:] = grid.precision.eps * 10.0
    xr = [g.complex(grid) for i in range(4)]
    a = [g.complex(grid) for i in range(4)]
    two_pi = g.complex(grid)
    two_pi[:] = 2.0 * np.pi
    accepted = g.complex(grid)
    d = g.complex(grid)
    V_eye = g.identity(link)
    # pauli (times i, so they are su(2) group elements when exponentiated)
    pauli1, pauli2, pauli3 = tuple([g.lattice(u2) for i in range(3)])
    ident = g.identity(u2)
    pauli1[:] = 1j * np.array([[0, 1], [1, 0]], dtype=grid.precision.complex_dtype)
    pauli2[:] = 1j * np.array([[0, 1j], [-1j, 0]], dtype=grid.precision.complex_dtype)
    pauli3[:] = 1j * np.array([[1, 0], [0, -1]], dtype=grid.precision.complex_dtype)
    # counter: number of active (masked) sites
    num_sites = round(g.norm2(g.where(mask, one, zero)))
    # shortcuts
    inv = g.component.pow(-1.0)
    # go through subgroups
    for subgroup in link.otype.su2_subgroups():
        V = g.eval(link * g.adj(staple))
        # extract u2 subgroup following Kennedy/Pendleton
        link.otype.block_extract(u2, V, subgroup)
        # symmetrize into the form a0 + i a.sigma
        u2 @= u2 - g.adj(u2) + g.identity(u2) * g.trace(g.adj(u2))
        udet = g.matrix.det(u2)
        adet = g.component.abs(udet)
        nzmask = adet > eps
        # guard against (near-)zero determinant sites: replace by identity
        u2 @= g.where(nzmask, u2, u2_eye)
        udet = g.where(nzmask, udet, one)
        xi = g.eval(0.5 * g.component.sqrt(udet))
        u2 @= 0.5 * u2 * inv(xi)
        # make sure that su2 subgroup projection worked
        assert g.group.defect(u2) < u2.grid.precision.eps * 10.0
        xi @= 2.0 * xi
        alpha = g.component.real(xi)
        # main accept-reject loop; sites keep resampling until accepted
        it = 0
        num_accepted = 0
        accepted[:] = 0
        d[:] = 0
        while (num_accepted < num_sites) and (it < niter):
            self.rng.uniform_real(xr, min=0.0, max=1.0)
            xr[1] @= -g.component.log(xr[1]) * inv(alpha)
            xr[2] @= -g.component.log(xr[2]) * inv(alpha)
            xr[3] @= g.component.cos(g.eval(xr[3] * two_pi))
            xr[3] @= xr[3] * xr[3]
            xrsq = g.eval(xr[2] + xr[1] * xr[3])
            # only not-yet-accepted sites get a new candidate d
            d = g.where(accepted, d, xrsq)
            thresh = g.eval(one - d * 0.5)
            xrsq @= xr[0] * xr[0]
            newly_accepted = g.where(xrsq < thresh, one, zero)
            accepted = g.where(mask, g.where(newly_accepted, newly_accepted, accepted), zero)
            num_accepted = round(g.norm2(g.where(accepted, one, zero)))
            it += 1
        if verbose:
            g.message(f"SU(2)-heatbath update needed {it} / {niter} iterations")
        # update link: build the new su(2) element a0 + a.(i sigma)
        a[0] @= g.where(mask, one - d, zero)
        a123mag = g.component.sqrt(g.component.abs(one - a[0] * a[0]))
        # uniform direction on the 2-sphere for the (a1,a2,a3) part
        phi, cos_theta = g.complex(grid), g.complex(grid)
        self.rng.uniform_real([phi, cos_theta])
        phi @= phi * two_pi
        cos_theta @= (cos_theta * 2.0) - one
        sin_theta = g.component.sqrt(g.component.abs(one - cos_theta * cos_theta))
        a[1] @= a123mag * sin_theta * g.component.cos(phi)
        a[2] @= a123mag * sin_theta * g.component.sin(phi)
        a[3] @= a123mag * cos_theta
        ua = g.eval(a[0] * ident + a[1] * pauli1 + a[2] * pauli2 + a[3] * pauli3)
        b = g.where(mask, g.adj(u2) * ua, ident)
        link.otype.block_insert(V, b, subgroup)
        link @= g.where(accepted, V * link, link)
        # check unitarity of the pieces on accepted sites
        check = g.where(accepted, ua * g.adj(ua) - ident, 0.0 * ident)
        delta = (g.norm2(check) / g.norm2(ident)) ** 0.5
        assert delta < grid.precision.eps * 10.0
        check = g.where(accepted, b * g.adj(b) - ident, 0.0 * ident)
        delta = (g.norm2(check) / g.norm2(ident)) ** 0.5
        assert delta < grid.precision.eps * 10.0
        check = g.where(accepted, V * g.adj(V) - V_eye, 0.0 * V_eye)
        delta = (g.norm2(check) / g.norm2(V_eye)) ** 0.5
        assert delta < grid.precision.eps * 10.0
    # project
    g.project(link, project_method)
)
# NOTE(review): script fragment -- the call closed by ")" above starts
# before this chunk.
U = representation(grid)
rng.element(U)
check_unitarity(U, eps_ref)
check_representation(U, eps_ref)

################################################################################
# Test su2 subalgebras
################################################################################
for eps_ref, grid in [(1e-12, grid_dp)]:
    U = g.lattice(grid, g.ot_matrix_su_n_fundamental_group(3))
    u2 = g.lattice(grid, g.ot_matrix_su_n_fundamental_group(2))
    u2p = g.lattice(grid, g.ot_matrix_su_n_fundamental_group(2))
    for sg in U.otype.su2_subgroups():
        rng.element(u2)
        # symmetrized combination (same form used by the heat-bath code);
        # eps here is informational only and gets overwritten below
        u2p = g.eval(u2 - g.adj(u2) + g.identity(u2) * g.trace(g.adj(u2)))
        eps = (g.norm2(u2 - u2p) / g.norm2(u2))**0.5
        g.message(eps, g.norm2(u2), g.norm2(u2p))
        # round-trip insert/extract must reproduce u2
        U.otype.block_insert(U, u2, sg)
        u2p[:] = 0
        U.otype.block_extract(u2p, U, sg)
        eps = (g.norm2(u2 - u2p) / g.norm2(u2))**0.5
        g.message(eps, g.norm2(u2), g.norm2(u2p))
        assert eps < eps_ref
    check_unitarity(U, eps_ref)
    check_representation(U, eps_ref)
# NOTE(review): test-script fragment; src, val, zero, grid, msc, cv, cm,
# vsc, vc, ntest are defined before this chunk.
src[:, :, :, 0] = val
for x in range(grid.fdimensions[0]):
    for t in range(grid.fdimensions[3]):
        compare = val if t == 0 else zero
        eps = g.norm2(src[x, 0, 0, t] - compare)
        assert eps < 1e-13

# spin and color traces: lattice-wide trace must agree with the
# single-site tensor trace
mc = g.eval(g.spin_trace(msc))
assert g.norm2(mc[0, 0, 0, 0] - g.spin_trace(msc[0, 0, 0, 0])) < 1e-13
ms = g.eval(g.color_trace(msc))
assert g.norm2(ms[0, 0, 0, 0] - g.color_trace(msc[0, 0, 0, 0])) < 1e-13
# full trace equals trace of the partially traced fields
eps0 = g.norm2(g.trace(msc) - g.spin_trace(ms))
eps1 = g.norm2(g.trace(msc) - g.color_trace(mc))
assert eps0 < 1e-9 and eps1 < 1e-9

# create singlet by number
assert g.complex(0.5).array[0] == 0.5

# test expression -> string conversion;
# at this point only make sure that it
# produces a string without failing
g.message(
    f"Test string conversion of expression:\n{g.trace(0.5 * msc * msc - msc)}")

# left and right multiplication of different data types with scalar
mc = g.mcomplex(grid, ntest)
for dti in [cv, cm, vsc, msc, vc, mc]:
# NOTE(review): test-script fragment; w, dst_eo1, dst_eo2, src, slv, grid
# come from earlier in the file.  The correlator_ref list continues past
# this chunk.
eps2 = g.norm2(w * dst_eo1 - src) / g.norm2(src)
g.message("Result of M M^-1 = 1 test: eps2=", eps2)
assert eps2 < 1e-10

# and a reference
if True:
    dst = g.mspincolor(grid)
    dst @= slv * src
    eps2 = g.norm2(dst_eo1 - dst) / g.norm2(dst_eo1)
    g.message("Result of test EO1 versus G5M: eps2=", eps2)
    assert eps2 < 1e-10

dst = dst_eo2

# two-point
correlator = g.slice(g.trace(dst * g.adj(dst)), 3)

# test value of correlator
correlator_ref = [
    1.0710210800170898,
    0.08988216519355774,
    0.015699388459324837,
    0.003721018321812153,
    0.0010877142194658518,
    0.0003579717595130205,
    0.00012700144725386053,
    5.180457083042711e-05,
    3.406393443583511e-05,
    5.2738148951902986e-05,
    0.0001297977869398892,
    0.0003634534077718854,
# NOTE(review): test-script fragment; gre and U come from earlier in the
# file.  The sys.exit(0) looks like a deliberate early stop -- everything
# after it is unreachable as written; confirm intent before relying on it.
print(gre)
sys.exit(0)

# Calculate U^\dag U
u = U[0][0, 1, 2, 3]

v = g.vcolor([0, 1, 0])
g.message(g.adj(v))
g.message(g.adj(u) * u * v)

gr = g.grid([2, 2, 2, 2], g.single)
g.message(g.mspincolor(gr)[0, 0, 0, 0] * g.vspincolor(gr)[0, 0, 0, 0])
g.message(g.trace(g.mspincolor(gr)[0, 0, 0, 0]))

# Expression including numpy array
r = g.eval(u * U[0] + U[1] * u)
g.message(g.norm2(r))

# test inner and outer products
v = g.vspincolor([[0, 0, 0], [0, 0, 2], [0, 0, 0], [0, 0, 0]])
w = g.vspincolor([[0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 0, 0]])
xx = v * g.adj(w)
g.message(xx[1][3][2][0])
g.message(xx)
g.message(g.adj(v) * v)
g.message(g.transpose(v) * v)
# NOTE(review): test-script fragment; dst, ref, P, l come from earlier in
# the file and the final loop body continues past this chunk.
dst @= P * l
ref @= g.gamma[0] * g.gamma[1] * l - g.gamma[2] * g.gamma[3] * l
eps = g.norm2(dst - ref) / g.norm2(l)
g.message("Test Regular Expression: ", eps)
assert eps == 0.0

# test algebra versus matrix: applying a gamma in its algebra form must
# match applying its explicit tensor form, for all trace variants
for mu in [0, 1, 2, 3, 5, "I"]:
    for op in [
        lambda a, b: a * b,
        lambda a, b: b * a,
        lambda a, b: g.spin_trace(a * b),
        lambda a, b: g.spin_trace(b * a),
        lambda a, b: g.color_trace(a * b),
        lambda a, b: g.color_trace(b * a),
        lambda a, b: g.trace(a * b),
        lambda a, b: g.trace(b * a),
    ]:
        dst_alg = g(op(g.gamma[mu], l))
        dst_mat = g(op(g.gamma[mu].tensor(), l))
        eps2 = g.norm2(dst_alg - dst_mat) / g.norm2(dst_mat)
        g.message(f"Algebra<>Matrix {mu}: {eps2}")
        assert eps2 < 1e-14

# reconstruct and test the gamma matrix elements
for mu in g.gamma:
    gamma = g.gamma[mu]
    g.message("Test numpy matrix representation of", mu)
    gamma_mu_mat = np.identity(4, dtype=np.complex128)
    for j in range(4):
        # unit spin vector picking out column j
        c = g.vspin([1 if i == j else 0 for i in range(4)])
# Regression test: load a qlat-format propagator, compute the pion
# correlator, and compare against a stored reference file.
if "WORK_DIR" in os.environ:
    work_dir = os.environ["WORK_DIR"]
else:
    work_dir = "."

# request test files
files = ["psrc-prop-0.field", "pion-corr.txt"]
for f in files:
    gpt.repository.load(f"{work_dir}/{f}", f"gpt://tests/io/qlat/{f}")

# load field
prop = gpt.load(f"{work_dir}/psrc-prop-0.field")
gpt.message("Grid from qlat propagator =", prop.grid)

# calculate correlator (pion two-point via |prop|^2, sliced in time)
corr_pion = gpt.slice(gpt.trace(gpt.adj(prop) * prop), 3)

# load reference
with open(f"{work_dir}/pion-corr.txt", "r") as f:
    txt = f.readlines()

# read lines corresponding to real part of time slices and
# check difference w.r.t. what we have loaded above
for i in range(8):
    # every other line holds the real part; strip trailing newline
    ref = float(txt[1 + i * 2].split(" ")[-1][:-1])
    diff = abs(ref - corr_pion[i].real)
    assert diff < 1e-7  # propagator was computed in single precision
    gpt.message("Time slice %d difference %g" % (i, diff))

gpt.message("Test successful")
# NOTE(review): test-script fragment; grid, src, rng and the solvers
# slv_qm / slv_qz / slv_madwf are constructed earlier in the file.
dst_qm = g.mspincolor(grid)
dst_qz = g.mspincolor(grid)
dst_qm @= slv_qm * src
dst_qz @= slv_qz * src

# test madwf against the direct DWF solve
src_sc = rng.cnormal(g.vspincolor(grid))
dst_madwf_sc = g(slv_madwf * src_sc)
dst_dwf_sc = g(slv_qm * src_sc)
eps2 = g.norm2(dst_madwf_sc - dst_dwf_sc) / g.norm2(dst_dwf_sc)
g.message(f"MADWF test: {eps2}")
assert eps2 < 5e-4

# two-point
correlator_qm = g.slice(g.trace(dst_qm * g.adj(dst_qm)), 3)
correlator_qz = g.slice(g.trace(dst_qz * g.adj(dst_qz)), 3)
correlator_ref = [
    0.4873415231704712,
    0.14763720333576202,
    0.021136583760380745,
    0.007964665070176125,
    0.005833963863551617,
    0.00796868372708559,
    0.021054629236459732,
    0.14703410863876343,
]

# output
eps_qm = 0.0
eps_qz = 0.0
# Test gauge invariance of R_2x1 R_2x1_transformed = g.qcd.gauge.rectangle(U_transformed, 2, 1) eps = abs(R_2x1 - R_2x1_transformed) g.message( f"R_2x1 before {R_2x1} and after {R_2x1_transformed} gauge transformation: {eps}" ) assert eps < 1e-13 # Without trace and real projection R_2x1_notp = g.qcd.gauge.rectangle(U_transformed, 2, 1, trace=False, real=False) eps = abs(g.trace(R_2x1_notp).real - R_2x1) g.message(f"R_2x1 no real and trace check: {eps}") assert eps < 1e-13 # Test field version R_2x1_field = g( g.sum(g.qcd.gauge.rectangle(U, 2, 1, field=True)) / U[0].grid.gsites) eps = abs(R_2x1 - R_2x1_field) g.message(f"R_2x1 field check: {eps}") assert eps < 1e-13 # Without trace and real projection and field R_2x1_notp = g.qcd.gauge.rectangle(U_transformed, 2, 1, trace=False,
# Test gauge invariance of R_2x1 R_2x1_transformed = g.qcd.gauge.rectangle(U_transformed, 2, 1) eps = abs(R_2x1 - R_2x1_transformed) g.message( f"R_2x1 before {R_2x1} and after {R_2x1_transformed} gauge transformation: {eps}" ) assert eps < 1e-13 # Test gauge covariance of staple rho = np.array([[0.0 if i == j else 0.1 for i in range(4)] for j in range(4)], dtype=np.float64) C = g.qcd.gauge.staple_sum(U, rho=rho) C_transformed = g.qcd.gauge.staple_sum(U_transformed, rho=rho) for mu in range(len(C)): q = g.sum(g.trace(C[mu] * g.adj(U[mu]))) / U[0].grid.gsites q_transformed = ( g.sum(g.trace(C_transformed[mu] * g.adj(U_transformed[mu]))) / U[0].grid.gsites) eps = abs(q - q_transformed) g.message( f"Staple q[{mu}] before {q} and after {q_transformed} gauge transformation: {eps}" ) assert eps < 1e-14 # Test stout smearing U_stout = U P_stout = [] for i in range(3): U_stout = g.qcd.gauge.smear.stout(U_stout, rho=0.1)
# test madwf with defect_correcting dst_madwf_dc_sc = g(slv_madwf_dc * src_sc) eps2 = g.norm2(dst_madwf_dc_sc - dst_dwf_sc) / g.norm2(dst_dwf_sc) g.message(f"MADWF defect_correcting test: {eps2}") assert eps2 < 1e-10 # propagator dst_qm = g.mspincolor(grid) dst_qz = g.mspincolor(grid) dst_qm @= slv_qm * src dst_qz @= slv_qz * src # two-point correlator_qm = g.slice( g.trace(g.gamma[0] * g.gamma[0] * dst_qm * g.gamma[0] * g.gamma[0] * g.adj(dst_qm)), 3, ) correlator_qz = g.slice(g.trace(dst_qz * g.adj(dst_qz)), 3) correlator_ref = [ 0.4873415231704712, 0.14763720333576202, 0.021136583760380745, 0.007964665070176125, 0.005833963863551617, 0.00796868372708559, 0.021054629236459732, 0.14703410863876343, ] # output
def project_to_traceless_anti_hermitian(src):
    """
    Project src onto its traceless anti-hermitian part:
        A = (src - src^dag)/2 - (tr A / N) * 1

    Now also accepts a list of fields/expressions, applied element-wise --
    matching the list handling of the companion `traceless_hermitian`
    routine.  Single-argument behavior is unchanged.
    """
    if isinstance(src, list):
        return [project_to_traceless_anti_hermitian(x) for x in src]
    src = g.eval(src)
    N = src.otype.shape[0]
    ret = g(0.5 * src - 0.5 * g.adj(src))
    # remove the trace part to land in the su(N)-like algebra
    ret -= g.identity(src) * g.trace(ret) / N
    return ret
def __call__(self, V):
    """
    Evaluate the functional -2 * Re sum_mu sum_x tr[ U'_mu(x) ] on the
    gauge field self.U transformed by V.
    """
    V = g.util.from_list(V)
    transformed_links = g.qcd.gauge.transformed(self.U, V)
    total = sum(g.sum(g.trace(u)) for u in transformed_links)
    return total.real * (-2.0)