def baryon_decuplet_base_contraction(prop_1, prop_2, diquarks, pol_matrix):
    """Base Wick contraction entering a decuplet-baryon two-point function.

    Parameters:
        prop_1, prop_2: quark propagators combined with the diquark blocks.
        diquarks: list with (at least) three precomputed diquark objects;
            entries 0-2 are consumed here.
        pol_matrix: polarization/spin-projection matrix applied before tracing.

    Returns the scalar contraction built from traces over spin and color.
    NOTE(review): the relative weights (factor 2 on the first three terms)
    follow the standard decuplet interpolator algebra — confirm against the
    caller's conventions.
    """
    assert isinstance(diquarks, list)
    # Terms using diquarks[0]: one spin-traced, one not, sharing prop_2.
    contraction = gpt.trace(
        gpt.eval(pol_matrix * gpt.color_trace(prop_2 * gpt.spin_trace(diquarks[0])))
        + gpt.eval(pol_matrix * gpt.color_trace(prop_2 * diquarks[0]))
    )
    contraction += gpt.eval(gpt.trace(pol_matrix * gpt.color_trace(prop_2 * diquarks[1])))
    contraction += gpt.eval(gpt.trace(pol_matrix * gpt.color_trace(prop_1 * diquarks[2])))
    # The first four terms enter with weight 2 relative to the last term.
    contraction *= 2
    contraction += gpt.eval(
        gpt.trace(pol_matrix * gpt.color_trace(prop_1 * gpt.spin_trace(diquarks[2])))
    )
    return contraction
def inner_product_norm2(a, b):
    """Return the pair (<a|b>, |a|^2) computed in a single pass.

    Tensor arguments are contracted directly; lattice expressions are
    evaluated and handed to the C kernel per internal vector component.
    """
    # todo, make local version of this too
    if type(a) == gpt.tensor and type(b) == gpt.tensor:
        return gpt.adj(a) * b, a.norm2()
    a = gpt.eval(a)
    b = gpt.eval(b)
    assert len(a.otype.v_idx) == len(b.otype.v_idx)
    inner_total = 0
    norm_total = 0
    for idx in a.otype.v_idx:
        inner_part, norm_part = cgpt.lattice_inner_product_norm2(
            a.v_obj[idx], b.v_obj[idx]
        )
        inner_total += inner_part
        norm_total += norm_part
    return inner_total, norm_total
def inv(A):
    """Return the site-wise matrix inverse of the lattice field A."""
    A = gpt.eval(A)
    assert type(A) == gpt.lattice
    result = gpt.lattice(A)
    as_list = gpt.util.to_list
    cgpt.invert_matrix(as_list(result), as_list(A))
    return result
def __init__(self, Nc):
    """Set up the su(Nc) fundamental-algebra matrix object type."""
    group_tag = f"ot_matrix_su_n_fundamental_group({Nc})"
    super().__init__(Nc, Nc, f"ot_matrix_su_n_fundamental_algebra({Nc})")
    # conversion table: exponentiate i * algebra element to reach the group type
    self.ctab = {
        group_tag: lambda dst, src: gpt.eval(dst, gpt.matrix.exp(src * 1j)),
    }
    self.CA = Nc
def rank_inner_product(a, b, use_accelerator=True):
    """Rank-local inner products between (lists of) tensors or lattices.

    Returns the full matrix of inner products when either argument is a
    list, otherwise a single number.
    """
    want_matrix = (type(a) == list) or (type(b) == list)
    left = gpt.util.to_list(a)
    right = gpt.util.to_list(b)
    if type(left[0]) == gpt.tensor and type(right[0]) == gpt.tensor:
        # tensor path: contract directly into a numpy matrix
        res = numpy.array(
            [[gpt.adj(x) * y for y in right] for x in left], dtype=numpy.complex128
        )
    else:
        # lattice path: evaluate expressions, then call the C kernel
        left = [gpt.eval(x) for x in left]
        right = [gpt.eval(x) for x in right]
        otype = left[0].otype
        assert len(otype.v_idx) == len(right[0].otype.v_idx)
        res = cgpt.lattice_rank_inner_product(left, right, use_accelerator)
    if want_matrix:
        return res
    return gpt.util.to_num(res[0, 0])
def __init__(self, U, boundary_phases):
    """Build covariant forward/backward shift operators from gauge links U.

    U: list of gauge link fields, one per dimension.
    boundary_phases: per-dimension phase factors applied to the links on the
        last slice in each direction, or None to leave links unchanged.
    """
    self.nd = len(U)
    # copy links so boundary phases do not modify the caller's fields
    self.U = [gpt.copy(u) for u in U]
    self.L = U[0].grid.fdimensions
    if boundary_phases is not None:
        for mu in range(self.nd):
            # select the last hyperplane in direction mu, full extent elsewhere
            last_slice = tuple(
                [
                    self.L[mu] - 1 if mu == nu else slice(None, None, None)
                    for nu in range(self.nd)
                ]
            )
            self.U[mu][last_slice] = self.U[mu][last_slice] * boundary_phases[mu]
    # now take boundary_phase from params and apply here
    self.Udag = [gpt.eval(gpt.adj(u)) for u in self.U]

    # factory functions bind mu per direction (avoids late-binding closures)
    def _forward(mu):
        def wrap(dst, src):
            dst @= self.U[mu] * gpt.cshift(src, mu, +1)

        return wrap

    def _backward(mu):
        def wrap(dst, src):
            dst @= gpt.cshift(self.Udag[mu] * src, mu, -1)

        return wrap

    self.forward = [
        gpt.matrix_operator(mat=_forward(mu), inv_mat=_backward(mu))
        for mu in range(self.nd)
    ]
    # backward operators are the inverses of the forward ones
    self.backward = [o.inv() for o in self.forward]
def __init__(self):
    """Set up the U(1) group object type."""
    super().__init__("ot_u_1_group()")
    algebra_key = "ot_u_1_algebra()"
    # conversion table: logarithm divided by i maps group -> algebra
    self.ctab = {
        algebra_key: lambda dst, src: gpt.eval(dst, gpt.component.log(src) / 1j),
    }
def det(A):
    """Return the site-wise determinant of the matrix field A as a complex lattice."""
    A = gpt.eval(A)
    assert type(A) == gpt.lattice
    result = gpt.complex(A.grid)
    cgpt.determinant(result.v_obj[0], gpt.util.to_list(A))
    return result
def __call__(self, links, site_fields=[]):
    """Yield the evaluated product of links along each configured path,
    followed by (index, buffer) pairs for every registered site field.

    links: list of link fields, one per dimension.
    site_fields: additional per-site fields shifted alongside the links.
        (Default [] is never mutated here, so the shared-default pitfall
        does not apply.)
    """
    assert len(site_fields) == self.n_site_fields
    assert len(links) == self.dim
    # precompute all shifted buffers needed by the paths
    buffers = self.cshifts(links + site_fields)
    for p in self.paths:
        d = [0 for mu in range(self.dim)]  # current displacement from origin
        r = None  # accumulated product along the path
        # pr = prof.Profile(timer=lambda: g.time()*100000.0)
        # pr.enable()
        for mu, distance in p.path:
            for step in range(abs(distance)):
                factor = None
                if distance > 0:
                    # forward step: take the link at the current site
                    factor = buffers[self.link_indices[mu][tuple(d)]]
                d[mu] += distance // abs(distance)
                if distance < 0:
                    # backward step: move first, then take the adjoint link
                    factor = g.adj(buffers[self.link_indices[mu][tuple(d)]])
                assert factor is not None
                if r is None:
                    r = factor
                else:
                    r = r * factor
        # pr.disable()
        # pr.print_stats(sort="cumulative")
        assert r is not None
        yield g.eval(r)
    for i in range(self.n_site_fields):
        site_fields_indices_i = self.site_fields_indices[i]
        for sfi in site_fields_indices_i:
            yield (sfi, buffers[site_fields_indices_i[sfi]])
def __call__(self, mat):
    """Return a matrix_operator that solves (mat + s) x = b for every shift s.

    The returned operator maps a source list src to the concatenation of
    the per-shift solutions (len(src) * len(self.shifts) outputs).
    """
    # one inverter per shift; s_val=s pins the loop variable at definition
    # time (avoids the late-binding closure pitfall)
    inverter_mat = [
        self.inverter(
            g.matrix_operator(
                mat=lambda dst, src, s_val=s: g.eval(dst, mat * src + s_val * src),
                accept_list=True,
            )
        )
        for s in self.shifts
    ]

    @self.timed_function
    def inv(dst, src, t):
        # j-th shifted solve writes into the j-th contiguous slice of dst
        for j, i in enumerate(inverter_mat):
            i(dst[j * len(src) : (j + 1) * len(src)], src)

    vector_space = None
    if isinstance(mat, g.matrix_operator):
        vector_space = mat.vector_space
    return g.matrix_operator(
        mat=inv,
        vector_space=vector_space,
        accept_guess=(True, False),
        accept_list=lambda src: len(src) * len(self.shifts),
    )
def log(i, convergence_threshold=0.5):
    """Matrix/field logarithm via a power series around a rescaled identity.

    i: lattice expression whose site-wise logarithm is returned.
    convergence_threshold: upper bound on the normalized deviation |x| of
        i/n from the identity; the series only converges below ~1.

    Raises AssertionError if the input is too far from the identity.
    """
    i = gpt.eval(i)
    # i = n*(1 + x), log(i) = log(n) + log(1+x)
    # x = i/n - 1, |x|^2 = <i/n - 1, i/n - 1> = |i|^2/n^2 + |1|^2 - (<i,1> + <1,i>)/n
    # d/dn |x|^2 = -2 |i|^2/n^3 + (<i,1> + <1,i>)/n^2 = 0 -> 2|i|^2 == n (<i,1> + <1,i>)
    if i.grid.precision != gpt.double:
        # run the series in double precision for accuracy
        x = gpt.convert(i, gpt.double)
    else:
        x = gpt.copy(i)
    lI = gpt.identity(gpt.lattice(x))
    # optimal scale n minimizing |i/n - 1|
    n = gpt.norm2(x) / gpt.inner_product(x, lI).real
    x /= n
    x -= lI
    n2 = gpt.norm2(x) ** 0.5 / x.grid.gsites
    # BUGFIX: validate convergence BEFORE computing the series order;
    # previously n2 >= 1 produced a ZeroDivisionError or a negative order
    # ahead of this guard.
    assert n2 < convergence_threshold
    # heuristic order: more terms the closer n2 is to 1
    order = 8 * int(16 / (-numpy.log10(n2)))
    o = gpt.copy(x)
    xn = gpt.copy(x)
    # log(1+x) = sum_j (-1)^(j+1) x^j / j
    for j in range(2, order + 1):
        xn @= xn * x
        o -= xn * ((-1.0) ** j / j)
    o += lI * numpy.log(n)
    if i.grid.precision != gpt.double:
        # convert the double-precision result back to the input precision
        r = gpt.lattice(i)
        gpt.convert(r, o)
        o = r
    return o
def norm2(l):
    """Return |l|^2 for a tensor, lattice, or lattice expression."""
    if type(l) == gpt.tensor:
        return l.norm2()
    l = gpt.eval(l)
    # only lattice objects are supported past this point
    assert type(l) == gpt.lattice
    total = 0
    for component in l.v_obj:
        total += cgpt.lattice_norm2(component)
    return total
def _Meooe(self, dst, src):
    """Apply the even-odd hopping term to src, writing the opposite
    checkerboard into dst.

    Combines covariant forward/backward shifts with the Wilson projectors
    (gamma_mu -/+ 1)/2; the last direction carries unit weight while the
    spatial directions are weighted by nu/xi_0 — presumably the anisotropy
    factor (TODO confirm against the action definition).
    """
    assert dst != src
    cb = src.checkerboard()
    # shift operators for this and the opposite checkerboard
    scb = self.checkerboard[cb]
    scbi = self.checkerboard[cb.inv()]
    dst.checkerboard(cb.inv())
    dst[:] = 0
    for mu in range(self.nd):
        src_plus = g.eval(scbi.forward[mu] * src)
        src_minus = g.eval(scb.backward[mu] * src)
        if mu == self.nd - 1:
            cc = 1.0
        else:
            cc = self.nu / self.xi_0
        dst += (
            cc / 2.0 * (g.gamma[mu] - g.gamma["I"]) * src_plus
            - cc / 2.0 * (g.gamma[mu] + g.gamma["I"]) * src_minus
        )
    self.apply_boundaries(dst)
def cshift(first, second, third, fourth=None):
    """Cyclic shift of a lattice field.

    Callable as cshift(src, dim, displacement) returning a new lattice, or
    as cshift(dst, src, dim, displacement) writing into dst.
    """
    has_explicit_dst = (
        type(first) == gpt.lattice and type(second) == gpt.lattice and fourth is not None
    )
    if has_explicit_dst:
        dst = first
        src_field = gpt.eval(second)
        dim = third
        displacement = fourth
    else:
        src_field = gpt.eval(first)
        dim = second
        displacement = third
        dst = gpt.lattice(src_field)
    for idx in dst.otype.v_idx:
        cgpt.cshift(dst.v_obj[idx], src_field.v_obj[idx], dim, displacement)
    return dst
def __init__(self, Nc):
    """Set up the adjoint-representation SU(Nc) group matrix object type."""
    algebra_tag = f"ot_matrix_su_n_adjoint_algebra({Nc})"
    super().__init__(Nc, Nc * Nc - 1, f"ot_matrix_su_n_adjoint_group({Nc})")
    # conversion table: logarithm divided by i maps group -> algebra
    self.ctab = {
        algebra_tag: lambda dst, src: gpt.eval(dst, gpt.matrix.log(src) / 1j),
    }
def trace(l, t=None):
    """Trace l over spin and/or color indices.

    t selects the trace type via expr_unary bits; by default both the spin
    and the color trace are taken. Tensors are traced immediately, other
    objects are wrapped into a deferred expression.
    """
    if t is None:
        t = gpt.expr_unary.BIT_SPINTRACE | gpt.expr_unary.BIT_COLORTRACE
    if isinstance(l, gpt.expr):
        l = gpt.eval(l)
    if type(l) == gpt.tensor:
        return l.trace(t)
    return gpt.expr(l, t)
def p2p_dbard(src, Pol_i, Spin_M, t_sink):
    """Build a point-to-point sequential source for a dbar-d insertion,
    restricted to the sink timeslice t_sink.

    src: forward propagator.
    Pol_i: polarization matrix.
    Spin_M: spin structure matrix (e.g. Cg5).
    t_sink: timeslice kept in the returned sequential source.
    """
    # BUGFIX: removed dead allocation `tmp_seq_src = g.lattice(src)` that was
    # immediately overwritten by the quarkContract14 result below.
    q1_tmp = g.eval(Pol_i * src * Spin_M)  # e.g. Pol_i * D * Cg5
    q2_tmp = g.eval(Spin_M * src)  # e.g. Cg5 * D
    tmp_seq_src = -qC.quarkContract14(q1_tmp, q2_tmp)
    q1_tmp = g.eval(q2_tmp * Spin_M)  # e.g. Cg5 * D * Cg5
    q2_tmp = g.eval(src * Pol_i)  # e.g. D * Pol_i
    tmp_seq_src -= g.spin_transpose(qC.quarkContract12(q2_tmp, q1_tmp))
    # gamma5-hermiticity: seq source is built from the adjoint
    tmp_seq_src = g.eval(g.gamma[5] * g.adj(tmp_seq_src) * g.gamma[5])
    # keep only the sink timeslice
    seq_src = g.lattice(src)
    seq_src[:] = 0
    seq_src[:, :, :, t_sink] = tmp_seq_src[:, :, :, t_sink]
    return seq_src
def split_chiral(basis, factor=None):
    """Split each basis vector into its two chiral components, in place.

    The first len(basis) entries become the positive-chirality parts and the
    negative-chirality parts are appended, doubling the list length.
    """
    if factor is None:
        factor = 0.5
    count = len(basis)
    g5 = gamma5(basis[0])
    projected = gpt.lattice(basis[0])
    for idx in range(count):
        projected @= g5 * basis[idx]
        # append (1 - g5)/2-like component, then overwrite with (1 + g5)/2-like
        basis.append(gpt.eval((basis[idx] - projected) * factor))
        basis[idx] @= (basis[idx] + projected) * factor
def coordinates(self, l, c=None):
    """Project l onto generator coordinates (c is None), or rebuild l
    from the coefficient list c."""
    assert l.otype.__name__ == self.__name__
    generators = self.generators(l.grid.precision.complex_dtype)
    if c is None:
        projections = []
        for Ta in generators:
            projections.append(gpt.eval(gpt.trace(gpt.adj(l) * Ta)))
        return projections
    l[:] = 0
    for coefficient, Ta in zip(c, generators):
        l += coefficient * Ta
def fundamental_to_adjoint(U_a, U_f):
    """Convert the fundamental-representation field U_f into its adjoint
    representation, writing the result into U_a.

    Uses (U_adj)_ab = 2 tr[T_a U T_b U^dag] with the cartesian generators.
    """
    dtype = U_f.grid.precision.complex_dtype
    generators = U_f.otype.cartesian().generators(dtype)
    components = {}
    for a, Ta in enumerate(generators):
        for b, Tb in enumerate(generators):
            components[a, b] = gpt.eval(
                2.0 * gpt.trace(Ta * U_f * Tb * gpt.adj(U_f))
            )
    gpt.merge_color(U_a, components)
def __init__(self, Nc):
    """Set up the fundamental-representation SU(Nc) group matrix object type."""
    super().__init__(Nc, Nc, f"ot_matrix_su_n_fundamental_group({Nc})")
    # conversion table: adjoint-group conversion plus log/i map to the algebra
    adjoint_tag = f"ot_matrix_su_n_adjoint_group({Nc})"
    algebra_tag = f"ot_matrix_su_n_fundamental_algebra({Nc})"
    self.ctab = {
        adjoint_tag: fundamental_to_adjoint,
        algebra_tag: lambda dst, src: gpt.eval(dst, gpt.matrix.log(src) / 1j),
    }
def coordinates(self, l, c=None):
    """Project l onto the coordinates of the complexified algebra (c is
    None), or rebuild l from the coefficient list c.

    The projection uses the first half of the generators separately on the
    real and imaginary components of l.
    """
    assert l.otype.__name__ == self.__name__
    generators = self.generators(l.grid.precision.complex_dtype)
    if c is not None:
        l[:] = 0
        for coefficient, Ta in zip(c, generators):
            l += coefficient * Ta
        return
    half = len(generators) // 2
    re_part = gpt.component.real(l)
    im_part = gpt.component.imag(l)
    projections = [
        gpt.eval(gpt.trace(gpt.adj(re_part) * Ta)) for Ta in generators[0:half]
    ]
    projections += [
        gpt.eval(gpt.trace(gpt.adj(im_part) * Ta)) for Ta in generators[0:half]
    ]
    return projections
def traceless_hermitian(src):
    """Return the traceless hermitian part of src (applied element-wise to lists)."""
    if isinstance(src, list):
        return [traceless_hermitian(element) for element in src]
    field = g.eval(src)
    dim = field.otype.shape[0]
    # hermitian part: (A + A^dag)/2
    hermitian = g(0.5 * field + 0.5 * g.adj(field))
    # subtract the trace part to make it traceless
    hermitian -= g.identity(field) * g.trace(hermitian) / dim
    return hermitian
def contract_lambda_to_sigma_zero(prop_up, prop_down, prop_strange, spin_matrix, pol_matrix, diquarks=None):
    """Lambda -> Sigma^0 transition contraction from up/down/strange propagators.

    spin_matrix: diquark spin structure (e.g. Cg5); pol_matrix: polarization
    projector. diquarks, if given, receives the four constructed diquark
    objects (up_down, down_up, strange_up, strange_down) as a side effect.

    NOTE(review): the return indices 0..3 assume the passed-in diquarks list
    starts empty; passing a non-empty list silently shifts them — confirm no
    caller relies on pre-populated entries.
    """
    if diquarks is None:
        diquarks = []
    prop_dict = {"up": prop_up, "down": prop_down, "strange": prop_strange}
    # build the four flavor-ordered diquarks via quark_contract_13
    for diquark_flavors in ["up_down", "down_up", "strange_up", "strange_down"]:
        flav1, flav2 = diquark_flavors.split("_")
        diquarks.append(
            quark_contract_13(
                gpt.eval(prop_dict[flav1] * spin_matrix),
                gpt.eval(spin_matrix * prop_dict[flav2]),
            )
        )
    # combine the octet base contractions with the Lambda->Sigma^0 weights
    return (
        2 * baryon_octet_base_contraction(prop_strange, diquarks[0], pol_matrix)
        - 2 * baryon_octet_base_contraction(prop_strange, diquarks[1], pol_matrix)
        + baryon_octet_base_contraction(prop_down, diquarks[2], pol_matrix)
        - baryon_octet_base_contraction(prop_up, diquarks[3], pol_matrix)
    ) / sqrt(12)
def test(slv, name):
    """Apply solver slv to the shared source, compare against the CG
    reference solution, and record timing and residual under name."""
    t_begin = g.time()
    solution = g.eval(slv * src)
    t_end = g.time()
    # relative deviation from the reference CG solution
    eps2 = g.norm2(dst_cg - solution) / g.norm2(dst_cg)
    g.message("%s finished: eps^2(CG) = %g" % (name, eps2))
    timings[name] = t_end - t_begin
    resid[name] = eps2 ** 0.5
    assert eps2 < 5e-7
def __init__(self, U, params):
    """Store gauge links and the hopping parameter.

    params must provide exactly one of "mass" or "kappa"; kappa is derived
    from mass via kappa = 1 / (2 (mass + 4)).
    """
    if "mass" in params:
        # mass and kappa are redundant parametrizations; accept only one
        assert "kappa" not in params
        self.kappa = 1.0 / (params["mass"] + 4.0) / 2.0
    else:
        self.kappa = params["kappa"]
    self.U = U
    # precompute adjoint links for the backward hops
    self.Udag = [g.eval(g.adj(u)) for u in U]
def polyakov_loop(U, mu):
    """Volume-averaged, color-normalized Polyakov loop in direction mu.

    Accumulates tr[ prod_j U_mu(m, j) ] by repeatedly shifting the partial
    product and multiplying the local link in front.
    """
    volume = float(U[0].grid.fsites)
    Nc = U[0].otype.Nc
    partial = g.copy(U[mu])
    extent = U[0].grid.fdimensions[mu]
    for _ in range(1, extent):
        shifted = g.cshift(partial, mu, 1)
        partial = g.eval(U[mu] * shifted)
    return g.sum(g.trace(partial)) / Nc / volume
def inv(psi, src):
    """Defect-correction iteration: repeatedly solve with the (cheaper)
    inner inverter and accumulate corrections until the true residual of
    outer_mat falls below self.eps or self.maxiter is reached.

    psi: list of solution fields, overwritten in place.
    src: list of source fields (not modified; a working copy is used).
    """
    # verbosity
    verbose = g.default.is_verbose("dci")
    t_start = g.time()

    # leading order
    n = len(src)
    _s = [g.copy(x) for x in src]
    for j in range(n):
        psi[j][:] = 0
    self.history = []
    for i in range(self.maxiter):
        # correction step: approximate solve of the current defect
        t0 = g.time()
        _d = g.eval(inner_inv_mat * _s)
        t1 = g.time()
        # update the defect with the exact outer operator
        for j in range(n):
            _s[j] -= outer_mat * _d[j]
        t2 = g.time()
        # accumulate the correction into the solution
        for j in range(n):
            psi[j] += _d[j]

        # true resid
        eps = max([g.norm2(outer_mat * psi[j] - src[j]) ** 0.5 for j in range(n)])
        self.history.append(eps)

        if verbose:
            # BUGFIX: corrected typo "innver_inv" -> "inner_inv" in timing log
            g.message(
                "Defect-correcting inverter: eps[",
                i,
                "] =",
                eps,
                ". Timing:",
                t1 - t0,
                "s (inner_inv), ",
                t2 - t1,
                "s (outer_mat)",
            )

        if eps < self.eps:
            if verbose:
                g.message(
                    "Defect-correcting inverter: converged at iteration",
                    i,
                    "after",
                    g.time() - t_start,
                    "s",
                )
            break
def __call__(self, link, staple, mask): verbose = g.default.is_verbose( "local_metropolis" ) # need verbosity categories [ performance, progress ] project_method = self.params["project_method"] step_size = self.params["step_size"] number_accept = 0 possible_accept = 0 t = g.timer("local_metropolis") t("action") action = g.component.real(-g.trace(link * g.adj(staple)) * mask) t("lattice") V = g.lattice(link) V_eye = g.identity(link) t("random") self.rng.normal_element(V, scale=step_size) t("update") V = g.where(mask, V, V_eye) link_prime = g.eval(V * link) action_prime = g.component.real(-g.trace(link_prime * g.adj(staple)) * mask) dp = g.component.exp(action - action_prime) rn = g.lattice(dp) t("random") self.rng.uniform_real(rn) t("random") accept = dp > rn accept *= mask number_accept += g.norm2(accept) possible_accept += g.norm2(mask) link @= g.where(accept, link_prime, link) t() g.project(link, project_method) # g.message(t) if verbose: g.message( f"Local metropolis acceptance rate: {number_accept / possible_accept}" )
def perform(self, root):
    """Compute and store light-quark propagators for the basis vectors in
    self.ilist on timeslice self.t, using cached configuration/eigenvector
    state where possible.

    root: base directory containing the basis and receiving the output.
    """
    global current_config, current_light_quark
    # invalidate the cached configuration if it refers to a different file
    if current_config is not None and current_config.conf_file != self.conf_file:
        current_config = None
    if current_config is None:
        current_config = config(self.conf_file)
    # same caching pattern for the light-quark eigenvector data
    if (
        current_light_quark is not None
        and current_light_quark.evec_dir != self.evec_dir
    ):
        current_light_quark = None
    if current_light_quark is None:
        current_light_quark = light_quark(current_config, self.evec_dir)

    # select sloppy or exact solver
    prop_l = {
        "sloppy": current_light_quark.prop_l_sloppy,
        "exact": current_light_quark.prop_l_exact,
    }[self.solver]

    vcj = g.load(f"{root}/{self.conf}/pm_basis/basis")
    # restrict coordinates to the requested timeslice
    c = g.coordinates(vcj[0])
    c = c[c[:, 3] == self.t]

    g.message(
        f"t = {self.t}, ilist = {self.ilist}, basis size = {len(vcj)}, solver = {self.solver}"
    )

    root_job = f"{root}/{self.name}"
    output = g.gpt_io.writer(f"{root_job}/propagators")

    # create sources (one per spin component)
    srcD = [g.vspincolor(current_config.l_exact.U_grid) for spin in range(4)]

    for i in self.ilist:
        for spin in range(4):
            srcD[spin][:] = 0
            # basis vector i placed on timeslice self.t in spin component `spin`
            srcD[spin][c, spin, :] = vcj[i][c]

            g.message("Norm of source:", g.norm2(srcD[spin]))
            if i == 0:
                g.message("Source at origin:", srcD[spin][0, 0, 0, 0])
                g.message("Source at time-origin:", srcD[spin][0, 0, 0, self.t])

        # solve for all four spin sources at once
        prop = g.eval(prop_l * srcD)
        g.mem_report(details=False)

        for spin in range(4):
            output.write({f"t{self.t}s{spin}c{i}_{self.solver}": prop[spin]})
        output.flush()