def is_element(self, U):
    """Test whether U lies on the group manifold within numerical tolerance.

    Combines the relative unitarity defect |U U^dag - 1|^2 / |1|^2 with the
    relative determinant defect |det(U) - 1|^2 / |1|^2 and compares the
    square root of their sum against 10x machine epsilon.
    """
    one_matrix = gpt.identity(U)
    one_scalar = gpt.identity(gpt.complex(U.grid))
    defect2 = gpt.norm2(U * gpt.adj(U) - one_matrix) / gpt.norm2(one_matrix)
    defect2 += gpt.norm2(gpt.matrix.det(U) - one_scalar) / gpt.norm2(one_scalar)
    return defect2**0.5 < U.grid.precision.eps * 10.0
def project(self, U, method):
    """Project U back onto the group manifold (in place).

    method:
        "defect_right" / "defect" -- remove the first-order unitarity
            defect by multiplying the correction from the right
        "defect_left" -- same correction multiplied from the left
    """
    if method in ("defect_right", "defect"):
        one = gpt.identity(U)
        defect = gpt.eval(0.5 * gpt.adj(U) * U - 0.5 * one)
        U @= U * (one - defect)
    elif method == "defect_left":
        one = gpt.identity(U)
        defect = gpt.eval(0.5 * U * gpt.adj(U) - 0.5 * one)
        U @= (one - defect) * U
    else:
        raise Exception("Unknown projection method")
def log(i, convergence_threshold=0.5): i = gpt.eval(i) # i = n*(1 + x), log(i) = log(n) + log(1+x) # x = i/n - 1, |x|^2 = <i/n - 1, i/n - 1> = |i|^2/n^2 + |1|^2 - (<i,1> + <1,i>)/n # d/dn |x|^2 = -2 |i|^2/n^3 + (<i,1> + <1,i>)/n^2 = 0 -> 2|i|^2 == n (<i,1> + <1,i>) if i.grid.precision != gpt.double: x = gpt.convert(i, gpt.double) else: x = gpt.copy(i) lI = gpt.identity(gpt.lattice(x)) n = gpt.norm2(x) / gpt.inner_product(x, lI).real x /= n x -= lI n2 = gpt.norm2(x)**0.5 / x.grid.gsites order = 8 * int(16 / (-numpy.log10(n2))) assert n2 < convergence_threshold o = gpt.copy(x) xn = gpt.copy(x) for j in range(2, order + 1): xn @= xn * x o -= xn * ((-1.0)**j / j) o += lI * numpy.log(n) if i.grid.precision != gpt.double: r = gpt.lattice(i) gpt.convert(r, o) o = r return o
def traceless_hermitian(src):
    """Return the traceless hermitian part of src: (src + src^dag)/2 - tr(.)/N.

    Lists are handled element-wise by recursion.
    """
    if isinstance(src, list):
        return [traceless_hermitian(element) for element in src]
    src = g.eval(src)
    dim = src.otype.shape[0]
    hermitian_part = g(0.5 * src + 0.5 * g.adj(src))
    hermitian_part -= g.identity(src) * g.trace(hermitian_part) / dim
    return hermitian_part
def __call__(self, link, staple, mask): verbose = g.default.is_verbose( "metropolis" ) # need verbosity categories [ performance, progress ] project_method = self.params["project_method"] step_size = self.params["step_size"] number_accept = 0 possible_accept = 0 t = g.timer("metropolis") t("action") action = g.component.real(g.eval(-g.trace(link * g.adj(staple)) * mask)) t("lattice") V = g.lattice(link) V_eye = g.identity(link) t("random") self.rng.element(V, scale=step_size, normal=True) t("update") V = g.where(mask, V, V_eye) link_prime = g.eval(V * link) action_prime = g.component.real( g.eval(-g.trace(link_prime * g.adj(staple)) * mask)) dp = g.component.exp(g.eval(action - action_prime)) rn = g.lattice(dp) t("random") self.rng.uniform_real(rn) t("random") accept = dp > rn accept *= mask number_accept += g.norm2(accept) possible_accept += g.norm2(mask) link @= g.where(accept, link_prime, link) t() g.project(link, project_method) # g.message(t) if verbose: g.message( f"Metropolis acceptance rate: {number_accept / possible_accept}" )
def project(self, U, method):
    """Project U onto SU(N) (in place): normalize det(U) to one, then remove
    the first-order unitarity defect from the chosen side."""
    if method == "defect_right" or method == "defect":
        # V = V0(1 + eps) with dag(eps) = eps , dag(V0) V0 = 1
        # dag(V) V - 1 = (1+eps)(1+eps) - 1 = 2eps + O(eps^2)
        # Multiply from right with 1 - eps = 1 - 1/2 (dag(V)V-1)
        # det(V) = 1 + Tr(eps) = 1 + 1/2 Tr(dag(V) V - 1)
        # Multiply with 1 - Tr(eps)
        # det(U)^(-1/Nc) rescales U so that det becomes one
        U *= gpt.component.pow(-1.0 / self.Nc)(gpt.matrix.det(U))
        I = gpt.identity(U)
        eps = gpt.eval(0.5 * gpt.adj(U) * U - 0.5 * I)
        U @= U * (I - eps)
    elif method == "defect_left":
        # V = (1 + eps)V0 with dag(eps) = eps , dag(V0) V0 = 1
        # V dag(V) - 1 = (1+eps)(1+eps) - 1 = 2eps + O(eps^2)
        # Multiply from left with 1 - eps = 1 - 1/2 (V dag(V)-1)
        U *= gpt.component.pow(-1.0 / self.Nc)(gpt.matrix.det(U))
        I = gpt.identity(U)
        eps = gpt.eval(0.5 * U * gpt.adj(U) - 0.5 * I)
        U @= (I - eps) * U
    else:
        raise Exception("Unknown projection method")
def block_insert(self, U, u2, idx):
    """Embed the 2x2 matrix field u2 into U at the subgroup rows/columns idx;
    all other entries of U are reset to the identity."""
    U @= gpt.identity(U)
    assert u2.otype.Nc == 2 and u2.otype.Ndim == 2
    idx = list(idx)
    # copy plans are expensive to build, so they are cached per (Nc, idx)
    # NOTE(review): the cache key does not include the grid; confirm plans are
    # never reused across lattices living on different grids
    cache = ot_matrix_su_n_fundamental_group.cache
    cache_key = f"{self.Nc}_{idx}_rev"
    if cache_key not in cache:
        # full-volume slice for every grid dimension
        pos = tuple([slice(None, None, None) for i in range(u2.grid.nd)])
        plan = gpt.copy_plan(U, u2)
        for i in range(2):
            for j in range(2):
                # map 2x2 element (i, j) onto U element (idx[i], idx[j])
                plan.source += u2.view[pos + (i, j)]
                plan.destination += U.view[pos + (idx[i], idx[j])]
        cache[cache_key] = plan()
    cache[cache_key](U, u2)
def exp(i):
    """Matrix exponential via truncated Taylor series with scaling-and-squaring,
    evaluated in double precision and converted back to the input precision."""
    t = gpt.timer("exp")
    t("eval")
    i = gpt.eval(i)  # accept expressions
    t("prep")
    # promote to double precision for the series evaluation
    if i.grid.precision != gpt.double:
        x = gpt.convert(i, gpt.double)
    else:
        x = gpt.copy(i)
    # per-site magnitude controls the scaling step
    n = gpt.norm2(x)**0.5 / x.grid.gsites
    order = 19
    maxn = 0.05
    ns = 0
    if n > maxn:
        # scale down by 2^ns so the truncated series is accurate,
        # square the result ns times afterwards
        ns = int(numpy.log2(n / maxn))
        x /= 2**ns
    o = gpt.lattice(x)
    t("mem")
    o[:] = 0
    nfac = 1.0
    xn = gpt.copy(x)
    t("id")
    # series starts from 1 + x
    o @= gpt.identity(o)
    t("add")
    o += xn
    t("loop")
    # accumulate x^j / j! for j = 2 .. order
    for j in range(2, order + 1):
        nfac /= j
        xn @= xn * x
        o += xn * nfac
    t("reduce")
    # undo the scaling: exp(x) = exp(x / 2^ns)^(2^ns)
    for j in range(ns):
        o @= o * o
    t("conv")
    if i.grid.precision != gpt.double:
        r = gpt.lattice(i)
        gpt.convert(r, o)
        o = r
    t()
    # gpt.message(t)
    return o
def __call__(self, link, staple, mask):
    """SU(2)-subgroup heat-bath update of link (in place) following
    Kennedy/Pendleton, cycling over all SU(2) subgroups of the link's group."""
    verbose = g.default.is_verbose(
        "su2_heat_bath"
    )  # need verbosity categories [ performance, progress ]
    project_method = self.params["project_method"]
    # params
    niter = self.params["niter"]
    # temporaries
    grid = link.grid
    u2 = g.lattice(grid, g.ot_matrix_su_n_fundamental_group(2))
    u2_eye = g.identity(u2)
    one = g.identity(g.complex(grid))
    zero = g.complex(grid)
    zero[:] = 0
    eps = g.complex(grid)
    eps[:] = grid.precision.eps * 10.0
    xr = [g.complex(grid) for i in range(4)]
    a = [g.complex(grid) for i in range(4)]
    two_pi = g.complex(grid)
    two_pi[:] = 2.0 * np.pi
    accepted = g.complex(grid)
    d = g.complex(grid)
    V_eye = g.identity(link)
    # pauli (times i, used as su(2) generators)
    pauli1, pauli2, pauli3 = tuple([g.lattice(u2) for i in range(3)])
    ident = g.identity(u2)
    pauli1[:] = 1j * np.array([[0, 1], [1, 0]], dtype=grid.precision.complex_dtype)
    pauli2[:] = 1j * np.array([[0, 1j], [-1j, 0]], dtype=grid.precision.complex_dtype)
    pauli3[:] = 1j * np.array([[1, 0], [0, -1]], dtype=grid.precision.complex_dtype)
    # counter (norm2 of a 0/1 field counts the masked sites)
    num_sites = round(g.norm2(g.where(mask, one, zero)))
    # shortcuts
    inv = g.component.pow(-1.0)
    # go through subgroups
    for subgroup in link.otype.su2_subgroups():
        V = g.eval(link * g.adj(staple))
        # extract u2 subgroup following Kennedy/Pendleton
        link.otype.block_extract(u2, V, subgroup)
        u2 @= u2 - g.adj(u2) + g.identity(u2) * g.trace(g.adj(u2))
        udet = g.matrix.det(u2)
        adet = g.component.abs(udet)
        # guard against (numerically) singular staple projections
        nzmask = adet > eps
        u2 @= g.where(nzmask, u2, u2_eye)
        udet = g.where(nzmask, udet, one)
        xi = g.eval(0.5 * g.component.sqrt(udet))
        u2 @= 0.5 * u2 * inv(xi)
        # make sure that su2 subgroup projection worked
        assert g.group.defect(u2) < u2.grid.precision.eps * 10.0
        xi @= 2.0 * xi
        alpha = g.component.real(xi)
        # main loop: rejection sampling of the Kennedy/Pendleton distribution
        it = 0
        num_accepted = 0
        accepted[:] = 0
        d[:] = 0
        while (num_accepted < num_sites) and (it < niter):
            self.rng.uniform_real(xr, min=0.0, max=1.0)
            xr[1] @= -g.component.log(xr[1]) * inv(alpha)
            xr[2] @= -g.component.log(xr[2]) * inv(alpha)
            xr[3] @= g.component.cos(g.eval(xr[3] * two_pi))
            xr[3] @= xr[3] * xr[3]
            xrsq = g.eval(xr[2] + xr[1] * xr[3])
            # keep previously accepted d values, only update pending sites
            d = g.where(accepted, d, xrsq)
            thresh = g.eval(one - d * 0.5)
            xrsq @= xr[0] * xr[0]
            newly_accepted = g.where(xrsq < thresh, one, zero)
            accepted = g.where(
                mask, g.where(newly_accepted, newly_accepted, accepted), zero)
            num_accepted = round(g.norm2(g.where(accepted, one, zero)))
            it += 1
        if verbose:
            g.message(f"SU(2)-heatbath update needed {it} / {niter} iterations")
        # update link: draw the remaining three components uniformly on the
        # sphere of radius sqrt(1 - a0^2)
        a[0] @= g.where(mask, one - d, zero)
        a123mag = g.component.sqrt(g.component.abs(one - a[0] * a[0]))
        phi, cos_theta = g.complex(grid), g.complex(grid)
        self.rng.uniform_real([phi, cos_theta])
        phi @= phi * two_pi
        cos_theta @= (cos_theta * 2.0) - one
        sin_theta = g.component.sqrt(g.component.abs(one - cos_theta * cos_theta))
        a[1] @= a123mag * sin_theta * g.component.cos(phi)
        a[2] @= a123mag * sin_theta * g.component.sin(phi)
        a[3] @= a123mag * cos_theta
        ua = g.eval(a[0] * ident + a[1] * pauli1 + a[2] * pauli2 + a[3] * pauli3)
        b = g.where(mask, g.adj(u2) * ua, ident)
        link.otype.block_insert(V, b, subgroup)
        link @= g.where(accepted, V * link, link)
        # check: ua, b, and V must all be unitary on accepted sites
        check = g.where(accepted, ua * g.adj(ua) - ident, 0.0 * ident)
        delta = (g.norm2(check) / g.norm2(ident)) ** 0.5
        assert delta < grid.precision.eps * 10.0
        check = g.where(accepted, b * g.adj(b) - ident, 0.0 * ident)
        delta = (g.norm2(check) / g.norm2(ident)) ** 0.5
        assert delta < grid.precision.eps * 10.0
        check = g.where(accepted, V * g.adj(V) - V_eye, 0.0 * V_eye)
        delta = (g.norm2(check) / g.norm2(V_eye)) ** 0.5
        assert delta < grid.precision.eps * 10.0
        # project
        g.project(link, project_method)
# check that the algebra element has purely real coordinates:
# re-building it from the real part of its coordinates must reproduce it
n0 = g.norm2(algebra)
algebra2.otype.coordinates(
    algebra2, g.component.real(algebra.otype.coordinates(algebra)))
algebra -= algebra2
eps = (g.norm2(algebra) / n0)**0.5
g.message(f"Test representation: {eps}")
assert eps < eps_ref

################################################################################
# Test projection schemes on promoting SP to DP group membership
################################################################################
V0 = g.convert(rng.element(g.mcolor(grid_sp)), g.double)
for method in ["defect_left", "defect_right"]:
    V = g.copy(V0)
    I = g.identity(V)
    I_s = g.identity(g.complex(grid_dp))
    # iterate the projection and watch both defects shrink
    for i in range(3):
        eps_uni = (g.norm2(g.adj(V) * V - I) / g.norm2(I))**0.5
        eps_det = (g.norm2(g.matrix.det(V) - I_s) / g.norm2(I_s))**0.5
        g.message(
            f"Before {method} iteration {i}, unitarity defect: {eps_uni}, determinant defect: {eps_det}"
        )
        g.project(V, method)
    assert eps_uni < 1e-14 and eps_det < 1e-14

################################################################################
# Test SU(2) fundamental and conversion to adjoint
################################################################################
rng = g.random("test")
# the otype inner product must agree with the sum of coordinate-wise
# lattice inner products
ip = left_algebra.otype.inner_product(left_algebra, right_algebra)
c_left = left_algebra.otype.coordinates(left_algebra)
c_right = right_algebra.otype.coordinates(right_algebra)
ipc = sum([g.inner_product(l, r).real for l, r in zip(c_left, c_right)])
eps = abs(ip - ipc) / abs(ip + ipc)
g.message(f"Test inner product: {eps}")
assert eps < eps_ref * 10.0

################################################################################
# Test projection schemes on promoting SP to DP group membership
################################################################################
V0 = g.convert(rng.element(g.mcolor(grid_sp)), g.double)
for method in ["defect_left", "defect_right"]:
    V = g.copy(V0)
    I = g.identity(V)
    I_s = g.identity(g.complex(grid_dp))
    # iterate the projection and watch both defects shrink
    for i in range(3):
        eps_uni = (g.norm2(g.adj(V) * V - I) / g.norm2(I)) ** 0.5
        eps_det = (g.norm2(g.matrix.det(V) - I_s) / g.norm2(I_s)) ** 0.5
        g.message(
            f"Before {method} iteration {i}, unitarity defect: {eps_uni}, determinant defect: {eps_det}"
        )
        g.project(V, method)
    assert eps_uni < 1e-14 and eps_det < 1e-14

################################################################################
# Test SU(2) fundamental and conversion to adjoint
################################################################################
def __call__(self, link, staple, mask):
    """
    Generate new U(1) links with P(U) = e^{ Re Staple U } using the heatbath
    algorithm of Hattori-Nakajima (hep-lat/9210016), which draws a random
    variable x in (-pi, -pi) from P(x) ~ exp(a cos(x)).
    """
    verbose = g.default.is_verbose(
        "u1_heat_bath"
    )  # need verbosity categories [ performance, progress ]
    assert type(link) == g.lattice and type(staple) == g.lattice
    # component-wise functions needed below
    exp = g.component.exp
    log = g.component.log
    sqrt = g.component.sqrt
    cos = g.component.cos
    tan = g.component.tan
    atan = g.component.atan
    cosh = g.component.cosh
    tanh = g.component.tanh
    atanh = g.component.atanh
    inv = g.component.inv

    # functions needed in Hattori-Nakajima method
    def gmax(x, y):
        return g.where(x > y, x, y)

    def gmin(x, y):
        return g.where(x < y, x, y)

    # h maps the uniform variate to the proposal angle
    def h(x):
        return g.eval(
            2.0 * inv(alpha)
            * atanh(g.eval(beta_s * tan(g.eval((2.0 * x - one) * tmp)))))

    # gg is the acceptance probability of the proposal
    def gg(x):
        return exp(g.eval(-a * G(h(x))))

    def G(x):
        return g.eval(
            one - cos(x) - a_inv * log(
                g.eval(one + (cosh(g.eval(alpha * x)) - one)
                       * inv(g.eval(one + beta)))))

    # temporaries
    a = g.component.abs(staple)  # absolute value of staple
    a_inv = g.eval(inv(a))  # needed several times
    grid = a.grid
    one = g.identity(g.complex(grid))
    zero = g.identity(g.complex(grid))
    zero[:] = 0
    Unew = g.complex(grid)  # proposal for new links
    accepted = g.complex(grid)  # mask for accepted links
    num_sites = round(g.norm2(g.where(mask, one, zero)))
    x1 = g.complex(grid)
    x2 = g.complex(grid)
    nohit = 0  # to compute acceptance ratio

    # parameters of Hattori-Nakajima method
    eps = 0.001
    astar = 0.798953686083986
    amax = gmax(zero, g.eval(a - astar * one))
    delta = g.eval(0.35 * amax + 1.03 * sqrt(amax))
    alpha = gmin(sqrt(g.eval(a * (2.0 - eps))),
                 gmax(sqrt(g.eval(eps * a)), delta))
    beta = g.eval(
        (
            gmax(
                g.eval(alpha * alpha * a_inv),
                g.eval(
                    (cosh(g.eval(np.pi * alpha)) - one)
                    * inv(g.eval(exp(g.eval(2.0 * a)) - one))
                ),
            )
            - one
        )
    )
    beta_s = sqrt(g.eval((one + beta) * inv(g.eval(one - beta))))
    tmp = atan(g.eval(inv(beta_s) * tanh(g.eval(0.5 * np.pi * alpha))))

    # main loop (large optimization potential but not time-critical anyway)
    num_accepted = 0
    accepted[:] = 0
    Unew[:] = 0
    # worst-case acceptance ratio of Hattori-Nakajima is 0.88
    while num_accepted < num_sites:
        self.rng.uniform_real(x1, min=0.0, max=1.0)
        self.rng.uniform_real(x2, min=0.0, max=1.0)
        # keep previously accepted proposals, redraw only pending sites
        Unew = g.where(accepted, Unew, exp(g.eval(1j * h(x1))))
        newly_accepted = g.where(x2 < gg(x1), one, zero)
        accepted = g.where(
            mask, g.where(newly_accepted, newly_accepted, accepted), zero)
        num_accepted = round(g.norm2(g.where(accepted, one, zero)))
        nohit += num_sites - num_accepted
    if verbose:
        g.message(
            f"Acceptance ratio for U(1) heatbath update = {num_sites/(num_sites+nohit)}"
        )
    # Unew was drawn with phase angle centered about zero
    # -> need to shift this by phase angle of staple
    # (we update every link, thus accepted = mask)
    link @= g.where(accepted, Unew * staple * a_inv, link)
def is_element(self, U):
    """Return True if U is unitary within 10x machine epsilon."""
    one = gpt.identity(U)
    relative_defect = (gpt.norm2(U * gpt.adj(U) - one) / gpt.norm2(one))**0.5
    # consider additional determinant check
    return relative_defect < U.grid.precision.eps * 10.0
g.sum(g.qcd.gauge.energy_density(U_wf, field=True)) / U_wf[0].grid.gsites) eps = abs(E - 0.3032029987236007) g.message(f"Energy density check after wilson flow at t=0.1: {eps}") assert eps < 1e-10 eps = abs(E - E_from_field) g.message(f"Energy density field test: {eps}") assert eps < 1e-10 # Test stout smearing U_stout = U P_stout = [] for i in range(3): U_stout = g.qcd.gauge.smear.stout(U_stout, rho=0.1) for mu in range(len(U_stout)): I = g.identity(U_stout[mu]) eps2 = g.norm2(U_stout[mu] * g.adj(U_stout[mu]) - I) / g.norm2(I) g.message( f"Unitarity check of stout-smeared links: mu = {mu}, eps2 = {eps2}" ) P_stout.append(g.qcd.gauge.plaquette(U_stout)) g.message(f"Stout smeared plaquettes {P_stout}") assert sorted(P_stout) == P_stout # make sure plaquettes go towards one # for given gauge configuration, cross-check against previous Grid code # this establishes the randomized check value used below # U = g.load("/hpcgpfs01/work/clehner/configs/24I_0p005/ckpoint_lat.IEEE64BIG.5000") # P = [g.qcd.gauge.plaquette(U),g.qcd.gauge.plaquette(g.qcd.gauge.smear.stout(U, rho=0.15, orthogonal_dimension=3)),g.qcd.gauge.plaquette(g.qcd.gauge.smear.stout(U, rho=0.1))] # P_comp = [0.588074,0.742136,0.820262]
def __init__(self, U, params):
    """Set up a Wilson-clover fermion operator on the gauge field U.

    params supplies mass/kappa (exactly one of them), anisotropy (xi_0, nu),
    clover coefficients (csw_r, csw_t), boundary phases, and -- for open
    temporal boundary conditions -- the boundary improvement coefficient cF.
    """
    shift_eo.__init__(self, U, boundary_phases=params["boundary_phases"])
    Nc = U[0].otype.Nc
    otype = g.ot_vector_spin_color(4, Nc)
    grid = U[0].grid
    grid_eo = grid.checkerboarded(g.redblack)
    # fermion and gauge fields live on the same (full and even-odd) grids
    self.F_grid = grid
    self.U_grid = grid
    self.F_grid_eo = grid_eo
    self.U_grid_eo = grid_eo
    self.vector_space_F = g.vector_space.explicit_grid_otype(self.F_grid, otype)
    self.vector_space_U = g.vector_space.explicit_grid_otype(self.U_grid, otype)
    self.vector_space_F_eo = g.vector_space.explicit_grid_otype(
        self.F_grid_eo, otype
    )
    # even/odd work buffers
    self.src_e = g.vspincolor(grid_eo)
    self.src_o = g.vspincolor(grid_eo)
    self.dst_e = g.vspincolor(grid_eo)
    self.dst_o = g.vspincolor(grid_eo)
    self.dst_e.checkerboard(g.even)
    self.dst_o.checkerboard(g.odd)
    # exactly one of kappa / mass parameterizes the bare mass
    if params["kappa"] is not None:
        assert params["mass"] is None
        self.m0 = 1.0 / params["kappa"] / 2.0 - 4.0
    else:
        self.m0 = params["mass"]
    self.xi_0 = params["xi_0"]
    self.csw_r = params["csw_r"] / self.xi_0
    self.csw_t = params["csw_t"]
    self.nu = params["nu"]
    self.kappa = 1.0 / (2.0 * (self.m0 + 1.0 + 3.0 * self.nu / self.xi_0))
    # a vanishing boundary phase in the last direction signals open bc in time
    self.open_bc = params["boundary_phases"][self.nd - 1] == 0.0
    if self.open_bc:
        assert all(
            [
                self.xi_0 == 1.0,
                self.nu == 1.0,
                self.csw_r == self.csw_t,
                "cF" in params,
            ]
        )  # open bc only for isotropic case, require cF passed
        self.cF = params["cF"]
        T = self.L[self.nd - 1]
    # compute field strength tensor
    if self.csw_r != 0.0 or self.csw_t != 0.0:
        self.clover = g.mspincolor(grid)
        self.clover[:] = 0
        I = g.identity(self.clover)
        for mu in range(self.nd):
            for nu in range(mu + 1, self.nd):
                # temporal planes use csw_t, spatial planes csw_r
                if mu == (self.nd - 1) or nu == (self.nd - 1):
                    cp = self.csw_t
                else:
                    cp = self.csw_r
                self.clover += (
                    -0.5 * cp * g.gamma[mu, nu] * I
                    * g.qcd.gauge.field_strength(U, mu, nu)
                )
        if self.open_bc:
            # set field strength tensor to unity at the temporal boundaries
            value = -0.5 * self.csw_t
            self.clover[:, :, :, 0, :, :, :, :] = 0.0
            self.clover[:, :, :, T - 1, :, :, :, :] = 0.0
            for alpha in range(4):
                for a in range(Nc):
                    self.clover[:, :, :, 0, alpha, alpha, a, a] = value
                    self.clover[:, :, :, T - 1, alpha, alpha, a, a] = value
            if self.cF != 1.0:
                # add improvement coefficients next to temporal boundaries
                value = self.cF - 1.0
                for alpha in range(4):
                    for a in range(Nc):
                        self.clover[:, :, :, 1, alpha, alpha, a, a] += value
                        self.clover[:, :, :, T - 2, alpha, alpha, a, a] += value
        # integrate kappa into clover matrix for inversion
        self.clover += 1.0 / 2.0 * 1.0 / self.kappa * I
        self.clover_inv = g.matrix.inv(self.clover)
        # pre-split clover and its inverse into even/odd checkerboards
        self.clover_eo = {
            g.even: g.lattice(grid_eo, self.clover.otype),
            g.odd: g.lattice(grid_eo, self.clover.otype),
        }
        self.clover_inv_eo = {
            g.even: g.lattice(grid_eo, self.clover.otype),
            g.odd: g.lattice(grid_eo, self.clover.otype),
        }
        for cb in self.clover_eo:
            g.pick_checkerboard(cb, self.clover_eo[cb], self.clover)
            g.pick_checkerboard(cb, self.clover_inv_eo[cb], self.clover_inv)
    else:
        self.clover = None
        self.clover_inv = None
    # expose the standard matrix_operator interfaces
    self.Meooe = g.matrix_operator(
        mat=lambda dst, src: self._Meooe(dst, src),
        vector_space=self.vector_space_F_eo,
    )
    self.Mooee = g.matrix_operator(
        mat=lambda dst, src: self._Mooee(dst, src),
        inv_mat=lambda dst, src: self._MooeeInv(dst, src),
        vector_space=self.vector_space_F_eo,
    )
    self.Dhop = g.matrix_operator(
        mat=lambda dst, src: self._Dhop(dst, src), vector_space=self.vector_space_F
    )
    matrix_operator.__init__(
        self, lambda dst, src: self._M(dst, src), vector_space=self.vector_space_F
    )
    self.G5M = g.matrix_operator(
        lambda dst, src: self._G5M(dst, src), vector_space=self.vector_space_F
    )
    self.Mdiag = g.matrix_operator(
        lambda dst, src: self._Mdiag(dst, src), vector_space=self.vector_space_F
    )
    # physical source/solution import/export are trivial copies for this action
    self.ImportPhysicalFermionSource = g.matrix_operator(
        lambda dst, src: g.copy(dst, src), vector_space=self.vector_space_F
    )
    self.ExportPhysicalFermionSolution = g.matrix_operator(
        lambda dst, src: g.copy(dst, src), vector_space=self.vector_space_F
    )
    self.ExportPhysicalFermionSource = g.matrix_operator(
        lambda dst, src: g.copy(dst, src), vector_space=self.vector_space_F
    )
    self.Dminus = g.matrix_operator(
        lambda dst, src: g.copy(dst, src), vector_space=self.vector_space_F
    )
tp = (t + Nt + dt) % Nt for u_dst, u_src in zip(U_temp, U0): u_dst[:, :, :, tp] = u_src[:, :, :, tp] for i in range(n_smear): g.message("smear", i) U_temp = g.qcd.gauge.smear.stout(U_temp, rho=rho_smear) for u_dst, u_src in zip(U, U_temp): u_dst[:, :, :, t] = u_src[:, :, :, t] # save smeared gauge field g.save(config_smeared, U, g.format.nersc()) g.message("Plaquette after", g.qcd.gauge.plaquette(U)) for u in U: g.message("Unitarity violation", g.norm2(u * g.adj(u) - g.identity(u)) / g.norm2(u)) g.message( "SU violation", g.norm2(g.matrix.det(u) - g.identity(g.complex(u.grid))) / g.norm2(u), ) # separate time slices and define laplace operator U3 = [g.separate(u, 3) for u in U[0:3]] for t in range(Nt): if t % t_groups != t_group: continue g.message(f"Laplace basis for time-slice {t}") U3_t = [u[t] for u in U3]
def cayley_hamilton_function_and_gradient_3(iQ, gradient_prime):
    """Compute exp(iQ) and the gradient term Lambda for a traceless hermitian
    3x3 field iQ via the Cayley-Hamilton decomposition.

    Returns (exp_iQ, Lambda) where Lambda is the traceless hermitian
    projection of the Gamma defined in the reference below.
    """
    # For now use Cayley Hamilton Decomposition for traceless Hermitian 3x3 matrices,
    # see https://arxiv.org/pdf/hep-lat/0311018.pdf
    I = g.identity(iQ)
    Q = g(-1j * iQ)
    Q2 = g(Q * Q)
    Q3 = g(Q * Q2)
    # invariants c0 = tr(Q^3)/3, c1 = tr(Q^2)/2
    c0 = g(g.trace(Q3) * (1.0 / 3.0))
    c1 = g(g.trace(Q2) * (1.0 / 2.0))
    one = g.identity(c0)
    c0max = g(2.0 * g.component.pow(1.5)(c1 / 3.0))
    theta = g.component.acos(c0 * g.component.inv(c0max))
    u = g(g.component.sqrt(c1 / 3.0) * g.component.cos(theta / 3.0))
    w = g(g.component.sqrt(c1) * g.component.sin(theta / 3.0))
    u2 = g(u * u)
    w2 = g(w * w)
    # xi0 = sin(w)/w, xi1 = cos(w)/w^2 - sin(w)/w^3
    xi0 = g(g.component.sin(w) * g.component.inv(w))
    xi1 = g(
        g.component.cos(w) * g.component.inv(w2)
        - g.component.sin(w) * g.component.inv(w * w2))
    cosw = g.component.cos(w)
    ixi0 = g(1j * xi0)
    emiu = g(g.component.cos(u) - 1j * g.component.sin(u))
    e2iu = g(g.component.cos(2.0 * u) + 1j * g.component.sin(2.0 * u))
    # coefficients h0, h1, h2 of exp(iQ) = (h0 + h1 Q + h2 Q^2) / (9u^2 - w^2)
    h0 = g(e2iu * (u2 - w2) + emiu * ((8.0 * u2 * cosw)
                                      + (2.0 * u * (3.0 * u2 + w2) * ixi0)))
    h1 = g(e2iu * (2.0 * u) - emiu * ((2.0 * u * cosw)
                                      - (3.0 * u2 - w2) * ixi0))
    h2 = g(e2iu - emiu * (cosw + (3.0 * u) * ixi0))
    fden = g.component.inv(9.0 * u2 - w2)
    f0 = g(h0 * fden)
    f1 = g(h1 * fden)
    f2 = g(h2 * fden)
    # first result, exp_iQ
    exp_iQ = g(f0 * I + f1 * Q + f2 * Q2)
    # next, compute jacobian components (derivatives r_ij of the f's w.r.t. u, w)
    r01 = g((2.0 * u + 1j * 2.0 * (u2 - w2)) * e2iu
            + emiu * ((16.0 * u * cosw + 2.0 * u * (3.0 * u2 + w2) * xi0)
                      + 1j * (-8.0 * u2 * cosw + 2.0 * (9.0 * u2 + w2) * xi0)))
    r11 = g((2.0 * one + 4j * u) * e2iu
            + emiu * ((-2.0 * cosw + (3.0 * u2 - w2) * xi0)
                      + 1j * ((2.0 * u * cosw + 6.0 * u * xi0))))
    r21 = g(2j * e2iu + emiu * (-3.0 * u * xi0 + 1j * (cosw - 3.0 * xi0)))
    r02 = g(-2.0 * e2iu + emiu * (-8.0 * u2 * xi0
                                  + 1j * (2.0 * u * (cosw + xi0 + 3.0 * u2 * xi1))))
    r12 = g(emiu * (2.0 * u * xi0 + 1j * (-cosw - xi0 + 3.0 * u2 * xi1)))
    r22 = g(emiu * (xi0 - 1j * (3.0 * u * xi1)))
    fden = g.component.inv((2.0 * (9.0 * u2 - w2) * (9.0 * u2 - w2)))
    # b_{1j}, b_{2j} coefficients of the derivative matrices B1, B2
    b10 = g(2.0 * u * r01 + (3.0 * u2 - w2) * r02 - (30.0 * u2 + 2.0 * w2) * f0)
    b11 = g(2.0 * u * r11 + (3.0 * u2 - w2) * r12 - (30.0 * u2 + 2.0 * w2) * f1)
    b12 = g(2.0 * u * r21 + (3.0 * u2 - w2) * r22 - (30.0 * u2 + 2.0 * w2) * f2)
    b20 = g(r01 - (3.0 * u) * r02 - (24.0 * u) * f0)
    b21 = g(r11 - (3.0 * u) * r12 - (24.0 * u) * f1)
    b22 = g(r21 - (3.0 * u) * r22 - (24.0 * u) * f2)
    b10 *= fden
    b11 *= fden
    b12 *= fden
    b20 *= fden
    b21 *= fden
    b22 *= fden
    B1 = g(b10 * I + b11 * Q + b12 * Q2)
    B2 = g(b20 * I + b21 * Q + b22 * Q2)
    U_Sigma_prime = gradient_prime  # gradient_prime = U * Sigma_prime
    # assemble Gamma per the reference and project to traceless hermitian
    Gamma = g(
        g.trace(U_Sigma_prime * B1) * Q + g.trace(U_Sigma_prime * B2) * Q2
        + f1 * U_Sigma_prime + f2 * Q * U_Sigma_prime + f2 * U_Sigma_prime * Q)
    Lambda = g.qcd.gauge.project.traceless_hermitian(Gamma)
    return exp_iQ, Lambda
def is_element(self, U):
    """Return True if U is unitary within 10x machine epsilon."""
    one = gpt.identity(U)
    unitarity_defect2 = gpt.norm2(U * gpt.adj(U) - one) / gpt.norm2(one)
    return unitarity_defect2**0.5 < U.grid.precision.eps * 10.0
U[0].grid.gsites) eps = abs(q - q_transformed) g.message( f"Staple q[{mu}] before {q} and after {q_transformed} gauge transformation: {eps}" ) assert eps < 1e-14 # Test stout smearing U_stout = U P_stout = [] for i in range(3): U_stout = g.qcd.gauge.smear.stout(U_stout, rho=0.1) for mu in range(len(U_stout)): I = g.identity(U_stout[mu]) eps2 = g.norm2(U_stout[mu] * g.adj(U_stout[mu]) - I) / g.norm2(I) g.message( f"Unitarity check of stout-smeared links: mu = {mu}, eps2 = {eps2}" ) P_stout.append(g.qcd.gauge.plaquette(U_stout)) g.message(f"Stout smeared plaquettes {P_stout}") assert sorted(P_stout) == P_stout # make sure plaquettes go towards one # for given gauge configuration, cross-check against previous Grid code # this establishes the randomized check value used below # U = g.load("/hpcgpfs01/work/clehner/configs/24I_0p005/ckpoint_lat.IEEE64BIG.5000") # P = [g.qcd.gauge.plaquette(U),g.qcd.gauge.plaquette(g.qcd.gauge.smear.stout(U, rho=0.15, orthogonal_dimension=3)),g.qcd.gauge.plaquette(g.qcd.gauge.smear.stout(U, rho=0.1))] # P_comp = [0.588074,0.742136,0.820262]
def assert_unitary(U):
    """Abort if U deviates from unitarity by more than 10x machine epsilon."""
    one = g.identity(U)
    relative_defect = (g.norm2(U * g.adj(U) - one) / g.norm2(one)) ** 0.5
    assert relative_defect < U.grid.precision.eps * 10.0
def defect(self, U):
    """Return the relative unitarity defect of U, i.e. |U U^dag - 1| / |1|."""
    one = gpt.identity(U)
    relative_norm2 = gpt.norm2(U * gpt.adj(U) - one) / gpt.norm2(one)
    return relative_norm2**0.5
def defect(self, U):
    """Return the combined unitarity and determinant defect of U.

    Sums the relative defects |U U^dag - 1|^2/|1|^2 and |det(U) - 1|^2/|1|^2
    and returns the square root of the sum.
    """
    one_matrix = gpt.identity(U)
    one_scalar = gpt.identity(gpt.complex(U.grid))
    total2 = gpt.norm2(U * gpt.adj(U) - one_matrix) / gpt.norm2(one_matrix)
    total2 += gpt.norm2(gpt.matrix.det(U) - one_scalar) / gpt.norm2(one_scalar)
    return total2**0.5
gd = opt.gradient_descent(maxiter=p_maxiter_gd, eps=p_eps, step=p_gd_step) # Coulomb functional on each time-slice Nt_split = len(Vt_split) g.message(f"This rank has {Nt_split} time slices") for t in range(Nt_split): f = g.qcd.gauge.fix.landau([Usep_split[mu][t] for mu in range(3)]) fa = opt.fourier_accelerate.inverse_phat_square(Vt_split[t].grid, f) g.message(f"Run local time slice {t} / {Nt_split}") if rng is not None: rng.element(Vt_split[t]) else: Vt_split[t] @= g.identity(Vt_split[t]) if not cg(fa)(Vt_split[t], Vt_split[t]): gd(fa)(Vt_split[t], Vt_split[t]) group_defect = g.group.defect(Vt_split[t]) g.message(f"Distance to group manifold: {group_defect}") if group_defect > 1e-12: g.message( f"Time slice {t} on split grid {Vt_split[t].grid.srank} has group_defect = {group_defect}" ) sys.exit(1) g.message("Unsplit") g.unsplit(Vt, Vt_split, cache)
def project_to_traceless_anti_hermitian(src):
    """Return the traceless anti-hermitian part of src: (src - src^dag)/2 - tr(.)/N."""
    src = g.eval(src)
    dim = src.otype.shape[0]
    anti_hermitian = g(0.5 * src - 0.5 * g.adj(src))
    anti_hermitian -= g.identity(src) * g.trace(anti_hermitian) / dim
    return anti_hermitian
assert eps2 < eps**2.0 # test inv for grid, eps in [(grid_dp, 1e-14), (grid_sp, 1e-6)]: g.message(f""" Test log,exp,det,tr for {grid.precision.__name__} """) for dtype in [ g.mspincolor, g.mcolor, g.mspin, lambda grid: g.mcomplex(grid, 8) ]: rng = g.random("test") m = rng.cnormal(dtype(grid)) minv = g.matrix.inv(m) eye = g.identity(m) eps2 = g.norm2(m * minv - eye) / (12 * grid.fsites) g.message(f"test M*M^-1 = 1 for {m.otype.__name__}: {eps2}") assert eps2 < eps**2 # make logarithm well defined m @= eye + 0.01 * m m2 = g.matrix.exp(g.matrix.log(m)) eps2 = g.norm2(m - m2) / g.norm2(m) g.message(f"exp(log(m)) == m: {eps2}") assert eps2 < eps**2.0 eps2 = g.norm2( g.matrix.log(g.matrix.det(g.matrix.exp(m))) - g.trace(m)) / g.norm2(m) g.message(f"log(det(exp(m))) == tr(m): {eps2}")