def __init__(self, grid, base, dimensions=None):
    # Fourier-accelerated preconditioner for operator `base`: builds a
    # momentum-space weight field used as a diagonal mask after FFT.
    self.base = base
    if dimensions is None:
        # default: accelerate over all grid dimensions
        dimensions = list(range(grid.nd))
    self.fft = g.fft(dimensions)
    # create FA mask: accumulate 4 sin^2(pi c_mu / L_mu) over the selected dimensions
    cache = {}
    self.weight = g.complex(grid)
    self.weight[:] = 0
    coor = g.coordinates(self.weight)
    for mu in dimensions:
        c_mu = coor[:, mu].astype(np.complex128)
        c_mu_l = g.complex(grid)
        # the cache dict stores the coordinate-import plan so it is reused per mu
        c_mu_l[coor, cache] = c_mu
        c_mu_l @= g.component.sin(c_mu_l * (np.pi / grid.gdimensions[mu]))
        c_mu_l @= c_mu_l * c_mu_l * complex(4.0)
        self.weight += c_mu_l
    # special consideration for zero: the zero mode has weight 0 and would be
    # inverted below; replace it with a volume-dependent infrared regulator
    self.weight[0, 0, 0, 0] = (2.0 * np.pi)**2.0 / np.prod(
        [grid.gdimensions[mu] for mu in dimensions])**(2.0 / len(dimensions))
    # invert the mask and scale by 4 * number of accelerated dimensions
    self.weight @= g.component.inv(self.weight) * complex(
        4.0 * len(dimensions))
    # wrap in a list (interface presumably expects one weight per field — TODO confirm)
    self.weight = [self.weight]
def __init__(self, n, precision):
    # Canonical map of an n-qubit register onto a one-dimensional lattice of
    # length 2**n (one site per computational basis state).
    self.n = n
    self.fdimensions = [2**n]
    self.grid = g.grid(self.fdimensions, precision)
    self.verbose = g.default.is_verbose("qis_map")
    self.zero_coordinate = (0, )  # |00000 ... 0> state
    t = g.timer("map_init")
    t("coordinates")
    # TODO: need to split over multiple dimensions, single dimension can hold at most 32 bits
    self.coordinates = g.coordinates(self.grid)
    # not_coordinates[i]: site coordinates with bit i flipped
    # (presumably used to implement bit-flip operations — TODO confirm)
    self.not_coordinates = [
        np.bitwise_xor(self.coordinates, 2**i) for i in range(n)
    ]
    for i in range(n):
        # freeze the arrays so they can be safely shared/cached
        self.not_coordinates[i].flags["WRITEABLE"] = False
    t("masks")
    # one_mask[i] / zero_mask[i]: lattice masks selecting sites where bit i is 1 / 0
    self.one_mask = []
    self.zero_mask = []
    for i in range(n):
        proj = np.bitwise_and(self.coordinates, 2**i)
        mask = g.complex(self.grid)
        g.coordinate_mask(mask, proj != 0)
        self.one_mask.append(mask)
        mask = g.complex(self.grid)
        g.coordinate_mask(mask, proj == 0)
        self.zero_mask.append(mask)
    t()
    if self.verbose:
        g.message(t)
def fill_su2_components_into_suN(dst, su2_comps, su2_indices, cache=None):
    # Embed an SU(2) matrix, given by its four real component fields
    # su2_comps, into the (i1, i2) subgroup block of an SU(N) matrix;
    # all entries outside the subgroup block are set to the identity.
    # dst may be a gpt.lattice or an already color-separated dict keyed by (i, j).
    if isinstance(dst, gpt.lattice):
        n_colors = dst.otype.Nc
        separated = gpt.separate_color(dst)
    elif isinstance(dst, dict):
        # infer N from the number of (i, j) keys
        n_keys = len(dst)
        n_colors = int(np.sqrt(n_keys))
        assert (int(n_colors) - 1, int(n_colors) - 1) in dst
        separated = dst
    if cache is None:
        zero = gpt.complex(separated[0, 0].grid)
        zero[:] = 0.0
        one = gpt.complex(separated[0, 0].grid)
        one[:] = 1.0
    else:
        # caller-provided (zero, one) constant fields to avoid reallocation
        zero, one = cache
    i1, i2 = su2_indices
    # 2x2 block: [[a0 + i a3, a2 + i a1], [-a2 + i a1, a0 - i a3]]
    separated[i1, i1] @= su2_comps[0] + 1j * su2_comps[3]
    separated[i1, i2] @= su2_comps[2] + 1j * su2_comps[1]
    separated[i2, i1] @= -su2_comps[2] + 1j * su2_comps[1]
    separated[i2, i2] @= su2_comps[0] - 1j * su2_comps[3]
    # identity on the complement of the subgroup block
    for ii in range(n_colors):
        for jj in range(n_colors):
            if ii not in [i1, i2] or jj not in [i1, i2]:
                separated[ii, jj] @= one if ii == jj else zero
    if isinstance(dst, gpt.lattice):
        gpt.merge_color(dst, separated)
def det(A):
    """Return the site-wise determinant of the matrix field A as a complex lattice."""
    A = gpt.eval(A)
    assert type(A) == gpt.lattice
    result = gpt.complex(A.grid)
    cgpt.determinant(result.v_obj[0], gpt.util.to_list(A))
    return result
def is_element(self, U):
    """Check whether U is numerically in the group: unitary with unit determinant."""
    identity_matrix = gpt.identity(U)
    identity_scalar = gpt.identity(gpt.complex(U.grid))
    # relative unitarity defect
    defect2 = gpt.norm2(U * gpt.adj(U) - identity_matrix) / gpt.norm2(identity_matrix)
    # relative determinant defect
    defect2 += gpt.norm2(gpt.matrix.det(U) - identity_scalar) / gpt.norm2(identity_scalar)
    return defect2**0.5 < U.grid.precision.eps * 10.0
def __init__(
    self,
    rng,
    number_of_qubits,
    precision=None,
    bit_map=None,
    lattice=None,
    bit_permutation=None,
    current_coordinates=None,
    bit_flipped_plan=None,
):
    """
    Quantum-register state stored as a complex lattice field.

    Parameters:
        rng: random number generator used by the state
        number_of_qubits: number of qubits represented
        precision: lattice precision (defaults to g.double)
        bit_map: qubit-to-lattice map (defaults to map_canonical)
        lattice: optional existing state lattice; if None, |00...0> is created
        bit_permutation: logical-to-stored bit permutation (defaults to identity)
        current_coordinates: coordinate layout of the state
            (defaults to bit_map.coordinates)
        bit_flipped_plan: optional cache of bit-flip plans (defaults to empty dict)
    """
    if precision is None:
        precision = g.double
    if bit_map is None:
        bit_map = map_canonical(number_of_qubits, precision)
    if bit_permutation is None:
        bit_permutation = list(range(number_of_qubits))
    # Fix: default current_coordinates independently of bit_permutation.
    # Previously the default was only applied inside the bit_permutation-is-None
    # branch, so a caller passing a permutation without coordinates ended up
    # with self.current_coordinates = None.
    if current_coordinates is None:
        current_coordinates = bit_map.coordinates
    self.rng = rng
    self.precision = precision
    self.number_of_qubits = number_of_qubits
    self.bit_map = bit_map
    self.current_coordinates = current_coordinates
    self.bit_flipped_plan = {} if bit_flipped_plan is None else bit_flipped_plan
    self.bit_permutation = bit_permutation
    self.classical_bit = [None] * number_of_qubits
    if lattice is not None:
        self.lattice = lattice
    else:
        # initialize to the computational basis state |00000 ... 0>
        self.lattice = g.complex(self.bit_map.grid)
        self.lattice[:] = 0
        self.lattice[self.bit_map.zero_coordinate] = 1
def apply_exp_ixp(dst, src, p):
    """Multiply src by the plane-wave phase e^{i x.p} and store the result in dst."""
    # TODO: add sparse field support (x.internal_coordinates(), x.coordinates())
    coords = src.mview_coordinates()
    # build the phase field on the same checkerboard as src
    phase = gpt.complex(src.grid)
    phase.checkerboard(src.checkerboard())
    phase[coords] = cgpt.coordinates_momentum_phase(coords, p, src.grid.precision)
    dst @= phase * src
def one_mask(self):
    """Return (and cache) a field that is 1 on the embedded coordinates and 0 elsewhere."""
    if self.one_mask_cache is not None:
        return self.one_mask_cache
    mask = gpt.complex(self.embedding_grid)
    mask[:] = 0
    mask[self.embedded_coordinates] = 1
    self.one_mask_cache = mask
    return mask
def lie(self, out, p={}):
    """Fill out with a random group element exp(i * scale * sum_a c_a T_a),
    where the coefficients c_a are drawn uniformly from [-0.5, 0.5]."""
    scale = p["scale"]
    grid = out.grid
    coefficient = gpt.complex(grid)
    algebra_element = gpt.lattice(out)
    algebra_element[:] = 0
    # accumulate random linear combination of the generators
    for generator in out.otype.generators(grid.precision.complex_dtype):
        self.uniform_real(coefficient, {"min": -0.5, "max": 0.5})
        algebra_element += scale * 1j * coefficient * generator
    out @= gpt.matrix.exp(algebra_element)
    return out
def __init__(self, coarse_grid, basis, mask=None, basis_n_block=8):
    # Block map between the fine grid of the basis vectors and coarse_grid;
    # exposes project (fine -> coarse) and promote (coarse -> fine) operators.
    assert type(coarse_grid) == gpt.grid
    assert len(basis) > 0
    if mask is None:
        # default mask: include every fine-grid site
        mask = gpt.complex(basis[0].grid)
        mask.checkerboard(basis[0].checkerboard())
        mask[:] = 1
    else:
        assert basis[0].grid is mask.grid
        assert len(mask.v_obj) == 1
    c_otype = gpt.ot_vector_complex_additive_group(len(basis))
    basis_size = c_otype.v_n1[0]
    self.coarse_grid = coarse_grid
    self.basis = basis
    self.obj = cgpt.create_block_map(
        coarse_grid.obj,
        basis,
        basis_size,
        basis_n_block,
        mask.v_obj[0],
    )

    def _project(coarse, fine):
        # fine fields must live on the same checkerboard as the basis
        assert fine[0].checkerboard().__name__ == basis[0].checkerboard().__name__
        cgpt.block_project(self.obj, coarse, fine)

    def _promote(fine, coarse):
        assert fine[0].checkerboard().__name__ == basis[0].checkerboard().__name__
        cgpt.block_promote(self.obj, coarse, fine)

    self.project = gpt.matrix_operator(
        mat=_project,
        vector_space=(
            gpt.vector_space.explicit_grid_otype(coarse_grid, c_otype),
            gpt.vector_space.explicit_lattice(basis[0]),
        ),
        accept_list=True,
    )
    self.promote = gpt.matrix_operator(
        mat=_promote,
        vector_space=(
            gpt.vector_space.explicit_lattice(basis[0]),
            gpt.vector_space.explicit_grid_otype(coarse_grid, c_otype),
        ),
        accept_list=True,
    )
def weight(self):
    # Multiplicity field: counts how often each coordinate occurs in
    # self.local_coordinates; cached after the first call.
    if self.weight_cache is not None:
        return self.weight_cache
    unique_coordinates, count = np.unique(self.local_coordinates,
                                          axis=0,
                                          return_counts=True)
    # preserve the (sub)class of the coordinate array expected by lattice indexing
    unique_coordinates = unique_coordinates.view(type(self.local_coordinates))
    count = count.astype(self.grid.precision.complex_dtype)
    weight = gpt.complex(self.grid)
    weight[:] = 0
    weight[unique_coordinates] = count
    self.weight_cache = weight
    return weight
def apply_exp_ixp(dst, src, p, origin, cache):
    """Multiply src by the plane-wave phase e^{i (x - origin).p}, caching the
    phase field per (grid, checkerboard, origin, momentum) combination."""
    cache_key = f"{src.grid}_{src.checkerboard().__name__}_{origin}_{p}"
    if cache_key not in cache:
        x = gpt.coordinates(src)
        phase = gpt.complex(src.grid)
        phase.checkerboard(src.checkerboard())
        if origin is None:
            x_relative = x
        else:
            # shift coordinates so the phase is measured relative to origin
            x_relative = relative_coordinates(x, origin, src.grid.fdimensions)
        phase[x] = cgpt.coordinates_momentum_phase(x_relative, p, src.grid.precision)
        cache[cache_key] = phase
    dst @= cache[cache_key] * src
def __init__(self, coarse_grid, basis, mask=None, basis_n_block=8):
    # Block map between the fine grid of the basis vectors and coarse_grid;
    # exposes project (fine -> coarse) and promote (coarse -> fine) operators.
    assert type(coarse_grid) == gpt.grid
    assert len(basis) > 0
    if mask is None:
        # default mask: include every fine-grid site
        mask = gpt.complex(basis[0].grid)
        mask.checkerboard(basis[0].checkerboard())
        mask[:] = 1
    else:
        assert basis[0].grid is mask.grid
        assert len(mask.v_obj) == 1
    c_otype = gpt.ot_vsinglet(len(basis))
    basis_size = c_otype.v_n1[0]
    self.coarse_grid = coarse_grid
    self.basis = basis
    self.obj = cgpt.create_block_map(
        coarse_grid.obj,
        basis,
        basis_size,
        basis_n_block,
        mask.v_obj[0],
    )

    def _project(coarse, fine):
        # fine fields must live on the same checkerboard as the basis
        assert fine[0].checkerboard().__name__ == basis[0].checkerboard().__name__
        cgpt.block_project(self.obj, coarse, fine)

    def _promote(fine, coarse):
        assert fine[0].checkerboard().__name__ == basis[0].checkerboard().__name__
        cgpt.block_promote(self.obj, coarse, fine)

    self.project = gpt.matrix_operator(
        mat=_project,
        otype=(c_otype, basis[0].otype),
        grid=(coarse_grid, basis[0].grid),
        cb=(None, basis[0].checkerboard()),
        accept_list=True,
    )
    self.promote = gpt.matrix_operator(
        mat=_promote,
        otype=(basis[0].otype, c_otype),
        grid=(basis[0].grid, coarse_grid),
        cb=(basis[0].checkerboard(), None),
        accept_list=True,
    )
def perform(self, root):
    # Compute momentum-projected correlators between basis vectors loaded
    # from disk and write them to a correlator file.
    global basis_size, T
    output_correlator = g.corr_io.writer(f"{root}/{self.name}/head.dat")
    vcj = g.load(f"{root}/{self.conf}/pm_basis/basis")
    for m in self.mom:
        mom_str = "_".join([str(x) for x in m])
        # momentum in lattice units: 2 pi m_i / L_i for the spatial directions, 0 in time
        p = np.array([
            2.0 * np.pi / vcj[0].grid.gdimensions[i] * m[i] for i in range(3)
        ] + [0])
        phase = g.complex(vcj[0].grid)
        phase[:] = 1
        phase @= g.exp_ixp(p) * phase
        g.message("L = ", vcj[0].grid.gdimensions)
        g.message("Momentum", p, m)
        for n in range(basis_size):
            t0 = g.time()
            vc_n = g(phase * vcj[n])
            t1 = g.time()
            # inner products with all basis vectors
            slc_nprime = [
                g(g.adj(vcj[nprime]) * vc_n) for nprime in range(basis_size)
            ]
            t2 = g.time()
            # slice over the time direction
            slc = g.slice(slc_nprime, 3)
            t3 = g.time()
            for nprime in range(basis_size):
                output_correlator.write(
                    f"output/mom/{mom_str}_n_{nprime}_{n}", slc[nprime])
            t4 = g.time()
            if n % 10 == 0:
                g.message(n, "Timing", t1 - t0, t2 - t1, t3 - t2, t4 - t3)
    output_correlator.close()
def __init__(self, rng, number_of_qubits, precision=None, bit_map=None, lattice=None):
    """Qubit-register state; starts in |00...0> unless a lattice is supplied."""
    if precision is None:
        precision = g.double
    if bit_map is None:
        bit_map = g.qis.map.canonical(number_of_qubits, precision)
    self.rng = rng
    self.precision = precision
    self.number_of_qubits = number_of_qubits
    self.bit_map = bit_map
    self.classical_bit = [None for _ in range(number_of_qubits)]
    if lattice is None:
        # initialize to the computational basis state |00000 ... 0>
        self.lattice = g.complex(self.bit_map.grid)
        self.lattice[:] = 0
        self.lattice[self.bit_map.zero_coordinate] = 1
    else:
        self.lattice = lattice
def element(self, out, p={}):
    # Fill out with a random group element obtained by converting a random
    # cartesian (algebra) element; p supplies "scale" and "normal"
    # (normal vs. uniform[-0.5, 0.5] coefficient distribution).
    if type(out) == list:
        return [self.element(x, p) for x in out]
    t = gpt.timer("element")
    scale = p["scale"]
    normal = p["normal"]
    grid = out.grid
    t("complex")
    ca = gpt.complex(grid)
    ca.checkerboard(out.checkerboard())
    t("cartesian_space")
    cartesian_space = gpt.group.cartesian(out)
    t("csset")
    cartesian_space[:] = 0
    t("gen")
    gen = cartesian_space.otype.generators(grid.precision.complex_dtype)
    t()
    # random linear combination of the generators
    for ta in gen:
        t("rng")
        if normal:
            self.normal(ca)
        else:
            self.uniform_real(ca, {"min": -0.5, "max": 0.5})
        t("lc")
        cartesian_space += scale * ca * ta
    t("conv")
    # map the algebra element to the group and store in out
    gpt.convert(out, cartesian_space)
    t()
    # gpt.message(t)
    return out
def extract_su2_components(suN_matrix, su2_indices):
    """Extract the four real component fields of the (i1, i2) SU(2) subgroup
    block of an SU(N) matrix (given as a lattice or a color-separated dict)."""
    if isinstance(suN_matrix, gpt.lattice):
        separated = gpt.separate_color(suN_matrix)
    elif isinstance(suN_matrix, dict):
        # sanity-check that the dict really is an N x N color separation
        n_keys = len(suN_matrix)
        n_colors = int(np.sqrt(n_keys))
        assert (n_colors - 1, n_colors - 1) in suN_matrix
        separated = suN_matrix
    i1, i2 = su2_indices
    components = [gpt.complex(separated[0, 0].grid) for _ in range(4)]
    # a0 = Re(m11 + m22), a1 = Im(m12 + m21), a2 = Re(m12 - m21), a3 = Im(m11 - m22)
    components[0] @= gpt.component.real(
        gpt.eval(separated[i1, i1] + separated[i2, i2]))
    components[1] @= gpt.component.imag(
        gpt.eval(separated[i1, i2] + separated[i2, i1]))
    components[2] @= gpt.component.real(
        gpt.eval(separated[i1, i2] - separated[i2, i1]))
    components[3] @= gpt.component.imag(
        gpt.eval(separated[i1, i1] - separated[i2, i2]))
    return components
for i in range(n_smear): g.message("smear", i) U_temp = g.qcd.gauge.smear.stout(U_temp, rho=rho_smear) for u_dst, u_src in zip(U, U_temp): u_dst[:, :, :, t] = u_src[:, :, :, t] # save smeared gauge field g.save(config_smeared, U, g.format.nersc()) g.message("Plaquette after", g.qcd.gauge.plaquette(U)) for u in U: g.message("Unitarity violation", g.norm2(u * g.adj(u) - g.identity(u)) / g.norm2(u)) g.message( "SU violation", g.norm2(g.matrix.det(u) - g.identity(g.complex(u.grid))) / g.norm2(u), ) # separate time slices and define laplace operator U3 = [g.separate(u, 3) for u in U[0:3]] for t in range(Nt): if t % t_groups != t_group: continue g.message(f"Laplace basis for time-slice {t}") U3_t = [u[t] for u in U3] grid = U3_t[0].grid lap = g.create.smear.laplace(
g.message(res) n = 10000 res = [0, 0, 0, 0, 0] for i in range(n): z = rng.normal() res[0] += 1 res[1] += z res[2] += z**2 res[3] += z**3 res[4] += z**4 g.message(res[1] / res[0], res[2] / res[0], res[3] / res[0], res[4] / res[0]) v = g.complex(grid_dp) rng.normal(v) test_sequence_comp = np.array([ v[0, 0, 0, 0].real, v[2, 0, 0, 0].real, v[0, 2, 0, 0].real, v[1, 3, 1, 3].real, v[3, 2, 1, 0].real, ]) # print([ v[0,0,0,0].real, v[2,0,0,0].real, v[0,2,0,0].real, v[1,3,1,3].real, v[3,2,1,0].real ]) test_sequence_ref = np.array( [ 1.0336693180495347, -0.23474901515559715, -0.26622475825072717,
def create_links(A, fmat, basis, params):
    # Coarsen the fine-grid operator fmat onto the coarse links A using the
    # given basis vectors (Galerkin coarsening with directional hopping masks).
    # NOTE: we expect the blocks in the basis vectors
    # to already be orthogonalized!
    # parameters
    make_hermitian = params["make_hermitian"]
    save_links = params["save_links"]
    assert not (make_hermitian and not save_links)
    # verbosity
    verbose = gpt.default.is_verbose("coarsen")
    # setup timings
    t = gpt.timer("coarsen")
    t("setup")
    # get grids
    f_grid = basis[0].grid
    c_grid = A[0].grid
    # directions/displacements we coarsen
    dirs = [1, 2, 3, 4] if f_grid.nd == 5 else [0, 1, 2, 3]
    disp = +1
    dirdisps_full = list(zip(dirs * 2, [+1] * 4 + [-1] * 4))
    dirdisps_forward = list(zip(dirs, [disp] * 4))
    nhops = len(dirdisps_full)
    selflink = nhops  # index of the diagonal (self-coupling) link
    # setup fields
    Mvr = [gpt.lattice(basis[0]) for i in range(nhops)]
    tmp = gpt.lattice(basis[0])
    oproj = gpt.vcomplex(c_grid, len(basis))
    selfproj = gpt.vcomplex(c_grid, len(basis))
    # setup masks
    onemask, blockevenmask, blockoddmask = (
        gpt.complex(f_grid),
        gpt.complex(f_grid),
        gpt.complex(f_grid),
    )
    dirmasks = [gpt.complex(f_grid) for p in range(nhops)]
    # auxilliary stuff needed for masks
    t("masks")
    onemask[:] = 1.0
    coor = gpt.coordinates(blockevenmask)
    block = numpy.array(f_grid.ldimensions) / numpy.array(c_grid.ldimensions)
    block_cb = coor[:, :] // block[:]
    # fill masks for sites within even/odd blocks
    gpt.coordinate_mask(blockevenmask, numpy.sum(block_cb, axis=1) % 2 == 0)
    blockoddmask @= onemask - blockevenmask
    # fill masks for sites on borders of blocks
    dirmasks_forward_np = coor[:, :] % block[:] == block[:] - 1
    dirmasks_backward_np = coor[:, :] % block[:] == 0
    # NOTE(review): for the 5d case dirs = [1, 2, 3, 4], so dirmasks[mu + 4]
    # indexes up to dirmasks[8] while only nhops = 8 masks (indices 0..7)
    # exist, and dirdisps_full enumerates p = 0..7 — verify the 5d path.
    for mu in dirs:
        gpt.coordinate_mask(dirmasks[mu], dirmasks_forward_np[:, mu])
        gpt.coordinate_mask(dirmasks[mu + 4], dirmasks_backward_np[:, mu])
    # save applications of matrix and coarsening if possible
    dirdisps = dirdisps_forward if save_links else dirdisps_full
    # create block maps
    t("blockmap")
    dirbms = [
        gpt.block.map(c_grid, basis, dirmasks[p])
        for p, (mu, fb) in enumerate(dirdisps)
    ]
    fullbm = gpt.block.map(c_grid, basis)

    for i, vr in enumerate(basis):
        # apply directional hopping terms
        # this triggers len(dirdisps) comms -> TODO expose DhopdirAll from Grid
        # BUT problem with vector<Lattice<...>> in rhs
        t("apply_hop")
        [fmat.Mdir(*dirdisp)(Mvr[p], vr) for p, dirdisp in enumerate(dirdisps)]
        # coarsen directional terms + write to link
        for p, (mu, fb) in enumerate(dirdisps):
            t("coarsen_hop")
            dirbms[p].project(oproj, Mvr[p])
            t("copy_hop")
            A[p][:, :, :, :, :, i] = oproj[:]
        # fast diagonal term: apply full matrix to both block cbs separately and discard hops into other cb
        t("apply_self")
        tmp @= (blockevenmask * fmat * vr * blockevenmask +
                blockoddmask * fmat * vr * blockoddmask)
        # coarsen diagonal term
        t("coarsen_self")
        fullbm.project(selfproj, tmp)
        # write to self link
        t("copy_self")
        A[selflink][:, :, :, :, :, i] = selfproj[:]
        if verbose:
            gpt.message("coarsen: done with vector %d" % i)
    # communicate opposite links
    if save_links:
        t("comm")
        communicate_links(A, dirdisps_forward, make_hermitian)
    t()
    if verbose:
        gpt.message(t)
# convert the gauge field to single precision and verify it is usable
Uf = g.convert(U, g.single)
g.message(g.qcd.gauge.plaquette(Uf))
Uf0 = g.convert(U[0], g.single)
g.message(g.norm2(Uf0))
del Uf0
g.meminfo()

# Slice
x = g.sum(Uf[0])
print(x)

# small 4^4 grid for checkerboard tests
grid = g.grid([4, 4, 4, 4], g.single)
gr = g.complex(grid)
gr[0, 0, 0, 0] = 2
gr[1, 0, 0, 0] = 3

# red/black (even/odd) checkerboarded grid
gride = g.grid([4, 4, 4, 4], g.single, g.redblack)
gre = g.complex(gride)
# pick the even sites of gr into gre, modify, and scatter back
g.pick_cb(g.even, gre, gr)
gre[2, 0, 0, 0] = 4
g.set_cb(gr, gre)
g.meminfo()
print(gre)
gre.checkerboard(g.odd)
def __call__(self, link, staple, mask):
    """
    Generate new U(1) links with P(U) = e^{ Re Staple U } using the
    heatbath algorithm of Hattori-Nakajima (hep-lat/9210016), which
    draws a random variable x in (-pi, pi) from P(x) ~ exp(a cos(x)).
    Only sites where mask is nonzero are updated.
    """
    verbose = g.default.is_verbose(
        "u1_heat_bath"
    )  # need verbosity categories [ performance, progress ]
    assert type(link) == g.lattice and type(staple) == g.lattice
    # component-wise functions needed below
    exp = g.component.exp
    log = g.component.log
    sqrt = g.component.sqrt
    cos = g.component.cos
    tan = g.component.tan
    atan = g.component.atan
    cosh = g.component.cosh
    tanh = g.component.tanh
    atanh = g.component.atanh
    inv = g.component.inv

    # functions needed in Hattori-Nakajima method
    def gmax(x, y):
        return g.where(x > y, x, y)

    def gmin(x, y):
        return g.where(x < y, x, y)

    def h(x):
        # inverse of the sampling transformation, maps uniform x to an angle
        return g.eval(2.0 * inv(alpha) * atanh(
            g.eval(beta_s * tan(g.eval((2.0 * x - one) * tmp)))))

    def gg(x):
        # acceptance probability of the proposal h(x)
        return exp(g.eval(-a * G(h(x))))

    def G(x):
        return g.eval(one - cos(x) - a_inv * log(
            g.eval(one + (cosh(g.eval(alpha * x)) - one) *
                   inv(g.eval(one + beta)))))

    # temporaries
    a = g.component.abs(staple)  # absolute value of staple
    a_inv = g.eval(inv(a))  # needed several times
    grid = a.grid
    one = g.identity(g.complex(grid))
    zero = g.identity(g.complex(grid))
    zero[:] = 0
    Unew = g.complex(grid)  # proposal for new links
    accepted = g.complex(grid)  # mask for accepted links
    num_sites = round(g.norm2(g.where(mask, one, zero)))
    x1 = g.complex(grid)
    x2 = g.complex(grid)
    nohit = 0  # to compute acceptance ratio

    # parameters of Hattori-Nakajima method
    eps = 0.001
    astar = 0.798953686083986
    amax = gmax(zero, g.eval(a - astar * one))
    delta = g.eval(0.35 * amax + 1.03 * sqrt(amax))
    alpha = gmin(sqrt(g.eval(a * (2.0 - eps))),
                 gmax(sqrt(g.eval(eps * a)), delta))
    beta = g.eval((gmax(
        g.eval(alpha * alpha * a_inv),
        g.eval((cosh(g.eval(np.pi * alpha)) - one) *
               inv(g.eval(exp(g.eval(2.0 * a)) - one))),
    ) - one))
    beta_s = sqrt(g.eval((one + beta) * inv(g.eval(one - beta))))
    tmp = atan(g.eval(inv(beta_s) * tanh(g.eval(0.5 * np.pi * alpha))))

    # main loop (large optimization potential but not time-critical anyway)
    num_accepted = 0
    accepted[:] = 0
    Unew[:] = 0
    # worst-case acceptance ratio of Hattori-Nakajima is 0.88
    while num_accepted < num_sites:
        self.rng.uniform_real(x1, min=0.0, max=1.0)
        self.rng.uniform_real(x2, min=0.0, max=1.0)
        # keep already-accepted proposals, redraw the rest
        Unew = g.where(accepted, Unew, exp(g.eval(1j * h(x1))))
        newly_accepted = g.where(x2 < gg(x1), one, zero)
        accepted = g.where(
            mask, g.where(newly_accepted, newly_accepted, accepted), zero)
        num_accepted = round(g.norm2(g.where(accepted, one, zero)))
        nohit += num_sites - num_accepted
    if verbose:
        g.message(
            f"Acceptance ratio for U(1) heatbath update = {num_sites/(num_sites+nohit)}"
        )
    # Unew was drawn with phase angle centered about zero
    # -> need to shift this by phase angle of staple
    # (we update every link, thus accepted = mask)
    link @= g.where(accepted, Unew * staple * a_inv, link)
# MPI ################################################################################ grid_sp.barrier() nodes = grid_sp.globalsum(1) assert nodes == grid_sp.Nprocessors a = np.array([[1.0, 2.0, 3.0], [4, 5, 6j]], dtype=np.complex64) b = np.copy(a) grid_sp.globalsum(a) eps = a / nodes - b assert np.linalg.norm(eps) < 1e-7 ################################################################################ # Test Cshifts ################################################################################ # create a complex lattice on the grid src = g.complex(grid_sp) # zero out all points and set the value at global position 0,0,0,0 to 2 src[:] = 0 src[0, 0, 0, 0] = complex(2, 1) # create a new lattice that is compatible with another new = g.lattice(src) # create a new lattice that is a copy of another original = g.copy(src) # or copy the contents from one lattice to another g.copy(new, src) # cshift into a new lattice dst
################################################################################ # Test mview ################################################################################ c = g.coordinates(l_dp) x = l_dp[c] mv = g.mview(x) assert mv.itemsize == 1 and mv.shape[0] == len(mv) assert sys.getrefcount(x) == 3 del mv assert sys.getrefcount(x) == 2 ################################################################################ # Test pinning ################################################################################ l_v = g.complex(grid_sp) pin = g.pin(l_v, g.accelerator) del l_v del pin ################################################################################ # Test assignments ################################################################################ pos = g.coordinates(l_dp) lhs = g.lattice(l_dp) def assign_copy(): g.copy(lhs, l_dp)
# reconstruct algebra2 from the real coordinates of algebra and compare
algebra2.otype.coordinates(
    algebra2, g.component.real(algebra.otype.coordinates(algebra)))
algebra -= algebra2
eps = (g.norm2(algebra) / n0)**0.5
g.message(f"Test representation: {eps}")
assert eps < eps_ref

################################################################################
# Test projection schemes on promoting SP to DP group membership
################################################################################
V0 = g.convert(rng.element(g.mcolor(grid_sp)), g.double)
for method in ["defect_left", "defect_right"]:
    V = g.copy(V0)
    I = g.identity(V)
    I_s = g.identity(g.complex(grid_dp))
    # iterate the projection and watch the defects shrink
    for i in range(3):
        eps_uni = (g.norm2(g.adj(V) * V - I) / g.norm2(I))**0.5
        eps_det = (g.norm2(g.matrix.det(V) - I_s) / g.norm2(I_s))**0.5
        g.message(
            f"Before {method} iteration {i}, unitarity defect: {eps_uni}, determinant defect: {eps_det}"
        )
        g.project(V, method)
    assert eps_uni < 1e-14 and eps_det < 1e-14

################################################################################
# Test SU(2) fundamental and conversion to adjoint
################################################################################
rng = g.random("test")
def project_to_suN_step(dest, unprojected):
    # One sweep of projecting `unprojected` onto SU(N): for every SU(2)
    # subgroup, extract the subgroup components of dest * unprojected,
    # normalize/conjugate them, and rotate dest accordingly.
    t_total = -gpt.time()
    t_product, t_separate, t_merge, t_su2extract, t_su2fill, t_calcnorm, t_applynorm = [
        0.0 for _ in range(7)
    ]
    vol = dest.grid.fsites
    n_colors = dest.otype.Nc
    tmp = gpt.mcolor(dest.grid)
    zero = gpt.complex(dest.grid)
    zero[:] = 0.0
    one = gpt.complex(dest.grid)
    one[:] = 1.0
    square = gpt.component.pow(2)
    norm = gpt.complex(dest.grid)
    for su2_index in range(n_colors * (n_colors - 1) // 2):
        # map the flat su2_index onto the index pair (i1, i2) of the subgroup
        index, i1, i2 = 0, None, None
        for ii in range(1, n_colors):
            for jj in range(n_colors - ii):
                if index == su2_index and i1 is None:
                    i1 = jj
                    i2 = ii + jj
                index += 1
        t_product -= gpt.time()
        tmp @= dest * unprojected
        t_product += gpt.time()
        t_separate -= gpt.time()
        tmp_sep = gpt.separate_color(tmp)
        t_separate += gpt.time()
        t_su2extract -= gpt.time()
        su2_components = extract_su2_components(tmp_sep, [i1, i2])
        t_su2extract += gpt.time()
        t_calcnorm -= gpt.time()
        # 1 / |a| with |a|^2 = a0^2 + a1^2 + a2^2 + a3^2
        norm @= gpt.component.inv(
            gpt.component.sqrt(
                gpt.eval(su2_components[0] * su2_components[0] +
                         su2_components[1] * su2_components[1] +
                         su2_components[2] * su2_components[2] +
                         su2_components[3] * su2_components[3])))
        t_calcnorm += gpt.time()
        t_applynorm -= gpt.time()
        # normalize and take the inverse (conjugate) rotation
        su2_components[0] @= su2_components[0] * norm
        su2_components[1] @= -su2_components[1] * norm
        su2_components[2] @= -su2_components[2] * norm
        su2_components[3] @= -su2_components[3] * norm
        t_applynorm += gpt.time()
        t_su2fill -= gpt.time()
        fill_su2_components_into_suN(tmp_sep,
                                     su2_components, [i1, i2],
                                     cache=[zero, one])
        t_su2fill += gpt.time()
        t_merge -= gpt.time()
        gpt.merge_color(tmp, tmp_sep)
        t_merge += gpt.time()
        t_product -= gpt.time()
        dest @= tmp * dest
        t_product += gpt.time()
    t_total += gpt.time()
    if gpt.default.is_verbose("project_to_suN_step"):
        t_profiled = t_product + t_separate + t_merge + t_su2extract + t_su2fill + t_calcnorm + t_applynorm
        t_unprofiled = t_total - t_profiled
        gpt.message("project_to_suN_step: total", t_total, "s")
        gpt.message("project_to_suN_step: t_product", t_product, "s",
                    round(100 * t_product / t_total, 1), "%")
        gpt.message("project_to_suN_step: t_separate", t_separate, "s",
                    round(100 * t_separate / t_total, 1), "%")
        gpt.message("project_to_suN_step: t_merge", t_merge, "s",
                    round(100 * t_merge / t_total, 1), "%")
        gpt.message("project_to_suN_step: t_su2extract", t_su2extract, "s",
                    round(100 * t_su2extract / t_total, 1), "%")
        gpt.message("project_to_suN_step: t_su2fill", t_su2fill, "s",
                    round(100 * t_su2fill / t_total, 1), "%")
        gpt.message("project_to_suN_step: t_calcnorm", t_calcnorm, "s",
                    round(100 * t_calcnorm / t_total, 1), "%")
        gpt.message("project_to_suN_step: t_applynorm", t_applynorm, "s",
                    round(100 * t_applynorm / t_total, 1), "%")
        gpt.message("project_to_suN_step: unprofiled", t_unprofiled, "s",
                    round(100 * t_unprofiled / t_total, 1), "%")
# recompute the inner product component-wise and compare
for i in range(40):
    inner_comp += lhs.array.conjugate()[i] * rhs.array[i]
assert abs(inner_comp - inner) < 1e-14
assert inner.real == 700.0

# demonstrate slicing of internal indices
vc = g.vcomplex(grid, 30)
vc[0, 0, 0, 0, 0] = 1
vc[0, 0, 0, 0, 1:29] = 1.5
vc[0, 0, 0, 0, 29] = 2
vc_comp = g.vcomplex([1] + [1.5] * 28 + [2], 30)
eps2 = g.norm2(vc[0, 0, 0, 0] - vc_comp)
assert eps2 < 1e-13

# demonstrate mask
mask = g.complex(grid)
mask[:] = 0
mask[0, 1, 2, 3] = 1
vc[:] = vc[0, 0, 0, 0]
# masked field vanishes everywhere except the masked site
vcmask = g.eval(mask * vc)
assert g.norm2(vcmask[0, 0, 0, 0]) < 1e-13
assert g.norm2(vcmask[0, 1, 2, 3] - vc_comp) < 1e-13

# demonstrate sign flip needed for MG
sign = g.vcomplex([1] * 15 + [-1] * 15, 30)
vc_comp = g.vcomplex([1] + [1.5] * 14 + [-1.5] * 14 + [-2], 30)
vc @= sign * vc
eps2 = g.norm2(vc[0, 0, 0, 0] - vc_comp)
assert eps2 < 1e-13

# demonstrate matrix * vector
U_eps = g.algorithms.integrator.runge_kutta_4(U, dC, eps) # integrate manually with lower-order routine and smaller step t = 0.0 U_delta = g.copy(U) N_steps = 100 delta = eps / N_steps for i in range(N_steps): U_delta @= g.matrix.exp(1j * dC(U_delta) * delta) * U_delta eps_test = (g.norm2(U_delta - U_eps)**0.5 / U_eps.grid.gsites / U_eps.otype.nfloats / eps) eps_ref = 10 * delta**2.0 g.message(f"Test on {U.otype.__name__}: {eps_test} < {eps_ref}") assert eps_test < eps_ref # finally integrate a simple non-linear DGL # y'(t) = y(t)**2 # y(0) = 1 # expected: y(t) = 1.0 / (1.0 - t) U = g.complex(grid) U[:] = 1.0 eps = 0.01 U_eps = g.algorithms.integrator.runge_kutta_4(U, lambda u: g(u * u), eps)[0, 0, 0, 0] U_exp = 1.0 / (1.0 - eps) eps_test = abs(U_eps - U_exp) / eps eps_ref = eps**3.0 g.message(f"Test on geometric series: {eps_test} < {eps_ref}") assert eps_test < eps_ref
def __call__(self, link, staple, mask):
    # SU(N) heatbath update of `link` against `staple` via its SU(2)
    # subgroups, using the Kennedy-Pendleton accept/reject sampling;
    # only sites where mask is nonzero are updated.
    verbose = g.default.is_verbose(
        "su2_heat_bath"
    )  # need verbosity categories [ performance, progress ]
    project_method = self.params["project_method"]
    # params
    niter = self.params["niter"]
    # temporaries
    grid = link.grid
    u2 = g.lattice(grid, g.ot_matrix_su_n_fundamental_group(2))
    u2_eye = g.identity(u2)
    one = g.identity(g.complex(grid))
    zero = g.complex(grid)
    zero[:] = 0
    eps = g.complex(grid)
    eps[:] = grid.precision.eps * 10.0
    xr = [g.complex(grid) for i in range(4)]
    a = [g.complex(grid) for i in range(4)]
    two_pi = g.complex(grid)
    two_pi[:] = 2.0 * np.pi
    accepted = g.complex(grid)
    d = g.complex(grid)
    V_eye = g.identity(link)
    # pauli
    pauli1, pauli2, pauli3 = tuple([g.lattice(u2) for i in range(3)])
    ident = g.identity(u2)
    pauli1[:] = 1j * np.array([[0, 1], [1, 0]],
                              dtype=grid.precision.complex_dtype)
    pauli2[:] = 1j * np.array([[0, 1j], [-1j, 0]],
                              dtype=grid.precision.complex_dtype)
    pauli3[:] = 1j * np.array([[1, 0], [0, -1]],
                              dtype=grid.precision.complex_dtype)
    # counter
    num_sites = round(g.norm2(g.where(mask, one, zero)))
    # shortcuts
    inv = g.component.pow(-1.0)
    # go through subgroups
    for subgroup in link.otype.su2_subgroups():
        V = g.eval(link * g.adj(staple))
        # extract u2 subgroup following Kennedy/Pendleton
        link.otype.block_extract(u2, V, subgroup)
        u2 @= u2 - g.adj(u2) + g.identity(u2) * g.trace(g.adj(u2))
        udet = g.matrix.det(u2)
        adet = g.component.abs(udet)
        # avoid dividing by a (numerically) vanishing determinant
        nzmask = adet > eps
        u2 @= g.where(nzmask, u2, u2_eye)
        udet = g.where(nzmask, udet, one)
        xi = g.eval(0.5 * g.component.sqrt(udet))
        u2 @= 0.5 * u2 * inv(xi)
        # make sure that su2 subgroup projection worked
        assert g.group.defect(u2) < u2.grid.precision.eps * 10.0
        xi @= 2.0 * xi
        alpha = g.component.real(xi)
        # main loop: Kennedy-Pendleton sampling of a0 with accept/reject
        it = 0
        num_accepted = 0
        accepted[:] = 0
        d[:] = 0
        while (num_accepted < num_sites) and (it < niter):
            self.rng.uniform_real(xr, min=0.0, max=1.0)
            xr[1] @= -g.component.log(xr[1]) * inv(alpha)
            xr[2] @= -g.component.log(xr[2]) * inv(alpha)
            xr[3] @= g.component.cos(g.eval(xr[3] * two_pi))
            xr[3] @= xr[3] * xr[3]
            xrsq = g.eval(xr[2] + xr[1] * xr[3])
            # keep already-accepted d values, redraw the rest
            d = g.where(accepted, d, xrsq)
            thresh = g.eval(one - d * 0.5)
            xrsq @= xr[0] * xr[0]
            newly_accepted = g.where(xrsq < thresh, one, zero)
            accepted = g.where(
                mask, g.where(newly_accepted, newly_accepted, accepted), zero)
            num_accepted = round(g.norm2(g.where(accepted, one, zero)))
            it += 1
        if verbose:
            g.message(f"SU(2)-heatbath update needed {it} / {niter} iterations")
        # update link: a0 from the sampled d, (a1, a2, a3) uniform on the sphere
        a[0] @= g.where(mask, one - d, zero)
        a123mag = g.component.sqrt(g.component.abs(one - a[0] * a[0]))
        phi, cos_theta = g.complex(grid), g.complex(grid)
        self.rng.uniform_real([phi, cos_theta])
        phi @= phi * two_pi
        cos_theta @= (cos_theta * 2.0) - one
        sin_theta = g.component.sqrt(
            g.component.abs(one - cos_theta * cos_theta))
        a[1] @= a123mag * sin_theta * g.component.cos(phi)
        a[2] @= a123mag * sin_theta * g.component.sin(phi)
        a[3] @= a123mag * cos_theta
        ua = g.eval(a[0] * ident + a[1] * pauli1 + a[2] * pauli2 +
                    a[3] * pauli3)
        b = g.where(mask, g.adj(u2) * ua, ident)
        link.otype.block_insert(V, b, subgroup)
        link @= g.where(accepted, V * link, link)
        # check: ua, b and V must remain unitary on accepted sites
        check = g.where(accepted, ua * g.adj(ua) - ident, 0.0 * ident)
        delta = (g.norm2(check) / g.norm2(ident)) ** 0.5
        assert delta < grid.precision.eps * 10.0
        check = g.where(accepted, b * g.adj(b) - ident, 0.0 * ident)
        delta = (g.norm2(check) / g.norm2(ident)) ** 0.5
        assert delta < grid.precision.eps * 10.0
        check = g.where(accepted, V * g.adj(V) - V_eye, 0.0 * V_eye)
        delta = (g.norm2(check) / g.norm2(V_eye)) ** 0.5
        assert delta < grid.precision.eps * 10.0
    # project
    g.project(link, project_method)
def __init__(self, U, params):
    # Staggered fermion operator on the gauge field U; optional parameters:
    # "mass", "mu5" (chiral chemical potential) and "chiral" (chiral U(1)
    # gauge field supplied as U[4:8]).
    assert U[0].grid.nd == 4, "Only 4 dimensions implemented for now."
    # there could be a chiral U(1) field after U
    shift_eo.__init__(self, U[0:4], params)
    # stuff that's needed later on
    Ndim = U[0].otype.Ndim
    otype = g.ot_vector_color(Ndim)
    grid = U[0].grid
    grid_eo = grid.checkerboarded(g.redblack)
    self.F_grid = grid
    self.U_grid = grid
    self.F_grid_eo = grid_eo
    self.U_grid_eo = grid_eo
    self.src_e = g.vector_color(grid_eo, Ndim)
    self.src_o = g.vector_color(grid_eo, Ndim)
    self.dst_e = g.vector_color(grid_eo, Ndim)
    self.dst_o = g.vector_color(grid_eo, Ndim)
    self.dst_e.checkerboard(g.even)
    self.dst_o.checkerboard(g.odd)
    # a value of 0.0 is treated as "parameter not set"
    self.mass = (
        params["mass"] if "mass" in params and params["mass"] != 0.0 else None
    )
    self.mu5 = params["mu5"] if "mu5" in params and params["mu5"] != 0.0 else None
    self.chiral = params["chiral"] if "chiral" in params else None
    # matrix operators
    self.Mooee = g.matrix_operator(
        lambda dst, src: self._Mooee(dst, src), otype=otype, grid=grid_eo
    )
    self.Meooe = g.matrix_operator(
        lambda dst, src: self._Meooe(dst, src), otype=otype, grid=grid_eo
    )
    matrix_operator.__init__(
        self, lambda dst, src: self._M(dst, src), otype=otype, grid=grid
    )
    self.Mdiag = g.matrix_operator(
        lambda dst, src: self._Mdiag(dst, src), otype=otype, grid=grid
    )
    # staggered phases
    # see also Grid/Grid/qcd/action/fermion/StaggeredImpl.h
    _phases = [g.complex(grid) for i in range(4)]
    for mu in range(4):
        _phases[mu][:] = 1.0
    # eta_mu(x) = (-1)^(x_0 + ... + x_{mu-1}); set the -1 sites explicitly
    # (loop nesting matches the strided assignments in the comment below)
    for x in range(0, grid.fdimensions[0], 2):
        _phases[1][x + 1, :, :, :] = -1.0
        for y in range(0, grid.fdimensions[1], 2):
            _phases[2][x, y + 1, :, :] = -1.0
            _phases[2][x + 1, y, :, :] = -1.0
            for z in range(0, grid.fdimensions[2], 2):
                _phases[3][x, y, z + 1, :] = -1.0
                _phases[3][x, y + 1, z, :] = -1.0
                _phases[3][x + 1, y, z, :] = -1.0
                _phases[3][x + 1, y + 1, z + 1, :] = -1.0
    # use stride > 1 once it is implemented:
    # _phases[1][1::2, :, :, :] = -1.0
    # _phases[2][0::2, 1::2, :, :] = -1.0
    # _phases[2][1::2, 0::2, :, :] = -1.0
    # _phases[3][0::2, 0::2, 1::2, :] = -1.0
    # _phases[3][0::2, 1::2, 0::2, :] = -1.0
    # _phases[3][1::2, 0::2, 0::2, :] = -1.0
    # _phases[3][1::2, 1::2, 1::2, :] = -1.0
    # split the phases onto the even/odd checkerboards
    self.phases = {}
    for cb in [g.even, g.odd]:
        _phases_eo = [g.lattice(grid_eo, _phases[0].otype) for i in range(4)]
        for mu in range(4):
            g.pick_checkerboard(cb, _phases_eo[mu], _phases[mu])
        self.phases[cb] = _phases_eo
    # theta is the chiral U(1) gauge field
    if self.chiral:
        # for now, allow both mu5 and chiral U(1) field for testing purposes
        # assert "mu5" not in params, "should not have both mu5 and chiral in params"
        assert len(U) == 8, "chiral U(1) field missing?"
        self.theta = {}
        for cb in [g.even, g.odd]:
            _theta_eo = [g.lattice(grid_eo, U[4].otype) for i in range(4)]
            for mu in range(4):
                g.pick_checkerboard(cb, _theta_eo[mu], U[4 + mu])
            self.theta[cb] = _theta_eo
    # s(x) is defined between (2.2) and (2.3) in
    # https://link.springer.com/content/pdf/10.1007/JHEP06(2015)094.pdf
    if self.mu5:
        self.s = {}
        _s = g.complex(grid)
        # s(x) alternates sign with the y coordinate
        for y in range(0, grid.fdimensions[1], 2):
            _s[:, y, :, :] = 1.0
            _s[:, y + 1, :, :] = -1.0
        for cb in [g.even, g.odd]:
            _s_eo = g.lattice(grid_eo, _s.otype)
            g.pick_checkerboard(cb, _s_eo, _s)
            self.s[cb] = _s_eo