def log(i, convergence_threshold=0.5):
    # Matrix logarithm of a lattice field via the Mercator series:
    # i = n*(1 + x), log(i) = log(n) + log(1+x)
    # x = i/n - 1, |x|^2 = <i/n - 1, i/n - 1> = |i|^2/n^2 + |1|^2 - (<i,1> + <1,i>)/n
    # d/dn |x|^2 = -2 |i|^2/n^3 + (<i,1> + <1,i>)/n^2 = 0 -> 2|i|^2 == n (<i,1> + <1,i>)
    # i.e. n below is the scale that minimizes the norm of the remainder x.
    if i.grid.precision != gpt.double:
        # series accumulation is done in double precision
        x = gpt.convert(i, gpt.double)
    else:
        x = gpt.copy(i)
    I = numpy.identity(x.otype.shape[0], x.grid.precision.complex_dtype)
    lI = gpt.lattice(x)
    lI[:] = I
    # optimal scale n from the stationarity condition derived above
    n = gpt.norm2(x) / gpt.inner_product(x, lI).real
    x /= n
    x -= lI
    # per-site magnitude of the remainder; must be < 1 for convergence
    n2 = gpt.norm2(x)**0.5 / x.grid.gsites
    # heuristic truncation order scaled by how fast the series decays
    # NOTE(review): evaluated before the convergence assert below; for
    # n2 >= 1 the log10 is non-negative -- confirm callers stay in range
    order = 8 * int(16 / (-numpy.log10(n2)))
    assert n2 < convergence_threshold
    # accumulate log(1+x) = x - x^2/2 + x^3/3 - ...; o starts at the j=1 term
    o = gpt.copy(x)
    xn = gpt.copy(x)
    for j in range(2, order + 1):
        xn @= xn * x
        o -= xn * ((-1.0)**j / j)
    # add back the log of the scalar normalization
    o += lI * numpy.log(n)
    if i.grid.precision != gpt.double:
        # return in the caller's original precision
        r = gpt.lattice(i)
        gpt.convert(r, o)
        o = r
    return o
def exp(i):
    # Matrix exponential of a lattice field via a truncated Taylor series
    # combined with scaling-and-squaring: exp(x) = exp(x/2^ns)^(2^ns).
    if i.grid.precision != gpt.double:
        # accumulate the series in double precision
        x = gpt.convert(i, gpt.double)
    else:
        x = gpt.copy(i)
    # per-site magnitude used to decide how much scaling is needed
    n = gpt.norm2(x)**0.5 / x.grid.gsites
    order = 19
    maxn = 0.05
    ns = 0
    if n > maxn:
        # scale down until the per-site magnitude is ~maxn
        ns = int(numpy.log2(n / maxn))
        x /= 2**ns
    o = gpt.lattice(x)
    o[:] = 0
    nfac = 1.0
    xn = gpt.copy(x)
    # o = 1 + x + sum_{j=2..order} x^j / j!
    o[:] = numpy.identity(o.otype.shape[0], o.grid.precision.complex_dtype)
    o += xn
    for j in range(2, order + 1):
        nfac /= j
        xn @= xn * x
        o += xn * nfac
    # undo the scaling by repeated squaring
    for j in range(ns):
        o @= o * o
    if i.grid.precision != gpt.double:
        # return in the caller's original precision
        r = gpt.lattice(i)
        gpt.convert(r, o)
        o = r
    return o
def check_inner_product(left, right, eps_ref):
    """Verify that the algebra-type inner product agrees with the sum of
    coordinate-wise inner products to within 10 * eps_ref."""
    la = g.convert(left, left.otype.cartesian())
    ra = g.convert(right, right.otype.cartesian())
    # inner product as implemented by the algebra otype itself
    ip_direct = la.otype.inner_product(la, ra)
    # the same quantity assembled from the generator coordinates
    ip_coords = 0
    for cl, cr in zip(la.otype.coordinates(la), ra.otype.coordinates(ra)):
        ip_coords += g.inner_product(cl, cr).real
    eps = abs(ip_direct - ip_coords) / abs(ip_direct + ip_coords)
    g.message(f"Test inner product: {eps}")
    assert eps < eps_ref * 10.0
def mk_eig(gf, job_tag, inv_type):
    # Build a fine Lanczos (IRL) eigensystem for the even-odd
    # preconditioned (z)Mobius operator on the given gauge field.
    # Returns (eigenvectors, eigenvalues).
    timer = q.Timer(f"py:mk_eig({job_tag},{inv_type})", True)
    timer.start()
    # Lanczos runs in single precision
    gpt_gf = g.convert(qg.gpt_from_qlat(gf), g.single)
    parity = g.odd
    params = get_lanc_params(job_tag, inv_type)
    q.displayln_info(f"mk_eig: job_tag={job_tag} inv_type={inv_type}")
    q.displayln_info(f"mk_eig: params={params}")
    fermion_params = params["fermion_params"]
    # presence of "omega" selects the zMobius action
    if "omega" in fermion_params:
        qm = g.qcd.fermion.zmobius(gpt_gf, fermion_params)
    else:
        qm = g.qcd.fermion.mobius(gpt_gf, fermion_params)
    w = g.qcd.fermion.preconditioner.eo2_ne(parity=parity)(qm)

    def make_src(rng):
        # random Gaussian source on the chosen checkerboard
        src = g.vspincolor(w.F_grid_eo)
        # src[:] = g.vspincolor([[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]])
        rng.cnormal(src)
        src.checkerboard(parity)
        return src

    # estimate the largest eigenvalue via power iteration
    pit = g.algorithms.eigen.power_iteration(**params["pit_params"])
    pit_ev, _, _ = pit(w.Mpc, make_src(g.random("lanc")))
    q.displayln_info(f"mk_eig: pit_ev={pit_ev}")
    #
    # Chebyshev-accelerated implicitly restarted Lanczos
    cheby = g.algorithms.polynomial.chebyshev(params["cheby_params"])
    irl = g.algorithms.eigen.irl(params["irl_params"])
    evec, ev = irl(cheby(w.Mpc), make_src(g.random("lanc")))
    # recompute eigenvalues of the original operator (not the polynomial)
    evals = g.algorithms.eigen.evals(w.Mpc, evec, check_eps2=1e-6, real=True)
    g.mem_report()
    #
    timer.stop()
    return evec, evals
def get_fgrid(total_site, fermion_params):
    """Return the even-odd fermion grid for the given lattice size and
    fermion parameters, built on a throwaway unit gauge field."""
    geometry = q.Geometry(total_site, 1)
    unit_gf = q.GaugeField(geometry)
    unit_gf.set_unit()
    gauge_sp = g.convert(gpt_from_qlat(unit_gf), g.single)
    # the presence of "omega" selects the zMobius action
    if "omega" in fermion_params:
        action = g.qcd.fermion.zmobius(gauge_sp, fermion_params)
    else:
        action = g.qcd.fermion.mobius(gauge_sp, fermion_params)
    return action.F_grid_eo
def exp(i):
    # Matrix exponential via truncated Taylor series with
    # scaling-and-squaring; instrumented with a gpt timer.
    t = gpt.timer("exp")
    t("eval")
    i = gpt.eval(i)  # accept expressions
    t("prep")
    if i.grid.precision != gpt.double:
        # accumulate the series in double precision
        x = gpt.convert(i, gpt.double)
    else:
        x = gpt.copy(i)
    # per-site magnitude used to decide how much scaling is needed
    n = gpt.norm2(x)**0.5 / x.grid.gsites
    order = 19
    maxn = 0.05
    ns = 0
    if n > maxn:
        # scale down until the per-site magnitude is ~maxn
        ns = int(numpy.log2(n / maxn))
        x /= 2**ns
    o = gpt.lattice(x)
    t("mem")
    o[:] = 0
    nfac = 1.0
    xn = gpt.copy(x)
    t("id")
    o @= gpt.identity(o)
    t("add")
    o += xn
    t("loop")
    # o = 1 + x + sum_{j=2..order} x^j / j!
    for j in range(2, order + 1):
        nfac /= j
        xn @= xn * x
        o += xn * nfac
    t("reduce")
    # undo the scaling by repeated squaring
    for j in range(ns):
        o @= o * o
    t("conv")
    if i.grid.precision != gpt.double:
        # return in the caller's original precision
        r = gpt.lattice(i)
        gpt.convert(r, o)
        o = r
    t()
    # gpt.message(t)
    return o
def element(self, out, p=None):
    """Fill ``out`` (a lattice or list of lattices) with a random group
    element obtained by exponentiating / converting a random linear
    combination of the Cartesian generators.

    Keys read from ``p``:
      scale  -- amplitude of the random coordinates
      normal -- if True draw normal-distributed coordinates, otherwise
                uniform in [-0.5, 0.5)
    """
    # fix: avoid the shared-mutable-default-argument pitfall (p={});
    # behavior is unchanged, missing keys still raise KeyError below
    if p is None:
        p = {}
    # idiom: isinstance instead of type(...) == list
    if isinstance(out, list):
        return [self.element(x, p) for x in out]
    t = gpt.timer("element")
    scale = p["scale"]
    normal = p["normal"]
    grid = out.grid
    t("complex")
    ca = gpt.complex(grid)
    ca.checkerboard(out.checkerboard())
    t("cartesian_space")
    cartesian_space = gpt.group.cartesian(out)
    t("csset")
    cartesian_space[:] = 0
    t("gen")
    gen = cartesian_space.otype.generators(grid.precision.complex_dtype)
    t()
    # accumulate scale * sum_a c_a T_a with independent random c_a
    for ta in gen:
        t("rng")
        if normal:
            self.normal(ca)
        else:
            self.uniform_real(ca, {"min": -0.5, "max": 0.5})
        t("lc")
        cartesian_space += scale * ca * ta
    t("conv")
    # map the algebra element to a group element of out's type
    gpt.convert(out, cartesian_space)
    t()
    # gpt.message(t)
    return out
def _converted(dst, src, mat, l, r, t=lambda x: None):
    # Apply matrix `mat` after converting src/dst into the wrapped
    # operator's vector spaces; `l` indexes the left (output) space,
    # `r` the right (input) space.  `t` is an optional timer callback.
    # Relies on closure variables self.vector_space and accept_guess.
    t("converted: setup")
    conv_src = [
        self.vector_space[r].lattice(None, x.otype, x.checkerboard())
        for x in src
    ]
    conv_dst = [
        self.vector_space[l].lattice(None, x.otype, x.checkerboard())
        for x in dst
    ]
    t("converted: convert")
    gpt.convert(conv_src, src)
    if accept_guess[l]:
        # only convert the initial guess if the wrapped matrix uses it
        gpt.convert(conv_dst, dst)
    t("converted: matrix")
    mat(conv_dst, conv_src)
    t("converted: convert")
    # convert the result back to the caller's representation
    gpt.convert(dst, conv_dst)
    t()
def left_increment(dst, src_left, scale):
    """In place, left-compose each field in ``dst`` with ``scale * src_left``.

    ``src_left`` must live in the Cartesian (algebra) type of the group
    type of ``dst``.  If the group type differs from its algebra, the
    scaled algebra element is first converted to the group and projected
    back onto it (the "defect" scheme) before composition.
    """
    dst = g.util.to_list(dst)
    src_left = g.util.to_list(src_left)
    group = dst[0].otype
    algebra = group.cartesian()
    assert src_left[0].otype.__name__ == algebra.__name__
    # fix: hoist the loop-invariant type comparison out of the loop and
    # share the composition call between both branches
    needs_projection = group.__name__ != algebra.__name__
    for src_left_mu, dst_mu in zip(src_left, dst):
        increment = g(scale * src_left_mu)
        if needs_projection:
            increment = g.project(g.convert(increment, group), "defect")
        dst_mu @= group.compose(increment, dst_mu)
def check_representation(U, eps_ref): algebra = g.convert(U, U.otype.cartesian()) # then test coordinates function algebra2 = g.lattice(algebra) algebra2[:] = 0 algebra2.otype.coordinates(algebra2, algebra.otype.coordinates(algebra)) eps = (g.norm2(algebra2 - algebra) / g.norm2(algebra))**0.5 g.message(f"Test coordinates: {eps}") assert eps < eps_ref # now project to algebra and make sure it is a linear combination of # the provided generators n0 = g.norm2(algebra) algebra2.otype.coordinates( algebra2, g.component.real(algebra.otype.coordinates(algebra))) algebra -= algebra2 eps = (g.norm2(algebra) / n0)**0.5 g.message(f"Test representation: {eps}") assert eps < eps_ref
def _converted(dst, src, mat, l, r, t=lambda x: None):
    # Apply matrix `mat` after converting src/dst into the target
    # grid/otype; `l` indexes the left (output) space, `r` the right
    # (input) space.  `t` is an optional timer callback.  Relies on
    # closure variables self.grid, otype and accept_guess.
    t("converted: setup")
    conv_src = [gpt.lattice(self.grid[r], otype[r]) for x in src]
    # fix: allocate one converted destination per element of dst, not
    # src -- the two lists need not have the same length (the
    # vector_space-based variant of this helper iterates dst here)
    conv_dst = [gpt.lattice(self.grid[l], otype[l]) for x in dst]
    t("converted: convert")
    gpt.convert(conv_src, src)
    if accept_guess[l]:
        # only convert the initial guess if the wrapped matrix uses it
        gpt.convert(conv_dst, dst)
    t("converted: matrix")
    mat(conv_dst, conv_src)
    t("converted: convert")
    gpt.convert(dst, conv_dst)
    t()
def _converted(dst, src, mat, l, r):
    # Apply `mat` in the converted precision, reporting conversion and
    # application times.  Relies on closure variables self.grid, otype,
    # accept_guess, verbose and to_precision.
    # NOTE(review): conv_src is built from grid[l]/otype[l] and conv_dst
    # from grid[r]/otype[r], the opposite index convention of the other
    # _converted variants in this file (src -> r, dst -> l); also both
    # comprehensions iterate over src.  Confirm which convention is
    # intended here.
    t0 = gpt.time()
    conv_src = [gpt.lattice(self.grid[l], otype[l]) for x in src]
    conv_dst = [gpt.lattice(self.grid[r], otype[r]) for x in src]
    gpt.convert(conv_src, src)
    if accept_guess[l]:
        # only convert the initial guess if the wrapped matrix uses it
        gpt.convert(conv_dst, dst)
    t1 = gpt.time()
    mat(conv_dst, conv_src)
    t2 = gpt.time()
    gpt.convert(dst, conv_dst)
    t3 = gpt.time()
    if verbose:
        gpt.message(
            "Converted to",
            to_precision.__name__,
            "in",
            t3 - t2 + t1 - t0,
            "s, matrix application in",
            t2 - t1,
            "s",
        )
def projected_convert(x, otype):
    """Convert ``x`` to ``otype`` and project the result back onto the
    target manifold using the "defect" scheme."""
    converted = g.convert(x, otype)
    return g.project(converted, "defect")
# now project to algebra and make sure it is a linear combination of # the provided generators n0 = g.norm2(algebra) algebra2.otype.coordinates( algebra2, g.component.real(algebra.otype.coordinates(algebra))) algebra -= algebra2 eps = (g.norm2(algebra) / n0)**0.5 g.message(f"Test representation: {eps}") assert eps < eps_ref ################################################################################ # Test projection schemes on promoting SP to DP group membership ################################################################################ V0 = g.convert(rng.element(g.mcolor(grid_sp)), g.double) for method in ["defect_left", "defect_right"]: V = g.copy(V0) I = g.identity(V) I_s = g.identity(g.complex(grid_dp)) for i in range(3): eps_uni = (g.norm2(g.adj(V) * V - I) / g.norm2(I))**0.5 eps_det = (g.norm2(g.matrix.det(V) - I_s) / g.norm2(I_s))**0.5 g.message( f"Before {method} iteration {i}, unitarity defect: {eps_uni}, determinant defect: {eps_det}" ) g.project(V, method) assert eps_uni < 1e-14 and eps_det < 1e-14 ################################################################################ # Test SU(2) fundamental and conversion to adjoint
def converted(self, dst_precision):
    """Return a copy of this object whose gauge field ``U`` has been
    converted to ``dst_precision``."""
    U_converted = gpt.convert(self.U, dst_precision)
    return self.updated(U_converted)
    # (continuation of create_source -- def line outside this view)
    g.message(
        f"Signature: {pos} -> {pos_of_slice} with signs {sign_of_slice}")
    for i in range(source_time_slices):
        if point:
            srcD += (g.create.point(g.lattice(srcD), pos_of_slice[i]) *
                     sign_of_slice[i])
        else:
            # Z2 wall source on the time slice, normalized by sqrt(vol3d)
            srcD += g.create.wall.z2(g.lattice(srcD), pos_of_slice[i][3],
                                     rng) * (sign_of_slice[i] / vol3d**0.5)
    return srcD, pos_of_slice, sign_of_slice


# exact positions
for pos in source_positions_exact:
    srcD, pos_of_slice, sign_of_slice = create_source(pos)
    # single-precision copy for the low-mode propagator
    srcF = g.convert(srcD, g.single)
    prop_sloppy = g.eval(prop_l_sloppy * srcD)
    g.mem_report(details=False)
    prop_exact = g.eval(prop_l_exact * srcD)
    g.mem_report(details=False)
    prop_low = g.eval(prop_l_low * srcF)
    g.mem_report(details=False)
    for i in range(use_source_time_slices):
        contract(pos_of_slice[i], g.eval(sign_of_slice[i] * prop_exact),
                 "exact")
        contract(pos_of_slice[i], g.eval(sign_of_slice[i] * prop_sloppy),
                 "sloppy")
    # (continuation of a plaquette function -- def line outside this view)
    tr = 0.0
    vol = float(U[0].grid.gsites)
    # sum over the 6 distinct plaquette planes mu > nu
    for mu in range(4):
        for nu in range(mu):
            tr += g.sum(
                g.trace(U[mu] * g.cshift(U[nu], mu, 1) *
                        g.adj(g.cshift(U[mu], nu, 1)) * g.adj(U[nu])))
    # normalize by volume, number of planes (4*3/2 -> /4/3*2) and colors
    return 2. * tr.real / vol / 4. / 3. / 3.


# Calculate Plaquette
g.message(g.qcd.gauge.plaquette(U))
g.message(plaquette(U))

# Precision change
Uf = g.convert(U, g.single)
g.message(g.qcd.gauge.plaquette(Uf))
Uf0 = g.convert(U[0], g.single)
g.message(g.norm2(Uf0))
del Uf0
g.meminfo()

# Slice
x = g.sum(Uf[0])
print(x)

grid = g.grid([4, 4, 4, 4], g.single)
gr = g.complex(grid)
def mk_gpt_inverter(gf,
                    job_tag,
                    inv_type,
                    inv_acc,
                    *,
                    gt=None,
                    mpi_split=None,
                    n_grouped=1,
                    eig=None,
                    eps=1e-8,
                    timer=True):
    # Build a GPT-based propagator inverter for the (z)Mobius action:
    # split-grid mixed-precision defect-correcting CG, optionally
    # deflated with a coarse eigensystem (eig) and wrapped in MADWF when
    # the Ls of the sloppy and exact parameters differ.
    if mpi_split is None:
        mpi_split = g.default.get_ivec("--mpi_split", None, 4)
        if mpi_split is not None:
            # grouped sources only make sense together with split grids
            n_grouped = g.default.get_int("--grouped", 4)
    gpt_gf = qg.gpt_from_qlat(gf)
    pc = g.qcd.fermion.preconditioner
    if inv_type in [0, 1]:
        param = get_fermion_param(job_tag, inv_type, inv_acc)
        if eig is not None:
            # may need madwf
            param0 = get_fermion_param(job_tag, inv_type, inv_acc=0)
            is_madwf = get_ls_from_fermion_params(
                param) != get_ls_from_fermion_params(param0)
        else:
            is_madwf = False
        # presence of "omega" selects the zMobius action
        if "omega" in param:
            qm = g.qcd.fermion.zmobius(gpt_gf, param)
        else:
            qm = g.qcd.fermion.mobius(gpt_gf, param)
        inv = g.algorithms.inverter
        # inner CG iteration budget per job type / quark
        if job_tag[:5] == "test-":
            cg_mp = inv.cg({"eps": eps, "maxiter": 100})
        elif inv_type == 0:
            cg_mp = inv.cg({"eps": eps, "maxiter": 200})
        elif inv_type == 1:
            cg_mp = inv.cg({"eps": eps, "maxiter": 300})
        else:
            raise Exception("mk_gpt_inverter")
        if mpi_split is None:
            cg_split = cg_mp
        else:
            cg_split = inv.split(cg_mp, mpi_split=mpi_split)
        if eig is not None:
            # deflate with the coarse eigensystem before running CG
            cg_defl = inv.coarse_deflate(eig[1], eig[0], eig[2])
            cg = inv.sequence(cg_defl, cg_split)
        else:
            cg = cg_split
        if inv_type == 0:
            slv_5d = inv.preconditioned(pc.eo2_ne(), cg)
        elif inv_type == 1:
            slv_5d = inv.preconditioned(pc.eo2_ne(), cg)
        else:
            raise Exception("mk_gpt_inverter")
        if is_madwf:
            # Mobius accelerated DWF: pair the solver with a
            # single-precision Pauli-Villars solver at the exact Ls
            gpt_gf_f = g.convert(gpt_gf, g.single)
            if "omega" in param0:
                qm0 = g.qcd.fermion.zmobius(gpt_gf_f, param0)
            else:
                qm0 = g.qcd.fermion.mobius(gpt_gf_f, param0)
            cg_pv_f = inv.cg({"eps": eps, "maxiter": 90})
            slv_5d_pv_f = inv.preconditioned(pc.eo2_ne(), cg_pv_f)
            slv_5d = pc.mixed_dwf(slv_5d, slv_5d_pv_f, qm0)
        # outer defect-correction iterations per accuracy level
        if inv_acc == 0:
            maxiter = 1
        elif inv_acc == 1:
            maxiter = 2
        elif inv_acc == 2:
            maxiter = 200
        else:
            raise Exception("mk_gpt_inverter")
        slv_qm = qm.propagator(
            inv.defect_correcting(inv.mixed_precision(
                slv_5d, g.single, g.double),
                                  eps=eps,
                                  maxiter=maxiter)).grouped(n_grouped)
        # `is True`/`is False` distinguish the literal flags from a
        # caller-supplied timer object
        if timer is True:
            timer = q.Timer(f"py:inv({job_tag},{inv_type},{inv_acc})", True)
        elif timer is False:
            timer = q.TimerNone()
        inv_qm = qg.InverterGPT(inverter=slv_qm, timer=timer)
    else:
        raise Exception("mk_gpt_inverter")
    if gt is None:
        return inv_qm
    else:
        # apply the gauge transformation around the inverter
        return q.InverterGaugeTransform(inverter=inv_qm, gt=gt)
# # Desc.: Test small core features that are not sufficiently complex # to require a separate test file. These tests need to be fast. # import gpt as g import numpy as np import sys, cgpt # grid L = [16, 16, 16, 32] grid_dp = g.grid(L, g.double) grid_sp = g.grid(L, g.single) # test fields l_dp = g.random("test").cnormal(g.vcolor(grid_dp)) l_sp = g.convert(l_dp, g.single) ################################################################################ # Test mview ################################################################################ c = g.coordinates(l_dp) x = l_dp[c] mv = g.mview(x) assert mv.itemsize == 1 and mv.shape[0] == len(mv) assert sys.getrefcount(x) == 3 del mv assert sys.getrefcount(x) == 2 ################################################################################ # Test assignments ################################################################################
import gpt as g # parameters config = g.default.get("--config", None) evec_light = g.default.get("--evec_light", None) # abbreviations pc = g.qcd.fermion.preconditioner inv = g.algorithms.inverter # load config U = g.load(config) # sloppy strange quark strange_sloppy = g.qcd.fermion.zmobius( g.convert(U, g.single), { "mass": 0.0850, "M5": 1.8, "b": 1.0, "c": 0.0, "omega": [ 1.0903256131299373, 0.9570283702230611, 0.7048886040934104, 0.48979921782791747, 0.328608311201356,
def verify_single_versus_double_precision(rng, fermion_dp, fermion_sp):
    # For every matrix-operator attribute of the double-precision fermion,
    # verify that the single-precision version (and its adj/inv variants)
    # agrees with it to within a tolerance set by the single-precision eps.
    eps_ref = fermion_sp.F_grid.precision.eps * finger_print_tolerance
    for atag in fermion_dp.__dict__.keys():
        a_dp = getattr(fermion_dp, atag)
        if isinstance(a_dp, g.projected_matrix_operator):
            a_sp = getattr(fermion_sp, atag)
            # random left/right test vectors in the operator's spaces
            rhs_dp = rng.cnormal(g.lattice(a_dp.grid[1], a_dp.otype[1]))
            lhs_dp = rng.cnormal(g.lattice(a_dp.grid[0], a_dp.otype[0]))
            # enumerate the checkerboard combinations the operator connects
            if rhs_dp.grid.cb.n == 1:
                parities = [(g.full, g.full)]
            elif a_dp.parity == g.even:
                parities = [(g.even, g.even), (g.odd, g.odd)]
            elif a_dp.parity == g.odd:
                parities = [(g.odd, g.even), (g.even, g.odd)]
            else:
                assert False
            for lp, rp in parities:
                if lp != g.full:
                    rhs_dp.checkerboard(rp)
                    lhs_dp.checkerboard(lp)
                rhs_sp = g.convert(rhs_dp, g.single)
                lhs_sp = g.convert(lhs_dp, g.single)
                # first test matrix
                ref_list = a_dp(lhs_dp, rhs_dp)
                cmp_list = g.convert(a_sp(lhs_sp, rhs_sp), g.double)
                for r, c in zip(ref_list, cmp_list):
                    eps = g.norm2(r - c) ** 0.5 / g.norm2(r) ** 0.5
                    g.message(f"Verify single <> double for {atag}: {eps}")
                    assert eps < eps_ref
                # then test adjoint matrix
                ref_list = a_dp.adj()(lhs_dp, rhs_dp)
                cmp_list = g.convert(a_sp.adj()(lhs_sp, rhs_sp), g.double)
                for r, c in zip(ref_list, cmp_list):
                    eps = g.norm2(r - c) ** 0.5 / g.norm2(r) ** 0.5
                    g.message(f"Verify single <> double for {atag}.adj(): {eps}")
                    assert eps < eps_ref
        elif isinstance(a_dp, g.matrix_operator):
            a_sp = getattr(fermion_sp, atag)
            rhs_dp = rng.cnormal(a_dp.vector_space[1].lattice())
            lhs_dp = rng.cnormal(a_dp.vector_space[0].lattice())
            if rhs_dp.grid.cb.n != 1:
                # for now test only odd cb
                rhs_dp.checkerboard(g.odd)
                lhs_dp.checkerboard(g.odd)
            rhs_sp = g.convert(rhs_dp, g.single)
            lhs_sp = g.convert(lhs_dp, g.single)
            # first test matrix
            ref = a_dp(rhs_dp)
            eps = (
                g.norm2(ref - g.convert(a_sp(rhs_sp), g.double)) ** 0.5
                / g.norm2(ref) ** 0.5
            )
            g.message(f"Verify single <> double for {atag}: {eps}")
            assert eps < eps_ref
            # then test adjoint matrix (only if the operator defines one)
            if a_dp.adj_mat is not None:
                ref = a_dp.adj()(lhs_dp)
                eps = (
                    g.norm2(ref - g.convert(a_sp.adj()(lhs_sp), g.double)) ** 0.5
                    / g.norm2(ref) ** 0.5
                )
                g.message(f"Verify single <> double for {atag}.adj(): {eps}")
                assert eps < eps_ref
            # inverse and adjoint-inverse (only if the operator defines them)
            if a_dp.inv_mat is not None:
                ref = a_dp.inv()(lhs_dp)
                eps = (
                    g.norm2(ref - g.convert(a_sp.inv()(lhs_sp), g.double)) ** 0.5
                    / g.norm2(ref) ** 0.5
                )
                g.message(f"Verify single <> double for {atag}.inv(): {eps}")
                assert eps < eps_ref
                ref = a_dp.adj().inv()(lhs_dp)
                eps = (
                    g.norm2(ref - g.convert(a_sp.adj().inv()(lhs_sp), g.double)) ** 0.5
                    / g.norm2(ref) ** 0.5
                )
                g.message(f"Verify single <> double for {atag}.adj().inv(): {eps}")
                assert eps < eps_ref
def mk_ceig(gf, job_tag, inv_type):
    # Build a compressed (coarse-grid) eigensystem for the even-odd
    # preconditioned (z)Mobius operator: fine IRL basis, blocked coarse
    # operator, coarse IRL, then eigenvalues of smoothed coarse vectors.
    # Returns (basis, coarse eigenvectors, smoothed eigenvalues).
    timer = q.Timer(f"py:mk_ceig({job_tag},{inv_type})", True)
    timer.start()
    # Lanczos runs in single precision
    gpt_gf = g.convert(qg.gpt_from_qlat(gf), g.single)
    parity = g.odd
    params = get_lanc_params(job_tag, inv_type)
    q.displayln_info(f"mk_ceig: job_tag={job_tag} inv_type={inv_type}")
    q.displayln_info(f"mk_ceig: params={params}")
    fermion_params = params["fermion_params"]
    # presence of "omega" selects the zMobius action
    if "omega" in fermion_params:
        qm = g.qcd.fermion.zmobius(gpt_gf, fermion_params)
    else:
        qm = g.qcd.fermion.mobius(gpt_gf, fermion_params)
    w = g.qcd.fermion.preconditioner.eo2_ne(parity=parity)(qm)

    def make_src(rng):
        # random Gaussian source on the chosen checkerboard
        src = g.vspincolor(w.F_grid_eo)
        # src[:] = g.vspincolor([[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]])
        rng.cnormal(src)
        src.checkerboard(parity)
        return src

    # estimate the largest eigenvalue via power iteration
    pit = g.algorithms.eigen.power_iteration(**params["pit_params"])
    pit_ev, _, _ = pit(w.Mpc, make_src(g.random("lanc")))
    q.displayln_info(f"mk_ceig: pit_ev={pit_ev}")
    #
    # Chebyshev-accelerated implicitly restarted Lanczos on the fine grid
    cheby = g.algorithms.polynomial.chebyshev(params["cheby_params"])
    irl = g.algorithms.eigen.irl(params["irl_params"])
    evec, ev = irl(cheby(w.Mpc), make_src(g.random("lanc")))
    evals = g.algorithms.eigen.evals(w.Mpc, evec, check_eps2=1e-6, real=True)
    g.mem_report()
    #
    inv = g.algorithms.inverter
    #
    cparams = get_clanc_params(job_tag, inv_type)
    q.displayln_info(f"mk_ceig: cparams={cparams}")
    #
    # coarse grid blocks the 5th dimension (Ls) together with the
    # configured 4d block sizes
    grid_coarse = g.block.grid(
        w.F_grid_eo,
        [get_ls_from_fermion_params(fermion_params)] + cparams["block"])
    nbasis = cparams["nbasis"]
    basis = evec[0:nbasis]
    b = g.block.map(grid_coarse, basis)
    # two orthonormalization sweeps of the block basis
    for i in range(2):
        b.orthonormalize()
    del evec
    gc.collect()
    #
    # coarse IRL on the blocked Chebyshev operator
    ccheby = g.algorithms.polynomial.chebyshev(cparams["cheby_params"])
    cop = b.coarse_operator(ccheby(w.Mpc))
    #
    cstart = g.vcomplex(grid_coarse, nbasis)
    cstart[:] = g.vcomplex([1] * nbasis, nbasis)
    # sanity check: coarse operator must reproduce project o fine o promote
    eps2 = g.norm2(cop * cstart - b.project * ccheby(w.Mpc) * b.promote *
                   cstart) / g.norm2(cstart)
    g.message(f"Test coarse-grid promote/project cycle: {eps2}")
    cirl = g.algorithms.eigen.irl(cparams["irl_params"])
    cevec, cev = cirl(cop, cstart)
    #
    # smooth each promoted coarse vector with a few CG iterations and
    # measure its eigenvalue under the fine operator
    smoother = inv.cg(cparams["smoother_params"])(w.Mpc)
    smoothed_evals = []
    tmpf = g.lattice(basis[0])
    for i, cv in enumerate(cevec):
        tmpf @= smoother * b.promote * cv
        smoothed_evals = smoothed_evals + g.algorithms.eigen.evals(
            w.Mpc, [tmpf], check_eps2=10, real=True)
    g.mem_report()
    #
    timer.stop()
    return basis, cevec, smoothed_evals
import numpy as np
import sys

# configuration label taken from the command line
conf = g.default.get_single("--conf", None)
g.message(f"Fixing {conf}")
# input / output paths for the eigenvector data being repaired
evec_in = (
    "/gpfs/alpine/phy138/proj-shared/phy138flavor/lehner/runs/summit-96I-" +
    conf + "-256/lanczos.output")
evec_out = (
    "/gpfs/alpine/phy138/proj-shared/phy138flavor/lehner/runs/summit-96I-" +
    conf + "-256/lanczos.output.fixed")
fmt = g.format.cevec({"nsingle": 100, "max_read_blocks": 16})
# gauge field in single precision
U = g.convert(
    g.load(
        "/gpfs/alpine/phy138/proj-shared/phy138flavor/chulwoo/evols/96I2.8Gev/evol0/configurations/ckpoint_lat."
        + conf),
    g.single,
)
# verification tolerances
eps_norm = 1e-4
eps2_evec = 1e-5
eps_eval = 1e-2
nskip = 1
load_from_alternative_scheme = True
qz = g.qcd.fermion.mobius(
    U,
    {
        "mass": 0.00054,
        "M5": 1.8,
        "b": 1.5,
        "c": 0.5,
# Desc.: Test small core features that are not sufficiently complex # to require a separate test file. These tests need to be fast. # import gpt as g import numpy as np import sys, cgpt # grid L = [16, 12, 12, 24] grid_dp = g.grid(L, g.double) grid_sp = g.grid(L, g.single) # test fields rng = g.random("test") l_dp = rng.cnormal(g.vcolor(grid_dp)) l_sp = g.convert(l_dp, g.single) # and convert precision l_dp_prime = g.convert(l_sp, g.double) eps2 = g.norm2(l_dp - l_dp_prime) / g.norm2(l_dp) assert eps2 < 1e-14 eps2 = g.norm2(l_dp[0, 0, 0, 0] - l_sp[0, 0, 0, 0]) assert eps2 < 1e-14 ################################################################################ # Test mview ################################################################################ c = g.coordinates(l_dp) x = l_dp[c] mv = g.mview(x)
# # Desc.: Illustrate core concepts and features # import gpt as g import numpy as np import sys import time # load configuration # U = g.load("/hpcgpfs01/work/clehner/configs/16I_0p01_0p04/ckpoint_lat.IEEE64BIG.1100") rng = g.random("test") U = g.qcd.gauge.random(g.grid([8, 8, 8, 8], g.double), rng, scale=0.5) g.message("Plaquette:", g.qcd.gauge.plaquette(U)) # do everything in single-precision U = g.convert(U, g.single) # use the gauge configuration grid grid = U[0].grid # mobius <> zmobius domain wall quark mobius_params = { "mass": 0.08, "M5": 1.8, "b": 1.5, "c": 0.5, "Ls": 12, "boundary_phases": [1.0, 1.0, 1.0, 1.0], } qm = g.qcd.fermion.mobius(g.qcd.gauge.unit(grid), mobius_params)
# fresh results directory
q.qremove_all_info("results")
q.qmkdir_info("results")
total_site = [4, 4, 4, 8]
geo = q.Geometry(total_site, 1)
q.displayln_info("geo.show() =", geo.show())
rs = q.RngState("seed")
# gpt grid matching the qlat geometry
grid = qg.mk_grid(geo)
rng = g.random("test")
gpt_gf = g.qcd.gauge.random(grid, rng, scale=0.5)
q.displayln_info(
    f"g.qcd.gauge.plaquette = {g.qcd.gauge.plaquette(gpt_gf):.17f}")
# plaquette should agree to single precision after conversion
gpt_gf_f = g.convert(gpt_gf, g.single)
q.displayln_info(
    f"g.qcd.gauge.plaquette = {g.qcd.gauge.plaquette(gpt_gf_f):.17f} single precision"
)
# round-trip the gauge field through qlat
gf = qg.qlat_from_gpt(gpt_gf)
gf.show_info()
mobius_params = {
    "mass": 0.08,
    "M5": 1.8,
    "b": 1.5,
    "c": 0.5,
    "Ls": 12,