def mk_eig(gf, job_tag, inv_type):
    timer = q.Timer(f"py:mk_eig({job_tag},{inv_type})", True)
    timer.start()
    gpt_gf = g.convert(qg.gpt_from_qlat(gf), g.single)
    parity = g.odd
    params = get_lanc_params(job_tag, inv_type)
    q.displayln_info(f"mk_eig: job_tag={job_tag} inv_type={inv_type}")
    q.displayln_info(f"mk_eig: params={params}")
    fermion_params = params["fermion_params"]
    if "omega" in fermion_params:
        qm = g.qcd.fermion.zmobius(gpt_gf, fermion_params)
    else:
        qm = g.qcd.fermion.mobius(gpt_gf, fermion_params)
    w = g.qcd.fermion.preconditioner.eo2_ne(parity=parity)(qm)

    def make_src(rng):
        src = g.vspincolor(w.F_grid_eo)
        # src[:] = g.vspincolor([[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]])
        rng.cnormal(src)
        src.checkerboard(parity)
        return src

    pit = g.algorithms.eigen.power_iteration(**params["pit_params"])
    pit_ev, _, _ = pit(w.Mpc, make_src(g.random("lanc")))
    q.displayln_info(f"mk_eig: pit_ev={pit_ev}")
    #
    cheby = g.algorithms.polynomial.chebyshev(params["cheby_params"])
    irl = g.algorithms.eigen.irl(params["irl_params"])
    evec, ev = irl(cheby(w.Mpc), make_src(g.random("lanc")))
    evals = g.algorithms.eigen.evals(w.Mpc, evec, check_eps2=1e-6, real=True)
    g.mem_report()
    #
    timer.stop()
    return evec, evals
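
#
# A minimal usage sketch for mk_eig (hypothetical names: it assumes the usual
# qlat-script preamble `import gpt as g`, `import qlat as q`,
# `import qlat_gpt as qg`, a gauge field `gf` already loaded as a qlat gauge
# field, and a `job_tag` known to get_lanc_params):
#
#   evec, evals = mk_eig(gf, job_tag, inv_type=0)
#   q.displayln_info(f"mk_eig: lowest eigenvalue = {evals[0]}")
#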
def perform(self, root):
    global basis_size, sloppy_per_job, T, current_config, compress_ratio
    if current_config is not None and current_config.conf_file != self.conf_file:
        current_config = None
    if current_config is None:
        current_config = config(self.conf_file)
    U = current_config.U

    reduced_mpi = [x for x in U[0].grid.mpi]
    for i in range(len(reduced_mpi)):
        if reduced_mpi[i] % 2 == 0:
            reduced_mpi[i] //= 2

    # create random selection of points with same spatial sites on each sink time slice
    # use different spatial sites for each source time slice
    # this should be optimal for the local operator insertions
    rng = g.random(f"sparse2_{self.conf}_{self.t}")
    grid = U[0].grid
    t0 = grid.ldimensions[3] * grid.processor_coor[3]
    t1 = t0 + grid.ldimensions[3]
    spatial_sites = int(compress_ratio * np.prod(grid.ldimensions[0:3]))
    spatial_coordinates = rng.choice(g.coordinates(U[0]), spatial_sites)
    local_coordinates = np.repeat(spatial_coordinates, t1 - t0, axis=0)
    for t in range(t0, t1):
        local_coordinates[t - t0 :: t1 - t0, 3] = t

    sdomain = g.domain.sparse(current_config.l_exact.U_grid, local_coordinates)

    half_peramb = {"sparse_domain": sdomain}
    for i0 in range(0, basis_size, sloppy_per_job):
        for l in g.load(
            f"{root}/{self.conf}/pm_{self.solver}_t{self.t}_i{i0}/propagators"
        ):
            for x in l:
                S = sdomain.lattice(l[x].otype)
                sdomain.project(S, l[x])
                half_peramb[x] = S
                g.message(x)

    g.save(
        f"{root}/{self.name}/propagators",
        half_peramb,
        g.format.gpt({"mpi": reduced_mpi}),
    )
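
#
# A hedged read-back sketch (illustrative only; it assumes the gpt conventions
# used above and that the dictionary keys written by g.save come back unchanged
# from g.load):
#
#   half_peramb = g.load(f"{root}/{self.name}/propagators")
#   sdomain = half_peramb["sparse_domain"]
#   # sdomain.promote(full_field, half_peramb[x]) would embed a projected
#   # propagator back into a full lattice field
#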
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
import gpt as g
import numpy as np

# load configuration
# U = g.load("/hpcgpfs01/work/clehner/configs/openQCD/A250t000n54")
U = g.qcd.gauge.random(g.grid([24, 24, 24, 32], g.double), g.random("T"))

# do everything in single-precision
U = g.convert(U, g.single)

# use the gauge configuration grid
grid = U[0].grid
L = np.array(grid.fdimensions)

# quark
w = g.qcd.fermion.wilson_clover(
    U,
    {
        "kappa": 0.137,
        "csw_r": 0,
        "csw_t": 0,
        "xi_0": 1,
        "nu": 1,
        "isAnisotropic": False,
        "boundary_phases": [1.0, 1.0, 1.0, -1.0],
    },
)
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Desc.: Test basic QIS interface
#
import gpt as g
import numpy as np
from gpt.qis.gate import *

# need a random number generator for measurements
r = g.random("qis_test", "vectorized_ranlux24_24_64")

n = g.default.get_int("--n", 16)
N = g.default.get_int("--N", 10)

for precision in [g.single, g.double]:
    g.mem_report()
    for q in [g.qis.backends.dynamic]:  # g.qis.backends.static,
        g.message(
            f"""
Run tests with
  {n} qubits
  {precision.__name__} precision
  {q.__name__} backend
"""
        )
        stR = q.state(r, n, precision=precision)
        stR.randomize()
def __call__(self, mat, src, ckpt=None):
    # verbosity
    verbose = g.default.is_verbose("irl")

    # checkpointer
    if ckpt is None:
        ckpt = g.checkpointer_none()
    ckpt.grid = src.grid
    self.ckpt = ckpt

    # first approximate largest eigenvalue
    pit = g.algorithms.eigen.power_iteration(eps=0.05, maxiter=10, real=True)
    lambda_max = pit(mat, src)[0]

    # parameters
    Nm = self.params["Nm"]
    Nu = self.params["Nu"]
    Nk = self.params["Nk"]
    Nstop = self.params["Nstop"]
    Np = Nm - Nk
    MaxIter = self.params["maxiter"]
    Np /= MaxIter
    assert Nm >= Nk and Nstop <= Nk
    print("Nm=", Nm, "Nu=", Nu, "Nk=", Nk)

    # tensors
    dtype = np.float64
    ctype = np.complex128
    lme = np.zeros((Nu, Nm), ctype)
    lmd = np.zeros((Nu, Nm), ctype)
    lme2 = np.zeros((Nu, Nm), ctype)
    lmd2 = np.empty((Nu, Nm), ctype)
    Qt = np.zeros((Nm, Nm), ctype)
    Q = np.zeros((Nm, Nm), ctype)
    ev = np.empty((Nm,), dtype)
    ev2_copy = np.empty((Nm,), dtype)

    # fields
    f = g.lattice(src)
    v = g.lattice(src)
    evec = [g.lattice(src) for i in range(Nm)]
    w = [g.lattice(src) for i in range(Nu)]
    w_copy = [g.lattice(src) for i in range(Nu)]

    # advice memory storage
    if self.params["advise"] is not None:
        g.advise(evec, self.params["advise"])

    # scalars
    k1 = 1
    k2 = Nk
    beta_k = 0.0
    rng = g.random("test")

    # set initial vector
    # rng.zn(w)
    for i in range(Nu):
        rng.zn(w[i])
        if i > 0:
            g.orthogonalize(w[i], evec[0:i])
        evec[i] = g.copy(w[i])
        evec[i] *= 1.0 / g.norm2(evec[i]) ** 0.5
        g.message("norm(evec[%d]) = %e" % (i, g.norm2(evec[i])))
        if i > 0:
            for j in range(i):
                ip = g.innerProduct(evec[j], w[i])
                if np.abs(ip) > 1e-6:
                    g.message("inner(evec[%d],w[%d])=%e %e" % (j, i, ip.real, ip.imag))
    # evec[i] @= src[i] / g.norm2(src[i]) ** 0.5

    # initial Nk steps
    Nblock_k = int(Nk / Nu)
    for b in range(Nblock_k):
        self.blockStep(mat, lmd, lme, evec, w, w_copy, Nm, b, Nu)
    Nblock_p = int(Np / Nu)

    # restarting loop
    # for it in range(self.params["maxiter"]):
    for it in range(MaxIter):
        if verbose:
            g.message("Restart iteration %d" % it)
        Nblock_l = Nblock_k + it * Nblock_p
        Nblock_r = Nblock_l + Nblock_p
        Nl = Nblock_l * Nu
        Nr = Nblock_r * Nu
        # ev2.resize(Nr)
        ev2 = np.empty((Nr,), dtype)

        for b in range(Nblock_l, Nblock_r):
            self.blockStep(mat, lmd, lme, evec, w, w_copy, Nm, b, Nu)

        for u in range(Nu):
            for k in range(Nr):
                lmd2[u, k] = lmd[u, k]
                lme2[u, k] = lme[u, k]
        Qt = np.identity(Nr, ctype)

        # diagonalize
        t0 = g.time()
        # self.diagonalize(ev2, lme2, Nm, Qt)
        self.diagonalize(ev2, lmd2, lme2, Nu, Nr, Qt)
        # def diagonalize(self, eval, lmd, lme, Nu, Nk, Nm, Qt):
        t1 = g.time()
        if verbose:
            g.message("Diagonalization took %g s" % (t1 - t0))

        # sort
        ev2_copy = ev2.copy()
        ev2 = list(reversed(sorted(ev2)))
        for i in range(Nr):
            g.message("Rval[%d]= %e" % (i, ev2[i]))

        # rotate
        # t0 = g.time()
        # g.rotate(evec, Qt, k1 - 1, k2 + 1, 0, Nm)
        # t1 = g.time()
        # if verbose:
        #     g.message("Basis rotation took %g s" % (t1 - t0))

        # convergence test
        if it >= self.params["Nminres"]:
            if verbose:
                g.message("Rotation to test convergence")

            # diagonalize
            for k in range(Nr):
                ev2[k] = ev[k]
                # lme2[k] = lme[k]
            for u in range(Nu):
                for k in range(Nr):
                    lmd2[u, k] = lmd[u, k]
                    lme2[u, k] = lme[u, k]
            Qt = np.identity(Nm, ctype)

            t0 = g.time()
            # self.diagonalize(ev2, lme2, Nk, Qt)
            self.diagonalize(ev2, lmd2, lme2, Nu, Nr, Qt)
            t1 = g.time()
            if verbose:
                g.message("Diagonalization took %g s" % (t1 - t0))

            B = g.copy(evec[0])

            allconv = True
            if beta_k >= self.params["betastp"]:
                jj = 1
                while jj <= Nstop:
                    j = Nstop - jj
                    g.linear_combination(B, evec[0:Nr], Qt[j, 0:Nr])
                    g.message("norm=%e" % g.norm2(B))
                    B *= 1.0 / g.norm2(B) ** 0.5
                    if not ckpt.load(v):
                        mat(v, B)
                        ckpt.save(v)
                    ev_test = g.innerProduct(B, v).real
                    eps2 = g.norm2(v - ev_test * B) / lambda_max ** 2.0
                    if verbose:
                        g.message(
                            "%-65s %-45s %-50s"
                            % (
                                "ev[ %d ] = %s" % (j, ev2_copy[j]),
                                "<B|M|B> = %s" % (ev_test),
                                "|M B - ev B|^2 / ev_max^2 = %s" % (eps2),
                            )
                        )
                    if eps2 > self.params["resid"]:
                        allconv = False
                    if jj == Nstop:
                        break
                    jj = min([Nstop, 2 * jj])

            if allconv:
                if verbose:
                    g.message("Converged in %d iterations" % it)
                break

    t0 = g.time()
    g.rotate(evec, Qt, 0, Nstop, 0, Nk)
    t1 = g.time()
    if verbose:
        g.message("Final basis rotation took %g s" % (t1 - t0))

    return (evec[0:Nstop], ev2_copy[0:Nstop])
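
#
# A minimal calling sketch (hypothetical: `block_irl` stands for whatever class
# defines the __call__ above; the parameter keys mirror the self.params lookups
# used in the method, and `mat`/`src` are the operator and start vector supplied
# by the surrounding application):
#
#   irl = block_irl({"Nm": 60, "Nu": 4, "Nk": 40, "Nstop": 30, "maxiter": 20,
#                    "Nminres": 0, "betastp": 0.0, "resid": 1e-8, "advise": None})
#   evec, evals = irl(mat, src)
#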
prop_l_sloppy = l_exact.propagator(light_sloppy_inverter).grouped(6)
prop_l_exact = l_exact.propagator(light_exact_inverter).grouped(6)

# show available memory
g.mem_report(details=False)

# per job
for group, job, conf, jid, n in run_jobs:
    g.message(
        f"""
Job {jid} / {n} : configuration {conf}, job tag {job}
"""
    )

    job_seed = job.split("_correlated")[0]
    rng = g.random(f"hvp-conn-a2a-ensemble-{conf}-{job_seed}")

    source_positions_low = [
        [rng.uniform_int(min=0, max=L[i] - 1) for i in range(4)]
        for j in range(jobs[job]["low"])
    ]
    source_positions_sloppy = [
        [rng.uniform_int(min=0, max=L[i] - 1) for i in range(4)]
        for j in range(jobs[job]["sloppy"])
    ]
    source_positions_exact = [
        [rng.uniform_int(min=0, max=L[i] - 1) for i in range(4)]
        for j in range(jobs[job]["exact"])
    ]

    all_time_slices = jobs[job]["all_time_slices"]
    use_source_time_slices = source_time_slices
    if not all_time_slices:
        use_source_time_slices = 1
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Desc.: Illustrate core concepts and features
#
import gpt as g
import numpy as np
import sys, cmath

# load configuration
rng = g.random("test")
L = [8, 8, 8, 16]
U = g.qcd.gauge.random(g.grid(L, g.double), rng)

# do everything in single-precision
U = g.convert(U, g.single)

# plaquette
g.message("Plaquette:", g.qcd.gauge.plaquette(U))

# use the gauge configuration grid
grid = U[0].grid

# wilson parameters
p = {
    "kappa": 0.137,
    "csw_r": 0.0,
    "csw_t":
#!/usr/bin/env python3
import gpt as g
import numpy as np
import os, sys

rng = g.random("test")

# cold start
U = g.qcd.gauge.unit(g.grid([48, 48, 48, 192], g.double))

latest_it = None
it0 = 0
dst = g.default.get("--root", None)
N = 4000

for it in range(N):
    if os.path.exists(f"{dst}/ckpoint_lat.{it}"):
        latest_it = it

if latest_it is not None:
    g.copy(U, g.load(f"{dst}/ckpoint_lat.{latest_it}"))
    rng = g.random(f"test{dst}{latest_it}", "vectorized_ranlux24_24_64")
    it0 = latest_it + 1


# gauge field obc
def project_open_bc(f):
    f[3][:, :, :, f[3].grid.gdimensions[3] - 1] = 0
    return f


project_open_bc(U)
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Desc.: Test basic QIS interface
#
import gpt as g
import numpy as np
from gpt.qis.gate import *

# need a random number generator for measurements
r = g.random("qis_test")

n = g.default.get_int("--n", 16)
N = g.default.get_int("--N", 10)

for precision in [g.single, g.double]:
    for q in [g.qis.backends.static, g.qis.backends.dynamic]:
        g.message(
            f"""
Run tests with
  {n} qubits
  {precision.__name__} precision
  {q.__name__} backend
"""
        )
        stR = q.state(r, n, precision=precision)
        stR.randomize()
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Benchmark Matrix Multiplication
#
import gpt as g

# mute random number generation
g.default.set_verbose("random", False)
rng = g.random("benchmark")

# main test loop
for precision in [g.single, g.double]:
    grid = g.grid(g.default.get_ivec("--grid", [16, 16, 16, 32], 4), precision)
    N = 10
    Nwarmup = 5
    g.message(
        f"""
Matrix Multiply Benchmark with
    fdimensions  : {grid.fdimensions}
    precision    : {precision.__name__}
"""
    )

    # Source and destination
    for tp in [
        g.ot_matrix_color(3),
        g.ot_matrix_spin(4),
        g.ot_matrix_spin_color(4, 3),
    ]:
        one = g.lattice(grid, tp)
        two = g.lattice(grid, tp)
########################################################

# staggered parameters
p = {
    "mass": 0,
    "mu5": 1,
    "hop": 0,
    "boundary_phases": [1.0, 1.0, 1.0, 1.0],
}

# grid (each dimension must be at least 4 to get correct sum rule)
L = [8, 4, 4, 4]
grid_dp = g.grid(L, g.double)
grid_sp = g.grid(L, g.single)

# SU(2) fundamental
U = g.qcd.gauge.random(grid_sp, g.random("test"), otype=g.ot_matrix_su2_fundamental())
ev = run_test(U)

# SU(2) adjoint
U = g.qcd.gauge.random(grid_sp, g.random("test"), otype=g.ot_matrix_su2_adjoint())
run_test(U)

# SU(3) fundamental
U = g.qcd.gauge.random(grid_sp, g.random("test"))
run_test(U)
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Desc.: Test polynomials
#
import gpt as g
import numpy as np

# grid & rng
grid = g.grid([8, 8, 8, 16], g.double)
rng = g.random("rng")

# chebyshev scalar function against operator function
N = 10
low = 0.1
high = 0.78
hard_code_T = {
    5: lambda x: 5.0 * x - 20.0 * x**3.0 + 16.0 * x**5.0,
    8: lambda x: 1.0 - 32.0 * x**2.0 + 160.0 * x**4.0 - 256.0 * x**6.0 + 128.0 * x**8.0,
}

for order in hard_code_T.keys():
    g.message(f"Cheby tests with low = {low}, high = {high}, order = {order}")
    c = g.algorithms.polynomial.chebyshev(low=low, high=high, order=order)

    # check scalar/lattice/hard_coded
    for val in [rng.uniform_real() for i in range(N)]:
    random      = {p_rng_seed}

Note: convergence is only guaranteed for sufficiently small step parameter.
"""
)

if p_source is None:
    g.message("Need to provide source file")
    sys.exit(1)

if p_mpi_split is None:
    g.message("Need to provide mpi_split")
    sys.exit(1)

# create rng if needed
rng = None if p_rng_seed is None else g.random(p_rng_seed)

# load source
U = g.load(p_source)

# split in time
Nt = U[0].grid.gdimensions[3]
g.message(f"Separate {Nt} time slices")
Usep = [g.separate(u, 3) for u in U[0:3]]
Vt = [g.mcolor(Usep[0][0].grid) for t in range(Nt)]
cache = {}
split_grid = Usep[0][0].grid.split(p_mpi_split, Usep[0][0].grid.fdimensions)

g.message("Split grid")
Usep_split = [g.split(Usep[mu], split_grid, cache) for mu in range(3)]
Vt_split = g.split(Vt, split_grid, cache)
#!/usr/bin/env python3
#
# Authors: Daniel Richtmann 2020
#          Christoph Lehner 2020
#
# Desc.: Check correctness of chiral splitting
#
import gpt as g
import numpy as np

# define grids
grid = g.grid([8, 8, 8, 8], g.double)

# setup rng
rng = g.random("ducks_smell_funny")

# size of basis
nbasis_f = 30
nbasis_c = 40
nb_f = nbasis_f // 2
nb_c = nbasis_c // 2

# setup fine basis
basis_ref_f = [g.vspincolor(grid) for __ in range(nb_f)]
basis_split_f = [g.vspincolor(grid) for __ in range(nbasis_f)]
rng.cnormal(basis_ref_f)

# setup coarse basis
basis_ref_c = [g.vcomplex(grid, nbasis_f) for __ in range(nb_c)]
basis_split_c = [g.vcomplex(grid, nbasis_f) for __ in range(nbasis_c)]
rng.cnormal(basis_ref_c)
    return 1


#################################################################
# test sum rules for different gauge groups and representations #
#################################################################

# staggered parameters
p = {
    "mass": 0.897,
    "hop": 1,
    "mu5": complex(1.23, 0.537),
    "boundary_phases": [1.0, 1.0, 1.0, 1.0],
}

# grid (each dimension must be at least 4 to get correct sum rule)
L = [8, 4, 4, 4]
grid_dp = g.grid(L, g.double)

# SU(2) fundamental
U = g.qcd.gauge.random(grid_dp, g.random("test"), otype=g.ot_matrix_su2_fundamental())
test_sumrule(U, p)

# SU(2) adjoint
U = g.qcd.gauge.random(grid_dp, g.random("test"), otype=g.ot_matrix_su2_adjoint())
test_sumrule(U, p)

# SU(3) fundamental
U = g.qcd.gauge.random(grid_dp, g.random("test"))
test_sumrule(U, p)
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Benchmark RNG
#
import gpt as g

g.default.set_verbose("random", False)

for engine in ["vectorized_ranlux24_24_64", "vectorized_ranlux24_389_64"]:
    rng = g.random("benchmark", engine)

    for precision in [g.single, g.double]:
        grid = g.grid(g.default.get_ivec("--grid", [16, 16, 16, 32], 4), precision)
        g.message(
            f"""
Benchmark RNG engine {engine} in {precision.__name__} precision
"""
        )

        for lattice in [g.complex, g.vspincolor, g.mspincolor]:
            # Source and destination
            dst = lattice(grid)

            # random source
            for i in range(3):
                t0 = g.time()
                rng.uniform_real(dst)
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Benchmark Dslash
#
import gpt as g

g.default.set_verbose("random", False)
rng = g.random(
    "benchmark", "vectorized_ranlux24_24_64"
)  # faster rng sufficient for benchmarking purposes

for precision in [g.single, g.double]:
    grid = g.grid(g.default.get_ivec("--grid", [16, 16, 16, 32], 4), precision)
    N = g.default.get_int("--N", 1000)
    Ls = g.default.get_int("--Ls", 8)
    g.message(
        f"""
DWF Dslash Benchmark with
    fdimensions  : {grid.fdimensions}
    precision    : {precision.__name__}
    Ls           : {Ls}
"""
    )

    # Use Mobius operator
    qm = g.qcd.fermion.mobius(
        g.qcd.gauge.random(grid, rng, scale=0.5),
        {
            "mass": 0.08,
            "M5": 1.8,
            "b": 1.5,
            "c": 0.5,
#!/usr/bin/env python3
#
# Authors: Christoph Lehner, Mattia Bruno 2021
#
import gpt as g
import numpy
import sys

# grid
grid = g.grid([8, 8, 8, 8], g.double)
rng = g.random("scalar!")

phi = g.real(grid)
rng.element(phi)

actions = [
    g.qcd.scalar.action.mass_term(),
    g.qcd.scalar.action.phi4(0.119, 0.01),
]

for a in actions:
    g.message(a.__name__)
    a.assert_gradient_error(rng, phi, phi, 1e-5, 1e-7)
#!/usr/bin/env python3
#
# Authors: Christoph Lehner, Mattia Bruno 2021
#
# HMC for pure gauge theory
#
import gpt as g
import sys, os
import numpy

beta = g.default.get_float("--beta", 5.96)

g.default.set_verbose("omf4")

grid = g.grid([8, 8, 8, 16], g.double)
rng = g.random("hmc-pure-gauge")

U = g.qcd.gauge.unit(grid)
rng.normal_element(U)

# conjugate momenta
mom = g.group.cartesian(U)

# Log
g.message(f"Lattice = {grid.fdimensions}")
g.message("Actions:")

# action for conj. momenta
a0 = g.qcd.scalar.action.mass_term()
g.message(f" - {a0.__name__}")

# wilson action
#!/usr/bin/env python3
import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
rng = g.random("d")

prop = g.mspincolor(grid)
rng.cnormal(prop)

w = g.qcd.wick()

x, y = w.coordinate(2)

ud_propagators = {
    (x, y): prop[0, 0, 0, 0],
    (y, x): g(g.gamma[5] * g.adj(prop[0, 0, 0, 0]) * g.gamma[5]),
}

u = w.fermion(ud_propagators)
d = w.fermion(ud_propagators)
s = w.fermion(ud_propagators)

na = w.color_index()
nalpha, nbeta = w.spin_index(2)

C = 1j * g.gamma[1].tensor() * g.gamma[3].tensor()
Cg5 = w.spin_matrix(C * g.gamma[5].tensor())
Pp = w.spin_matrix((g.gamma["I"].tensor() + g.gamma[3].tensor()) * 0.5)

#####
# Baryon tests
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Desc.: Illustrate core concepts and features
#
import gpt as g
import numpy as np
import sys

# load configuration
U = g.qcd.gauge.random(g.grid([8, 8, 8, 8], g.single), g.random("test"))

# wilson
w = g.qcd.fermion.wilson_clover(
    U,
    {
        "kappa": 0.137,
        "csw_r": 0,
        "csw_t": 0,
        "xi_0": 1,
        "nu": 1,
        "isAnisotropic": False,
        "boundary_phases": [1.0, 1.0, 1.0, 1.0],
    },
)

expected_largest_eigenvalue = 7.437868841644861 + 0.012044335728622612j

# start vector
start = g.vspincolor(w.F_grid)
#!/usr/bin/env python3
#
# Authors: Christoph Lehner, Mattia Bruno 2021
#
import gpt as g
import numpy
import sys

# grid
grid = g.grid([4, 4, 4, 8], g.single)
rng = g.random("3.14")

# we generate data with probability \prod_i dx_i exp(-\sum x_i^2)
x = g.real(grid)
x[:] = 0

dx = g.lattice(x)

metropolis = g.algorithms.markov.metropolis(rng)


def measure(x):
    return [g.sum(x).real / grid.fsites, g.norm2(x) / grid.fsites]


eps = 0.08
for i in range(10):
    rng.uniform_element(dx)
    trial = metropolis(x)
    f_before = g.norm2(x)
    x += eps * dx
# Authors: Christoph Lehner 2020
#
# Desc.: Test small core features that are not sufficiently complex
#        to require a separate test file.  These tests need to be fast.
#
import gpt as g
import numpy as np
import sys, cgpt

# grid
L = [16, 16, 16, 32]
grid_dp = g.grid(L, g.double)
grid_sp = g.grid(L, g.single)

# test fields
l_dp = g.random("test").cnormal(g.vcolor(grid_dp))
l_sp = g.convert(l_dp, g.single)

################################################################################
# Test mview
################################################################################
c = g.coordinates(l_dp)
x = l_dp[c]
mv = g.mview(x)
assert mv.itemsize == 1 and mv.shape[0] == len(mv)
assert sys.getrefcount(x) == 3
del mv
assert sys.getrefcount(x) == 2

################################################################################
# Test assignments
#!/usr/bin/env python3
#
# Authors: Christoph Lehner, Mattia Bruno 2021
#
# HMC for phi^4 scalar theory
#
import gpt as g
import sys, os
import numpy

grid = g.grid([16, 16, 16, 16], g.double)
rng = g.random("hmc-phi4")

phi = g.real(grid)
rng.element(phi, scale=0.2)

# conjugate momenta
mom = g.group.cartesian(phi)

# action for conj. momenta
g.message(f"Lattice = {grid.fdimensions}")
g.message("Actions:")
a0 = g.qcd.scalar.action.mass_term()
g.message(f" - {a0.__name__}")

# phi^4 action
kappa = 0.1119
l = 0.01234
a1 = g.qcd.scalar.action.phi4(kappa, l)
g.message(f" - {a1.__name__}")

g.message(f"phi4 mass = {a1.kappa_to_mass(kappa, l, grid.nd)}")
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Desc.: Illustrate core concepts and features
#
import gpt as g
import numpy as np
import sys

grid_dp = g.grid([8, 4, 4, 4], g.double)
grid_sp = g.grid([8, 4, 4, 4], g.single)

rng = g.random("block_seed_string_13")

for grid, prec in [(grid_dp, 1e-28), (grid_sp, 1e-14)]:
    U = g.qcd.gauge.random(grid, rng, scale=10)
    g.message(g.qcd.gauge.plaquette(U))
    for i in range(4):
        test = g.norm2(g.adj(U[i]) * U[i] - g.qcd.gauge.unit(grid)[0]) / g.norm2(U[i])
        g.message(test)
        assert test < prec

rng = g.random("block_seed_string_13")
n = 10000
res = {}
for i in range(n):
    z = rng.zn()
    if z not in res:
        res[z] = 0
    res[z] += 1
    assert cb in [g.even, g.odd]
    cbgrid = g.grid(
        grid.gdimensions,
        grid.precision,
        g.redblack,
        parent=grid.parent,
        mpi=grid.mpi,
    )
    cbfield = g.vspincolor(cbgrid)
    g.pick_checkerboard(cb, cbfield, field)
    return cbfield


# setup silent rng, mute
g.default.set_verbose("random", False)
rng = g.random("openqcd_dslash")

# fermion operator params
wc_params = {
    "kappa": 0.13500,
    "csw_r": 1.978,
    "csw_t": 1.978,
    "cF": 1.3,
    "xi_0": 1,
    "nu": 1,
    "isAnisotropic": False,
    "boundary_phases": [1.0, 1.0, 1.0, 0.0],
}

# workdir
if "WORK_DIR" in os.environ:
#!/usr/bin/env python3
#
# Authors: Daniel Richtmann 2020
#          Christoph Lehner 2020
#
# Desc.: Test multigrid for clover
#
import gpt as g
import numpy as np

# setup rng, mute
g.default.set_verbose("random", False)
rng = g.random("test_mg")

# adjust volume for mpi layout of test
L = [8, 8, 8, 16]
mpi = g.default.get_ivec("--mpi", [1, 1, 1, 1], 4)
simd = [1, 2, 2, 2]
l = [L[i] // mpi[i] // simd[i] for i in range(4)]
l_min = [4, 4, 4, 4]
for i in range(4):
    if l[i] < l_min[i]:
        L[i] *= l_min[i] // l[i]
g.message(f"Run with L = {L}")

# setup gauge field
U = g.qcd.gauge.random(g.grid(L, g.single), rng)

# quark
w = g.qcd.fermion.wilson_clover(
    U,
#!/usr/bin/env python3
#
# Authors: Christoph Lehner, Tilo Wettig 2020
#
import gpt as g
import numpy as np
import sys

# grid
L = [8, 8, 8, 8]
grid = g.grid(L, g.single)
grid_eo = g.grid(L, g.single, g.redblack)

# cold start
g.default.push_verbose("random", False)
rng = g.random("test", "vectorized_ranlux24_24_64")
U = [g.complex(grid) for i in range(4)]
for mu in range(len(U)):
    U[mu][:] = 1

# red/black mask
mask_rb = g.complex(grid_eo)
mask_rb[:] = 1

# full mask
mask = g.complex(grid)


# simple plaquette action
def staple(U, mu):
    st = g.lattice(U[0])
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Desc.: Illustrate core concepts and features
#
import gpt as g
import numpy as np
import sys
import time

# load configuration
# U = g.load("/hpcgpfs01/work/clehner/configs/16I_0p01_0p04/ckpoint_lat.IEEE64BIG.1100")
rng = g.random("test")
U = g.qcd.gauge.random(g.grid([8, 8, 8, 8], g.double), rng, scale=0.5)
g.message("Plaquette:", g.qcd.gauge.plaquette(U))

# do everything in single-precision
U = g.convert(U, g.single)

# use the gauge configuration grid
grid = U[0].grid

# mobius <> zmobius domain wall quark
mobius_params = {
    "mass": 0.08,
    "M5": 1.8,
    "b": 1.5,
    "c": 0.5,
    "Ls": 12,
    "boundary_phases": [1.0, 1.0, 1.0, 1.0],
#!/usr/bin/env python3
#
# Authors: Daniel Richtmann 2020
#          Christoph Lehner 2020
#
# Desc.: Exercise linear solvers
#
import gpt as g
import numpy as np
import sys
import time
import os.path

# load configuration
precision = g.double
U = g.qcd.gauge.random(g.grid([8, 8, 8, 16], precision), g.random("test"))

# use the gauge configuration grid
grid = U[0].grid

# quark
w = g.qcd.fermion.wilson_clover(
    U,
    {
        "kappa": 0.13565,
        "csw_r": 2.0171 / 2.0,  # for now test with very heavy quark
        "csw_t": 2.0171 / 2.0,
        "xi_0": 1,
        "nu": 1,
        "isAnisotropic": False,
        "boundary_phases": [1.0, 1.0, 1.0, 1.0],