def perform(self, root):
    """Assemble per-time-slice 3D basis vectors into 4D fields and save them.

    For every source time tprime, loads the 3D basis eigenvectors written by
    a previous step (self.basis_fmt) and copies each one into time slice
    tprime of a 4D color-vector field; the resulting list is saved under
    ``{root}/{self.name}/basis``.
    """
    global basis_size, T, current_config
    # Invalidate the cached configuration if it belongs to a different file.
    if current_config is not None and current_config.conf_file != self.conf_file:
        current_config = None
    if current_config is None:
        current_config = config(self.conf_file)

    c = None
    # One 4D color vector per basis element, zero-initialized.
    vcj = [
        g.vcolor(current_config.l_exact.U_grid) for jr in range(basis_size)
    ]
    for vcjj in vcj:
        vcjj[:] = 0

    for tprime in range(T):
        # 3D basis (single time slice) for this source time.
        basis_evec, basis_evals = g.load(self.basis_fmt % (self.conf, tprime))

        # Copy plan: embed the 3D slice at fixed time tprime of the 4D field;
        # the destination coordinates get tprime appended as 4th component.
        plan = g.copy_plan(vcj[0], basis_evec[0],
                           embed_in_communicator=vcj[0].grid)
        c = g.coordinates(basis_evec[0])
        plan.destination += vcj[0].view[np.hstack(
            (c, np.ones((len(c), 1), dtype=np.int32) * tprime))]
        plan.source += basis_evec[0].view[c]
        plan = plan()

        # The same plan applies to every basis vector.
        for l in range(basis_size):
            plan(vcj[l], basis_evec[l])

    for l in range(basis_size):
        g.message("Check norm:", l, g.norm2(vcj[l]))

    g.save(f"{root}/{self.name}/basis", vcj)
def compute_eig(gf, job_tag, inv_type, *, path=None, nsingle=10, mpi=None):
    """Return a zero-argument function ``get_eig`` whose call yields ``eig``.

    If a lazily-loadable eigensystem already exists for (job_tag, inv_type,
    path), its loader is returned. Otherwise the coarse eigensystem
    [basis, cevec, smoothed_evals] is computed, optionally saved to *path*
    in cevec format, and captured in the returned closure.

    Parameters:
        gf: gauge field used to build the operator.
        job_tag, inv_type: job identifiers forwarded to the helpers.
        path: optional save/load location.
        nsingle: number of single-precision vectors in the cevec format.
        mpi: 4D MPI layout; defaults to [1, 1, 1, 4].
            (BUG FIX: was a mutable default argument ``mpi=[1, 1, 1, 4]``;
            replaced with a ``None`` sentinel — behavior is unchanged.)
    """
    if mpi is None:
        mpi = [1, 1, 1, 4]
    load_eig = load_eig_lazy(job_tag, inv_type, path)
    if load_eig is not None:
        return load_eig
    # evec, evals = ru.mk_eig(gf, job_tag, inv_type)
    basis, cevec, smoothed_evals = ru.mk_ceig(gf, job_tag, inv_type)
    eig = [basis, cevec, smoothed_evals]
    fmt = g.format.cevec({
        "nsingle": nsingle,
        "mpi": [1] + mpi,  # leading 1: extra (5th) dimension of the layout
        "max_read_blocks": 8
    })
    if path is not None:
        g.save(get_save_path(path), eig, fmt)

    def get_eig():
        return eig

    return get_eig
def perform(self, root):
    """Project sloppy propagators onto a sparse spatial domain and save them.

    Draws a random set of spatial sites (the same set on every local sink
    time slice, but different per source time slice self.t — seeded by conf
    and t), projects all previously computed propagators onto that sparse
    domain, and writes the compressed result.
    """
    global basis_size, sloppy_per_job, T, current_config, compress_ratio
    # Invalidate the cached configuration if it belongs to a different file.
    if current_config is not None and current_config.conf_file != self.conf_file:
        current_config = None
    if current_config is None:
        current_config = config(self.conf_file)

    U = current_config.U

    # Output file layout: halve every even MPI dimension.
    reduced_mpi = [x for x in U[0].grid.mpi]
    for i in range(len(reduced_mpi)):
        if reduced_mpi[i] % 2 == 0:
            reduced_mpi[i] //= 2

    # create random selection of points with same spatial sites on each sink time slice
    # use different spatial sites for each source time-slice
    # this should be optimal for the local operator insertions
    rng = g.random(f"sparse2_{self.conf}_{self.t}")
    grid = U[0].grid
    t0 = grid.ldimensions[3] * grid.processor_coor[3]  # first local time slice
    t1 = t0 + grid.ldimensions[3]                      # one past last local slice
    spatial_sites = int(compress_ratio * np.prod(grid.ldimensions[0:3]))
    spatial_coordinates = rng.choice(g.coordinates(U[0]), spatial_sites)
    # Repeat each spatial site once per local time slice, then assign the
    # time component: copy (t - t0) of every site gets time t.
    local_coordinates = np.repeat(spatial_coordinates, t1 - t0, axis=0)
    for t in range(t0, t1):
        local_coordinates[t - t0::t1 - t0, 3] = t

    sdomain = g.domain.sparse(current_config.l_exact.U_grid, local_coordinates)

    half_peramb = {"sparse_domain": sdomain}
    # Project every propagator of every job chunk onto the sparse domain.
    for i0 in range(0, basis_size, sloppy_per_job):
        for l in g.load(
                f"{root}/{self.conf}/pm_{self.solver}_t{self.t}_i{i0}/propagators"
        ):
            for x in l:
                S = sdomain.lattice(l[x].otype)
                sdomain.project(S, l[x])
                half_peramb[x] = S
                g.message(x)

    g.save(
        f"{root}/{self.name}/propagators",
        half_peramb,
        g.format.gpt({"mpi": reduced_mpi}),
    )
# Round-trip test: promoting the projected field S back onto the full grid
# must reproduce U[0] exactly on the sparse domain's local coordinates.
U0prime = g.lattice(U[0])
U0prime[:] = 0
sdomain.promote(U0prime, S)
assert np.linalg.norm(
    U0prime[sdomain.local_coordinates] - U[0][sdomain.local_coordinates]
) < 1e-14

# Time-slice reduction of the sparse field (axis 3 = time).
s_slice = sdomain.slice(S, 3)

# save in default gpt format
g.save(
    f"{work_dir}/out",
    {
        # Key deliberately contains a newline to exercise escaping in the format.
        "va\nl": [
            0,
            1,
            3,
            "tes\n\0t",
            3.123456789123456789,
            1.123456789123456789e-7,
            1 + 3.1231251251234123413j,
        ],  # fundamental data types
        "np": g.coordinates(U[0].grid),  # write numpy array from root node
        "U": U,  # write list of lattices
        "sdomain": sdomain,
        "S": S,
    },
)

# save in custom gpt format with different mpi distribution of local views
g.save(
    f"{work_dir}/out2",
    {
        "val": [
            0,
# NOTE(review): chunk is truncated mid-expression here; the remainder of this
# call is outside the visible source.
# Random real "eigenvalues" drawn around 2.0.
feval = [rng.normal(mu=2.0, sigma=0.5).real for i in range(nevec)]

# Fine basis vectors live on the odd checkerboard; fill all with noise.
for vec in basis:
    vec.checkerboard(g.odd)
rng.cnormal([basis, cevec])

# Block map onto the coarse grid; two passes of orthonormalization.
b = g.block.map(cgrid, basis)
b.orthonormalize()
b.orthonormalize()

# Save/reload the eigensystem under two different MPI layouts and verify.
for mpi_layout in [[1, 1, 1, 1, 1], [1, 2, 2, 2, 2]]:
    # save in fixed layout
    fmt = g.format.cevec({
        "nsingle": nsingle,
        "max_read_blocks": 8,
        "mpi": mpi_layout
    })
    g.save(f"{work_dir}/cevec", [basis, cevec, feval], fmt)

    # and load again to verify
    basis2, cevec2, feval2 = g.load(f"{work_dir}/cevec", grids=fgrid)
    for saved, loaded in ((basis, basis2), (cevec, cevec2), (feval, feval2)):
        assert len(saved) == len(loaded)
    for i, (v, v2) in enumerate(zip(basis, basis2)):
        eps2 = g.norm2(v - v2) / g.norm2(v)
        g.message(f"basis {i} resid {eps2}")
# # Production code to generate fine-grid basis vectorscoarse-grid eigenvectors using existing # import gpt as g # parameters fn = g.default.get("--params", "params.txt") params = g.params(fn, verbose=True) # load configuration U = params["config"] # matrix to use fmatrix = params["fmatrix"](U) op = params["op"](fmatrix) grid = op.grid[0] # implicitly restarted lanczos irl = params["method_evec"] # run start = g.vspincolor(grid) start[:] = g.vspincolor([[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]) start.checkerboard(g.odd) # traditionally, calculate odd-site vectors try: basis, ev = g.load("checkpoint", grids=grid) except g.LoadError: basis, ev = irl(op, start, params["checkpointer"]) g.save("checkpoint", (basis, ev))
# Random fine-grid basis and coarse vectors plus real "eigenvalues".
basis = [g.vspincolor(fgrid) for _ in range(nbasis)]
cevec = [g.vcomplex(cgrid, nbasis) for _ in range(nevec)]
feval = [rng.normal(mu=2.0, sigma=0.5).real for _ in range(nevec)]

# Fine vectors live on the odd checkerboard; fill all with Gaussian noise.
for vec in basis:
    vec.checkerboard(g.odd)
rng.cnormal([basis, cevec])

# Block map onto the coarse grid; two passes of orthonormalization.
b = g.block.map(cgrid, basis)
b.orthonormalize()
b.orthonormalize()

# save in fixed layout
g.save(
    f"{work_dir}/cevec",
    [basis, cevec, feval],
    g.format.cevec({
        "nsingle": nsingle,
        "max_read_blocks": 16,
        "mpi": [1, 2, 2, 2, 2]
    }),
)

# and load again to verify
basis2, cevec2, feval2 = g.load(f"{work_dir}/cevec", {"grids": fgrid})
for saved, loaded in ((basis, basis2), (cevec, cevec2), (feval, feval2)):
    assert len(saved) == len(loaded)
for i, (v, v2) in enumerate(zip(basis, basis2)):
    eps2 = g.norm2(v - v2) / g.norm2(v)
    g.message(f"basis {i} resid {eps2}")
# load configuration U = g.load("/hpcgpfs01/work/clehner/configs/32IDfine/ckpoint_lat.200") # Show metadata of field g.message("Metadata", U[0].metadata) # to single precision #U = g.convert(U, g.single) # save in default gpt format g.save( "out", { "va\nl": [ 0, 1, 3, "tes\n\0t", 3.123456789123456789, 1.123456789123456789e-7, 1 + 3.1231251251234123413j ], # fundamental data types "np": g.coordinates(U[0].grid), # write numpy array from root node "U": U # write list of lattices }) # save in custom gpt format with different mpi distribution of local views g.save( "out2", { "val": [ 0, 1, 3, "test", 3.123456789123456789, 1.123456789123456789e-7, 1 + 3.1231251251234123413j ], # fundamental data types "np":
    # NOTE(review): this first section is the tail of a function (presumably
    # the ``hmc`` trajectory function) whose ``def`` line is outside this
    # chunk; s0/h0 are computed earlier in that function.
    h1, s1 = hamiltonian(False)
    if no_accept_reject:
        # Always accept (thermalization phase); still report dS and dH.
        return [True, s1 - s0, h1 - h0]
    else:
        return [accrej(h1, h0), s1 - s0, h1 - h0]


accept, total = 0, 0
for it in range(it0, N):
    # First 10 trajectories: pure gauge; first 100: no accept/reject step.
    pure_gauge = it < 10
    no_accept_reject = it < 100
    g.message(pure_gauge, no_accept_reject)

    a, dS, dH = hmc(1.0)
    accept += a
    total += 1

    plaq = g.qcd.gauge.plaquette(U)
    g.message(
        f"HMC {it} has P = {plaq}, dS = {dS}, dH = {dH}, acceptance = {accept/total}"
    )
    for x in log.grad:
        g.message(f"{x} force norm2/sites =", np.mean(log.get(x)),
                  "+-", np.std(log.get(x)))
    g.message(f"Timing:\n{log.time}")

    if it % 10 == 0:
        # reset statistics
        log.reset()
        g.message("Reset log")
        # NOTE(review): nesting of the checkpoint save was ambiguous in the
        # flattened source; reconstructed as every 10 trajectories — confirm.
        g.save(f"{dst}/ckpoint_lat.{it}", U, g.format.nersc())
        #g.save(f"{dst}/ckpoint_lat.{it}", U)
try:
    # Re-use previously computed eigenvectors if they exist on disk.
    evec, ev = g.load(path_to_evec, {"grids": w.F_grid_eo})
except g.LoadError:
    # Unit start vector on the even-odd fermion grid.
    start = g.vspincolor(w.F_grid_eo)
    start[:] = g.vspincolor([[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]])

    # generate eigenvectors (of the Chebyshev-transformed NDagN operator)
    evec, ev_cheby = irl(
        c(eo(w).NDagN),
        start,
        g.checkpointer("/hpcgpfs01/scratch/clehner/openQCD/checkpoint"),
    )
    # Recompute eigenvalues of the untransformed preconditioned operator.
    ev = g.algorithms.eigen.evals(eo(w).Mpc, evec, check_eps2=1e-8, real=True)

    # save eigenvectors
    g.save(path_to_evec, [evec, ev])

# build solver: CG deflated with the eigenvectors, eo-preconditioned
inv = g.algorithms.inverter
dcg = inv.sequence(inv.deflate(evec, ev), inv.cg({
    "eps": 1e-6,
    "maxiter": 1000
}))
slv = w.propagator(inv.preconditioned(eo, dcg))

# propagator
dst = g.mspincolor(grid)
slv(dst, src)

# two-point correlator, summed over time slices (axis 3)
correlator = g.slice(g.trace(dst * g.adj(dst)), 3)
nbasis = params["nbasis"] # fg_basis,fg_cevec,fg_feval = g.load(params["basis"],{ # "grids" : q.F_grid_eo, "nmax" : nbasis, # "advise_basis" : g.infrequent_use, # "advise_cevec" : g.infrequent_use # }) rng = g.random("test") try: fg_basis = g.load("basis", {"grids": q.F_grid_eo})[0] except g.LoadError: fg_basis = g.advise( [g.vspincolor(q.F_grid_eo) for i in range(nbasis)], g.infrequent_use ) rng.zn(fg_basis) g.save("basis", [fg_basis]) # g.mem_report() # g.prefetch( fg_basis, g.to_accelerator) # g.mem_report() # w=fg_basis[-1] # g.orthogonalize(w,fg_basis[0:1]) # g.orthogonalize(w,fg_basis[0:15]) fg_basis = g.advise(fg_basis, g.infrequent_use) tg = g.block.grid(q.F_grid_eo, [12, 2, 2, 2, 2]) fg_cevec = g.advise([g.vcomplex(tg, 150) for i in range(nbasis)], g.infrequent_use) rng.zn(fg_cevec) fg_feval = [0.0 for i in range(nbasis)]
# Each fine-basis vector should be normalized to the global site count.
gsites = cevec[0].grid.gsites
for i, bv in enumerate(basis):
    nrm = g.norm2(bv)
    g.message(i, nrm, gsites)
    assert abs(nrm / gsites - 1) < eps_norm

# g.message("Before ortho")
# b.orthonormalize()
# g.message("After ortho")

Mpc = g.qcd.fermion.preconditioner.eo1_ne(parity=g.odd)(qz).Mpc

# Spot-check every nskip-th coarse vector: promote it to the fine grid,
# verify its norm, its eigenvalue against fev, and the projection round-trip.
for i in range(0, len(basis), nskip):
    evec = g(b.promote * cevec[i])
    nrm = g.norm2(evec)
    g.message("Norm evec:", i, nrm)
    assert abs(nrm - 1) < eps_norm

    ev = g.algorithms.eigen.evals(Mpc, [evec], real=True, check_eps2=eps2_evec)[0]
    g.message(i, ev, fev[i])
    assert abs(ev / fev[i] - 1) < eps_eval

    coarse_again = g(b.project * evec)
    g.message(
        "Test:",
        g.norm2(coarse_again - cevec[i]) / g.norm2(coarse_again),
        g.norm2(coarse_again),
        g.norm2(cevec[i]),
    )

if load_from_alternative_scheme:
    g.save(evec_out, [basis, cevec, fev], fmt)
    # NOTE(review): this first section sits inside an enclosing loop over the
    # time slice ``t`` whose header is outside this chunk.
    g.message("Time slice", t)

    # Start from zero and copy in a thick slab of unsmeared links (U0) of
    # half-width t_smear_thick around time slice t (periodic in time).
    U_temp = [g.lattice(u) for u in U]
    for u in U_temp:
        u[:] = 0
    for dt in range(-t_smear_thick, t_smear_thick + 1):
        tp = (t + Nt + dt) % Nt
        for u_dst, u_src in zip(U_temp, U0):
            u_dst[:, :, :, tp] = u_src[:, :, :, tp]

    # Stout-smear the slab n_smear times, then copy only slice t back into U.
    for i in range(n_smear):
        g.message("smear", i)
        U_temp = g.qcd.gauge.smear.stout(U_temp, rho=rho_smear)
    for u_dst, u_src in zip(U, U_temp):
        u_dst[:, :, :, t] = u_src[:, :, :, t]

# save smeared gauge field
g.save(config_smeared, U, g.format.nersc())

g.message("Plaquette after", g.qcd.gauge.plaquette(U))
# Sanity checks: links stay unitary and in the group after smearing.
for u in U:
    g.message("Unitarity violation",
              g.norm2(u * g.adj(u) - g.identity(u)) / g.norm2(u))
    g.message(
        "SU violation",
        g.norm2(g.matrix.det(u) - g.identity(g.complex(u.grid))) / g.norm2(u),
    )

# separate time slices and define laplace operator
U3 = [g.separate(u, 3) for u in U[0:3]]

for t in range(Nt):
    if t % t_groups != t_group:
# NOTE(review): chunk is truncated here; the loop body continues outside the
# visible source.
# Recombine the split gauge-fixing fields.
g.unsplit(Vt, Vt_split, cache)

g.message("Project to group (should only remove rounding errors)")
Vt = [g.project(vt, "defect") for vt in Vt]

g.message("Test")
# test results: per time slice, the Landau-functional gradient of the
# transformation must be below the convergence threshold.
for t in range(Nt):
    f = g.qcd.gauge.fix.landau([Usep[mu][t] for mu in range(3)])
    dfv = f.gradient(Vt[t], Vt[t])
    theta = g.norm2(dfv).real / Vt[t].grid.gsites / dfv.otype.Nc
    g.message(f"theta[{t}] = {theta}")
    g.message(f"V[{t}][0,0,0] = ", Vt[t][0, 0, 0])
    if theta > p_theta_eps or np.isnan(theta):
        # BUG FIX: message previously read "Time slice{t}" (missing space).
        g.message(f"Time slice {t} did not converge: {theta} >= {p_theta_eps}")
        sys.exit(1)

# merge time slices
V = g.merge(Vt, 3)
U_transformed = g.qcd.gauge.transformed(U, V)

# remove rounding errors on U_transformed
U_transformed = [g.project(u, "defect") for u in U_transformed]

# save results
g.save(f"{p_source}.Coulomb", U_transformed, g.format.nersc())
g.save(f"{p_source}.CoulombV", V)
)  # NOTE(review): closes a call whose opening parenthesis is outside this chunk

g.message("Unsplit")
# Recombine the split gauge-fixing fields.
g.unsplit(Vt, Vt_split, cache)

g.message("Project to group (should only remove rounding errors)")
Vt = [g.project(vt, "defect") for vt in Vt]

g.message("Test")
# test results: report the Landau-functional gradient per time slice.
# NOTE(review): unlike the sibling script, this variant does not abort when
# theta exceeds a threshold — it only reports; confirm this is intended.
for t in range(Nt):
    f = g.qcd.gauge.fix.landau([Usep[mu][t] for mu in range(3)])
    dfv = f.gradient(Vt[t], Vt[t])
    theta = g.norm2(dfv).real / Vt[t].grid.gsites / dfv.otype.Nc
    g.message(f"theta[{t}] = {theta}")
    g.message(f"V[{t}][0,0,0] = ", Vt[t][0, 0, 0])

# merge time slices
V = g.merge(Vt, 3)
U_transformed = g.qcd.gauge.transformed(U, V)

# remove rounding errors on U_transformed
U_transformed = [g.project(u, "defect") for u in U_transformed]

# save results
g.save("U.transformed", U_transformed, g.format.nersc())
g.save("V", V)