def fine_operator(self, coarse_operator):
    verbose = gpt.default.is_verbose("block_operator")
    coarse_otype = gpt.ot_vector_singlet(len(self.basis))
    otype = self.basis[0].otype
    grid = self.basis[0].grid
    cb = self.basis[0].checkerboard()

    def mat(dst, src):
        csrc = [gpt.lattice(self.coarse_grid, coarse_otype) for x in src]
        cdst = [gpt.lattice(self.coarse_grid, coarse_otype) for x in src]
        t0 = gpt.time()
        self.project(csrc, src)
        t1 = gpt.time()
        coarse_operator(cdst, csrc)
        t2 = gpt.time()
        self.promote(dst, cdst)
        t3 = gpt.time()
        if verbose:
            gpt.message(
                "fine_operator acting on %d vector(s) in %g s (project %g s, coarse_operator %g s, promote %g s)"
                % (len(src), t3 - t0, t1 - t0, t2 - t1, t3 - t2)
            )

    return gpt.matrix_operator(
        mat=mat, otype=otype, grid=grid, cb=cb, accept_list=True
    )
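# Usage sketch (illustrative, not part of the original file): fine_operator lifts an
# operator defined on the coarse grid to one acting on fine-grid vectors via
# project -> coarse operator -> promote.  The sketch assumes this class is exposed
# as g.block.map (as in gpt) and provides an orthonormalize() method; the grid
# sizes, seed, and toy coarse operator are arbitrary choices for the example.
import gpt as g

fine_grid = g.grid([8, 8, 8, 16], g.double)
coarse_grid = g.grid([4, 4, 4, 8], g.double)
rng = g.random("example")

# random fine-grid basis, block-orthonormalized so project/promote behave as expected
basis = [g.vspincolor(fine_grid) for _ in range(8)]
rng.cnormal(basis)
bm = g.block.map(coarse_grid, basis)  # assumed name under which this class is exposed
bm.orthonormalize()

# toy coarse operator: scale every coarse vector by 2 (accepts lists of lattices)
def coarse_scale(dst, src):
    for d, s in zip(dst, src):
        d @= 2.0 * s

fine_op = bm.fine_operator(coarse_scale)
src = rng.cnormal(g.vspincolor(fine_grid))
dst = g.lattice(src)
dst @= fine_op * src  # applies project, coarse_scale, promote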
def check_orthogonality(self, tol=None):
    c_otype = gpt.ot_vector_singlet(len(self.basis))
    iproj = gpt.lattice(self.coarse_grid, c_otype)
    eproj = gpt.lattice(self.coarse_grid, c_otype)
    for i, v in enumerate(self.basis):
        iproj @= self.project * v
        eproj[:] = 0.0
        eproj[:, :, :, :, i] = 1.0
        err2 = gpt.norm2(eproj - iproj)
        if tol is not None:
            assert err2 <= tol
            gpt.message(
                f"blockmap: ortho check for vector {i:d}: {err2:e} <= {tol:e}"
            )
        else:
            gpt.message(f"blockmap: ortho check error for vector {i:d}: {err2:e}")
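# Usage sketch (illustrative): after block-orthonormalization each basis vector
# should project onto the corresponding coarse unit vector, so a tight tolerance
# can be enforced.  Reuses bm from the sketch above; the second orthonormalize
# pass and the tolerance value are arbitrary choices for the example.
bm.orthonormalize()
bm.check_orthogonality(tol=1e-12)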
def __init__(self, coarse_grid, basis, mask=None, basis_n_block=8):
    assert type(coarse_grid) == gpt.grid
    assert len(basis) > 0

    if mask is None:
        mask = gpt.complex(basis[0].grid)
        mask.checkerboard(basis[0].checkerboard())
        mask[:] = 1
    else:
        assert basis[0].grid is mask.grid
        assert len(mask.v_obj) == 1

    c_otype = gpt.ot_vector_singlet(len(basis))
    basis_size = c_otype.v_n1[0]
    self.coarse_grid = coarse_grid
    self.basis = basis
    self.obj = cgpt.create_block_map(
        coarse_grid.obj,
        basis,
        basis_size,
        basis_n_block,
        mask.v_obj[0],
    )

    def _project(coarse, fine):
        assert fine[0].checkerboard().__name__ == basis[0].checkerboard().__name__
        cgpt.block_project(self.obj, coarse, fine)

    def _promote(fine, coarse):
        assert fine[0].checkerboard().__name__ == basis[0].checkerboard().__name__
        cgpt.block_promote(self.obj, coarse, fine)

    self.project = gpt.matrix_operator(
        mat=_project,
        otype=(c_otype, basis[0].otype),
        grid=(coarse_grid, basis[0].grid),
        cb=(None, basis[0].checkerboard()),
        accept_list=True,
    )
    self.promote = gpt.matrix_operator(
        mat=_promote,
        otype=(basis[0].otype, c_otype),
        grid=(basis[0].grid, coarse_grid),
        cb=(basis[0].checkerboard(), None),
        accept_list=True,
    )
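# Usage sketch (illustrative): project/promote round trip.  With a block-orthonormal
# basis, promoting coarse coefficients to the fine grid and projecting back
# reproduces the original coarse vector.  Reuses coarse_grid, fine_grid, basis, rng,
# and bm from the sketches above; the dst @= op * src calling convention mirrors the
# one used in check_orthogonality.
nbasis = len(basis)
csrc = rng.cnormal(g.lattice(coarse_grid, g.ot_vector_singlet(nbasis)))
fine = g.vspincolor(fine_grid)
fine @= bm.promote * csrc
cback = g.lattice(coarse_grid, g.ot_vector_singlet(nbasis))
cback @= bm.project * fine
g.message(f"promote/project round-trip deviation: {g.norm2(cback - csrc) / g.norm2(csrc):e}")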
def coarse_operator(self, fine_operator):
    verbose = gpt.default.is_verbose("block_operator")

    def mat(dst_coarse, src_coarse):
        src_fine = [gpt.lattice(self.basis[0]) for x in src_coarse]
        dst_fine = [gpt.lattice(self.basis[0]) for x in src_coarse]
        t0 = gpt.time()
        self.promote(src_fine, src_coarse)
        t1 = gpt.time()
        fine_operator(dst_fine, src_fine)
        t2 = gpt.time()
        self.project(dst_coarse, dst_fine)
        t3 = gpt.time()
        if verbose:
            gpt.message(
                "coarse_operator acting on %d vector(s) in %g s (promote %g s, fine_operator %g s, project %g s)"
                % (len(src_coarse), t3 - t0, t1 - t0, t2 - t1, t3 - t2)
            )

    otype = gpt.ot_vector_singlet(len(self.basis))
    return gpt.matrix_operator(
        mat=mat, otype=otype, grid=self.coarse_grid, accept_list=True
    )
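# Usage sketch (illustrative): coarse_operator performs the Galerkin-style coarsening
# promote -> fine operator -> project.  The fine operator below is a toy example
# (multiplication by gamma5); in practice one would pass e.g. a Dirac operator that
# accepts lists of fine-grid vectors.  Reuses bm, coarse_grid, nbasis, and csrc from
# the sketches above.
def fine_g5(dst, src):
    for d, s in zip(dst, src):
        d @= g.gamma[5] * s

cop = bm.coarse_operator(fine_g5)
cdst = g.lattice(coarse_grid, g.ot_vector_singlet(nbasis))
cdst @= cop * csrc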
# main test loop
for precision in [g.single, g.double]:
    grid = g.grid(g.default.get_ivec("--grid", [16, 16, 16, 32], 4), precision)
    N = 100
    Nwarmup = 5
    g.message(
        f"""
    Inner Product Benchmark with
      fdimensions  : {grid.fdimensions}
      precision    : {precision.__name__}
"""
    )

    # Source and destination
    for tp in [g.ot_singlet(), g.ot_vector_spin_color(4, 3), g.ot_vector_singlet(12)]:
        for n in [1, 4]:
            one = [g.lattice(grid, tp) for i in range(n)]
            two = [g.lattice(grid, tp) for i in range(n)]
            rng.cnormal([one, two])

            # Rank inner product
            nbytes = (one[0].global_bytes() + two[0].global_bytes()) * N * n * n
            for use_accelerator, compute_name, access in [
                (False, "host", access_host),
                (True, "accelerator", access_accelerator),
            ]:
                # Time
                dt = 0.0
                cgpt.timer_begin()
        eps2 = g.norm2(lat - np) / g.norm2(lat)
        g.message(f"Test {tr.__name__}({a_type.__name__} + {b_type.__name__}): {eps2}")
        assert eps2 < 1e-11


for a_type in [
    g.ot_matrix_spin_color(4, 3),
    g.ot_vector_spin_color(4, 3),
    g.ot_matrix_spin(4),
    g.ot_vector_spin(4),
    g.ot_matrix_color(3),
    g.ot_vector_color(3),
    g.ot_matrix_singlet(8),
    g.ot_vector_singlet(8),
]:
    a = rng.cnormal(g.lattice(grid, a_type))
    b = rng.cnormal(g.lattice(grid, a_type))
    test_linear_combinations(a, b)

# test epsilon tensor
M = np.random.rand(5, 5)
d1 = np.linalg.det(M)
d2 = 0
for idx, sign in g.epsilon(5):
    d2 += (
        M[0, idx[0]] * M[1, idx[1]] * M[2, idx[2]] * M[3, idx[3]] * M[4, idx[4]] * sign
    )
assert abs(d1 - d2) < 1e-13