def split(first, split_grid):
    assert len(first) > 0
    lattices = first
    gcoor = gpt.coordinates((split_grid, lattices[0].checkerboard()))
    lcoor = gpt.coordinates((split_grid, lattices[0].checkerboard()))
    assert len(lattices) % split_grid.sranks == 0
    return split_lattices(lattices, lcoor, gcoor, split_grid, len(lattices) // split_grid.sranks)
def split_by_rank(first):
    if type(first) != list:
        return split_by_rank([first])[0]
    assert len(first) > 0

    # TODO: split types
    lattices = first
    grid = lattices[0].grid
    mpi_split = [1, 1, 1, 1]
    fdimensions = [grid.fdimensions[i] // grid.mpi[i] for i in range(grid.nd)]
    split_grid = grid.split(mpi_split, fdimensions)
    gcoor = gpt.coordinates(lattices[0])
    lcoor = gpt.coordinates((split_grid, lattices[0].checkerboard()))
    return split_lattices(lattices, lcoor, gcoor, split_grid, len(lattices))
def __init__(self, grid, local_coordinates):
    assert grid.cb.n == 1
    self.grid = grid
    self.local_coordinates = local_coordinates

    # create a minimally embedding lattice geometry
    n = len(local_coordinates)
    N = self.grid.Nprocessors
    l = np.zeros(N, dtype=np.uint64)
    l[self.grid.processor] = 2 ** int(np.ceil(np.log(n) / np.log(2)))
    l = grid.globalsum(l)
    self.L = [int(np.max(l)) * self.grid.mpi[0]] + self.grid.mpi[1:]

    cb_simd_only_first_dimension = gpt.general(1, [0] * grid.nd, [1] + [0] * (grid.nd - 1))

    # create grid as subcommunicator so that sparse domains play nice with split grid
    self.embedding_grid = gpt.grid(
        self.L,
        grid.precision,
        cb_simd_only_first_dimension,
        None,
        self.grid.mpi,
        grid,
    )

    self.embedded_coordinates = np.ascontiguousarray(gpt.coordinates(self.embedding_grid)[0:n])
    self.embedded_cache = {}
    self.local_cache = {}
    self.coordinate_lattices_cache = None
    self.weight_cache = None
    self.one_mask_cache = None
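# Usage sketch for the sparse-domain constructor above, via the public entry
# point g.domain.sparse (as used in the examples further below). The 5% point
# fraction is an arbitrary illustration value.
import gpt as g
import numpy as np

rng = g.random("sparse-demo")
U = g.qcd.gauge.random(g.grid([8, 8, 8, 16], g.double), rng)
# pick a random subset of this rank's local coordinates
local = rng.choice(g.coordinates(U[0]), int(0.05 * len(g.coordinates(U[0]))))
sdomain = g.domain.sparse(U[0].grid, local)
S = sdomain.lattice(U[0].otype)  # lattice living on the embedding grid
sdomain.project(S, U[0])         # gather the selected sites into S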
def __init__(self, n, precision):
    self.n = n
    self.fdimensions = [2**n]
    self.grid = g.grid(self.fdimensions, precision)
    self.verbose = g.default.is_verbose("qis_map")
    self.zero_coordinate = (0,)  # |00000 ... 0> state

    t = g.timer("map_init")
    t("coordinates")

    # TODO: need to split over multiple dimensions, single dimension can hold at most 32 bits
    self.coordinates = g.coordinates(self.grid)
    self.not_coordinates = [np.bitwise_xor(self.coordinates, 2**i) for i in range(n)]
    for i in range(n):
        self.not_coordinates[i].flags["WRITEABLE"] = False

    t("masks")
    self.one_mask = []
    self.zero_mask = []
    for i in range(n):
        proj = np.bitwise_and(self.coordinates, 2**i)

        mask = g.complex(self.grid)
        g.coordinate_mask(mask, proj != 0)
        self.one_mask.append(mask)

        mask = g.complex(self.grid)
        g.coordinate_mask(mask, proj == 0)
        self.zero_mask.append(mask)

    t()
    if self.verbose:
        g.message(t)
def open_view(self, xk, iview, write, mpi, fdimensions, g_cb, l_cb):
    cv = gpt.cartesian_view(iview if iview is not None else -1, mpi, fdimensions, g_cb, l_cb)
    dn, fn = get_local_name(self.root, cv)
    loc_desc = cv.describe() + "/" + ("Write" if write else "Read")

    tag = "%d-%s" % (xk, str(iview))
    tag_pos = "%s-%s-%s-%s" % (tag, str(fdimensions), str(g_cb), str(l_cb))

    if loc_desc != self.loc_desc:
        self.close_views()
        self.loc_desc = loc_desc
        if self.verbose:
            gpt.message("Switching view to %s" % self.loc_desc)

    if tag not in self.loc:
        if write and dn is not None:
            os.makedirs(dn, exist_ok=True)
        self.loc[tag] = gpt.FILE(fn, "a+b" if write else "rb") if fn is not None else None

    if tag_pos not in self.pos:
        self.pos[tag_pos] = gpt.coordinates(cv)

    return self.loc[tag], self.pos[tag_pos]
def merge_indices(dst, src, st, cache=default_merge_indices_cache):
    pos = gpt.coordinates(dst)
    assert st is not None
    result_otype = st[-1]()
    if result_otype is None:
        dst @= src
        return
    ndim = dst.otype.shape[st[0]]
    rank = len(st) - 1
    islice = [slice(None, None, None) for i in range(len(dst.otype.shape))]
    ivec = [0] * rank
    cache_key = f"merge_indices_{dst.describe()}_{result_otype.__name__}_{dst.grid.obj}"

    tidx = []
    src_i = []
    for i in range(ndim**rank):
        idx = i
        for j in range(rank):
            c = idx % ndim
            islice[st[j]] = c
            ivec[j] = c
            idx //= ndim
        src_i.append(src[tuple(ivec)])
        tidx.append(tuple(islice))

    if cache_key not in cache:
        plan = gpt.copy_plan(dst, src_i)
        for i in range(ndim**rank):
            plan.destination += dst.view[(pos,) + tidx[i]]
            plan.source += src_i[i].view[:]
        cache[cache_key] = plan()

    cache[cache_key](dst, src_i)
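# The cached plan above builds on gpt's generic copy_plan machinery, which
# appears throughout this file. A minimal sketch of that API, copying one
# lattice into another site by site: compile the plan once, then execute it
# cheaply as often as needed.
import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
rng = g.random("plan-demo")
src = rng.cnormal(g.complex(grid))
dst = g.lattice(src)
pos = g.coordinates(src)

plan = g.copy_plan(dst, src)
plan.destination += dst.view[pos]
plan.source += src.view[pos]
plan = plan()   # compile
plan(dst, src)  # execute
assert g.norm2(dst - src) / g.norm2(src) < 1e-28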
def coordinate_mask(field, mask):
    # note: the original assert read `type(mask == numpy.ndarray)`, which is
    # always truthy; the intended type check is:
    assert isinstance(mask, numpy.ndarray)
    assert field.otype == gpt.ot_singlet
    x = gpt.coordinates(field)
    field[x] = mask.astype(field.grid.precision.complex_dtype).reshape((len(mask), 1))
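# Usage example for coordinate_mask: build a mask lattice that is 1.0 on even
# time slices and 0.0 elsewhere, mirroring how the qis map above builds its
# one/zero masks from boolean numpy arrays.
import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
mask = g.complex(grid)
coor = g.coordinates(mask)
g.coordinate_mask(mask, coor[:, 3] % 2 == 0)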
def perform(self, root):
    global basis_size, T, current_config
    if current_config is not None and current_config.conf_file != self.conf_file:
        current_config = None
    if current_config is None:
        current_config = config(self.conf_file)

    c = None
    vcj = [g.vcolor(current_config.l_exact.U_grid) for jr in range(basis_size)]
    for vcjj in vcj:
        vcjj[:] = 0

    for tprime in range(T):
        basis_evec, basis_evals = g.load(self.basis_fmt % (self.conf, tprime))

        plan = g.copy_plan(vcj[0], basis_evec[0], embed_in_communicator=vcj[0].grid)
        c = g.coordinates(basis_evec[0])
        plan.destination += vcj[0].view[
            np.hstack((c, np.ones((len(c), 1), dtype=np.int32) * tprime))
        ]
        plan.source += basis_evec[0].view[c]
        plan = plan()

        for l in range(basis_size):
            plan(vcj[l], basis_evec[l])

    for l in range(basis_size):
        g.message("Check norm:", l, g.norm2(vcj[l]))

    g.save(f"{root}/{self.name}/basis", vcj)
def split(self, mpi_split):
    split_grid = self.U_grid.split(mpi_split, self.U_grid.fdimensions)
    U_split = [gpt.lattice(split_grid, x.otype) for x in self.U]
    pos_split = gpt.coordinates(U_split[0])
    for i, x in enumerate(U_split):
        x[pos_split] = self.U[i][pos_split]
    return self.updated(U_split)
def __init__(self, grid, base, dimensions=None):
    self.base = base

    if dimensions is None:
        dimensions = list(range(grid.nd))

    self.fft = g.fft(dimensions)

    # create FA mask
    cache = {}
    self.weight = g.complex(grid)
    self.weight[:] = 0
    coor = g.coordinates(self.weight)
    for mu in dimensions:
        c_mu = coor[:, mu].astype(np.complex128)
        c_mu_l = g.complex(grid)
        c_mu_l[coor, cache] = c_mu
        c_mu_l @= g.component.sin(c_mu_l * (np.pi / grid.gdimensions[mu]))
        c_mu_l @= c_mu_l * c_mu_l * complex(4.0)
        self.weight += c_mu_l

    # special consideration for zero
    self.weight[0, 0, 0, 0] = (2.0 * np.pi) ** 2.0 / np.prod(
        [grid.gdimensions[mu] for mu in dimensions]
    ) ** (2.0 / len(dimensions))

    # invert
    self.weight @= g.component.inv(self.weight) * complex(4.0 * len(dimensions))
    self.weight = [self.weight]
def sample(self, t, p):
    if type(t) == list:
        for x in t:
            self.sample(x, p)
    elif t is None:
        return cgpt.random_sample(self.obj, t, p)
    elif type(t) == gpt.lattice:
        if "pos" in p:
            pos = p["pos"]
        else:
            pos = gpt.coordinates(t)
        t0 = gpt.time()
        mv = cgpt.random_sample(
            self.obj,
            pos,
            {
                **p,
                **{
                    "shape": list(t.otype.shape),
                    "grid": t.grid.obj,
                    "precision": t.grid.precision,
                },
            },
        )
        t1 = gpt.time()
        t[pos] = mv
        if self.verbose:
            szGB = mv.size * mv.itemsize / 1024.0**3.0
            gpt.message("Generated %g GB of random data at %g GB/s" % (szGB, szGB / (t1 - t0)))
        return t
    else:
        assert 0
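# sample() above is the internal dispatch behind gpt's random interface; the
# typical user-facing calls look like this (cnormal also appears in the test
# snippets further below, and fills the lattice it is given in place):
import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
rng = g.random("seed text")
l = g.complex(grid)
rng.cnormal(l)                   # fill an existing lattice in place
m = rng.cnormal(g.vcolor(grid))  # or create-and-fill in one call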
def apply_exp_ixp(dst, src, p):
    # TODO: add sparse field support (x.internal_coordinates(), x.coordinates())
    x = gpt.coordinates(src)

    # create phase field
    phase = gpt.complex(src.grid)
    phase.checkerboard(src.checkerboard())
    phase[x] = cgpt.coordinates_momentum_phase(x, p, src.grid.precision)
    dst @= phase * src
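# apply_exp_ixp is the backend of gpt's momentum-phase operator. Assuming the
# public wrapper g.exp_ixp, applying a plane-wave phase exp(i x.p) to a field
# looks like this; the momentum is one unit of the smallest lattice momentum
# in the x direction.
import gpt as g
import numpy as np

grid = g.grid([8, 8, 8, 16], g.double)
rng = g.random("phase-demo")
src = rng.cnormal(g.complex(grid))
p = 2.0 * np.pi * np.array([1, 0, 0, 0]) / np.array(grid.fdimensions)
dst = g.eval(g.exp_ixp(p) * src)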
def split(first, split_grid, cache=None, group_policy=split_group_policy.separate):
    assert len(first) > 0
    lattices = first
    gcoor = gpt.coordinates((split_grid, lattices[0].checkerboard()))
    lcoor = gpt.coordinates((split_grid, lattices[0].checkerboard()))
    assert len(lattices) % split_grid.sranks == 0
    return split_lattices(
        lattices,
        lcoor,
        gcoor,
        split_grid,
        len(lattices) // split_grid.sranks,
        cache,
        group_policy,
    )
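# A hedged round-trip sketch for the split interface above, assuming the
# public pair g.split / g.unsplit: pack lattices onto a split grid (here with
# the trivial mpi_split = [1, 1, 1, 1], which also works on a single rank)
# and scatter them back afterwards.
import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
rng = g.random("split-demo")
lattices = [rng.cnormal(g.complex(grid)) for _ in range(2)]

split_grid = grid.split([1, 1, 1, 1], grid.fdimensions)
lattices_split = g.split(lattices, split_grid)
g.unsplit(lattices, lattices_split)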
def perform(self, root):
    global current_config, current_light_quark
    if current_config is not None and current_config.conf_file != self.conf_file:
        current_config = None
    if current_config is None:
        current_config = config(self.conf_file)
    if current_light_quark is not None and current_light_quark.evec_dir != self.evec_dir:
        current_light_quark = None
    if current_light_quark is None:
        current_light_quark = light_quark(current_config, self.evec_dir)

    prop_l = {
        "sloppy": current_light_quark.prop_l_sloppy,
        "exact": current_light_quark.prop_l_exact,
    }[self.solver]

    vcj = g.load(f"{root}/{self.conf}/pm_basis/basis")
    c = g.coordinates(vcj[0])
    c = c[c[:, 3] == self.t]

    g.message(
        f"t = {self.t}, ilist = {self.ilist}, basis size = {len(vcj)}, solver = {self.solver}"
    )

    root_job = f"{root}/{self.name}"
    output = g.gpt_io.writer(f"{root_job}/propagators")

    # create sources
    srcD = [g.vspincolor(current_config.l_exact.U_grid) for spin in range(4)]

    for i in self.ilist:
        for spin in range(4):
            srcD[spin][:] = 0
            srcD[spin][c, spin, :] = vcj[i][c]

            g.message("Norm of source:", g.norm2(srcD[spin]))
            if i == 0:
                g.message("Source at origin:", srcD[spin][0, 0, 0, 0])
                g.message("Source at time-origin:", srcD[spin][0, 0, 0, self.t])

        prop = g.eval(prop_l * srcD)
        g.mem_report(details=False)

        for spin in range(4):
            output.write({f"t{self.t}s{spin}c{i}_{self.solver}": prop[spin]})
            output.flush()
def __init__(self, grid, margin, cb=None):
    super().__init__()
    if cb is None:
        cb = g.none
    self.grid = grid
    self.cb = cb
    dim = grid.nd
    self.local_grid = grid.split(
        [1] * dim,
        [grid.fdimensions[i] // grid.mpi[i] + 2 * margin[i] for i in range(dim)],
    )
    self.gcoor = g.coordinates((grid, cb), margin=margin)
    self.lcoor = g.coordinates((self.local_grid, cb))
    top = np.array(margin, dtype=np.int32)
    bottom = np.array(self.local_grid.fdimensions, dtype=np.int32) - top
    self.bcoor = (
        np.sum(np.logical_and(self.lcoor >= top, self.lcoor < bottom), axis=1) == len(top)
    )
def perform(self, root):
    global basis_size, sloppy_per_job, T, current_config, compress_ratio
    if current_config is not None and current_config.conf_file != self.conf_file:
        current_config = None
    if current_config is None:
        current_config = config(self.conf_file)

    U = current_config.U
    reduced_mpi = [x for x in U[0].grid.mpi]
    for i in range(len(reduced_mpi)):
        if reduced_mpi[i] % 2 == 0:
            reduced_mpi[i] //= 2

    # create random selection of points with same spatial sites on each sink time slice
    # use different spatial sites for each source time-slice
    # this should be optimal for the local operator insertions
    rng = g.random(f"sparse2_{self.conf}_{self.t}")
    grid = U[0].grid
    t0 = grid.ldimensions[3] * grid.processor_coor[3]
    t1 = t0 + grid.ldimensions[3]
    spatial_sites = int(compress_ratio * np.prod(grid.ldimensions[0:3]))
    spatial_coordinates = rng.choice(g.coordinates(U[0]), spatial_sites)
    local_coordinates = np.repeat(spatial_coordinates, t1 - t0, axis=0)
    for t in range(t0, t1):
        local_coordinates[t - t0 :: t1 - t0, 3] = t

    sdomain = g.domain.sparse(current_config.l_exact.U_grid, local_coordinates)

    half_peramb = {"sparse_domain": sdomain}
    for i0 in range(0, basis_size, sloppy_per_job):
        for l in g.load(f"{root}/{self.conf}/pm_{self.solver}_t{self.t}_i{i0}/propagators"):
            for x in l:
                S = sdomain.lattice(l[x].otype)
                sdomain.project(S, l[x])
                half_peramb[x] = S
                g.message(x)

    g.save(
        f"{root}/{self.name}/propagators",
        half_peramb,
        g.format.gpt({"mpi": reduced_mpi}),
    )
def __call__(self):
    plan = g.copy_plan(self.destinations, self.sources)
    buffer_descriptions = []
    for i in range(len(self.sources)):
        src = self.sources[i]
        src_cb = src.checkerboard()
        coordinates = g.coordinates(src)
        L = src.grid.fdimensions
        for x in self.displacements[i]:
            buffer_descriptions.append((src.grid, src.otype, src_cb))
            plan.destination += self.destinations[self.indices[i][x]].view[:]
            plan.source += src.view[cgpt.coordinates_shift(coordinates, x, L)]
    return cshift_executer(buffer_descriptions, plan())
def apply_exp_ixp(dst, src, p, origin, cache):
    cache_key = f"{src.grid}_{src.checkerboard().__name__}_{origin}_{p}"
    if cache_key not in cache:
        x = gpt.coordinates(src)
        phase = gpt.complex(src.grid)
        phase.checkerboard(src.checkerboard())
        x_relative = x
        if origin is not None:
            x_relative = relative_coordinates(x, origin, src.grid.fdimensions)
        phase[x] = cgpt.coordinates_momentum_phase(x_relative, p, src.grid.precision)
        cache[cache_key] = phase
    dst @= cache[cache_key] * src
def merge_indices(dst, src, st):
    pos = gpt.coordinates(dst)
    assert st is not None
    result_otype = st[-1]()
    if result_otype is None:
        dst @= src
        return
    ndim = dst.otype.shape[st[0]]
    rank = len(st) - 1
    islice = [slice(None, None, None) for i in range(len(dst.otype.shape))]
    ivec = [0] * rank
    for i in range(ndim**rank):
        idx = i
        for j in range(rank):
            c = idx % ndim
            islice[st[j]] = c
            ivec[j] = c
            idx //= ndim
        dst[(pos,) + tuple(islice)] = src[tuple(ivec)][:]
def separate_indices(x, st, cache=default_merge_indices_cache):
    pos = gpt.coordinates(x)
    cb = x.checkerboard()
    assert st is not None
    result_otype = st[-1]()
    if result_otype is None:
        return x
    ndim = x.otype.shape[st[0]]
    rank = len(st) - 1
    islice = [slice(None, None, None) for i in range(len(x.otype.shape))]
    ivec = [0] * rank
    result = {}

    keys = []
    tidx = []
    dst = []
    for i in range(ndim**rank):
        idx = i
        for j in range(rank):
            c = idx % ndim
            islice[st[j]] = c
            ivec[j] = c
            idx //= ndim
        keys.append(tuple(ivec))
        tidx.append(tuple(islice))

    for i in keys:
        v = gpt.lattice(x.grid, result_otype)
        v.checkerboard(cb)
        result[i] = v
        dst.append(v)

    cache_key = f"separate_indices_{cb.__name__}_{result_otype.__name__}_{x.otype.__name__}_{x.grid.describe()}_{x.grid.obj}"
    if cache_key not in cache:
        plan = gpt.copy_plan(dst, x)
        for i in range(len(tidx)):
            plan.destination += result[keys[i]].view[pos]
            plan.source += x.view[(pos,) + tidx[i]]
        cache[cache_key] = plan()

    cache[cache_key](dst, x)
    return result
def distribute_cartesian_file(fdimensions, grid, cb):
    ldimensions = [x for x in fdimensions]
    dimdiv = len(ldimensions) - 1
    primes = [7, 5, 3, 2]
    nreader = 1
    found = True
    while found:
        found = False
        for p in primes:
            if ldimensions[dimdiv] % p == 0 and nreader * p <= grid.Nprocessors:
                nreader *= p
                ldimensions[dimdiv] //= p
                if ldimensions[dimdiv] == 1 and dimdiv > 0:
                    dimdiv -= 1
                found = True
    cv_desc = [a // b for a, b in zip(fdimensions, ldimensions)]
    cv = gpt.cartesian_view(grid.processor, cv_desc, fdimensions, grid.cb, cb)
    return gpt.coordinates(cv), nreader
def map_pos(grid, cb, key):
    # if list, convert to numpy array
    if type(key) == list:
        key = numpy.array(key, dtype=numpy.int32)

    # if key is a numpy array, no further processing is needed
    if isinstance(key, numpy.ndarray):
        return key

    # if not, we expect a tuple of slices
    assert type(key) == tuple

    # slices without specified start/stop correspond to the memory-view limitation for this rank
    if all([k == slice(None, None, None) for k in key]):
        # go through gpt.coordinates to use its caching feature
        return gpt.coordinates((grid, cb), order="lexicographic")

    nd = grid.nd
    key = tuple([k if type(k) == slice else slice(k, k + 1) for k in key])
    assert all([k.step is None for k in key])
    top = [
        grid.fdimensions[i] // grid.mpi[i] * grid.processor_coor[i] if k.start is None else k.start
        for i, k in enumerate(key)
    ]
    bottom = [
        grid.fdimensions[i] // grid.mpi[i] * (1 + grid.processor_coor[i]) if k.stop is None else k.stop
        for i, k in enumerate(key)
    ]
    assert all(
        [0 <= top[i] and top[i] <= bottom[i] and bottom[i] <= grid.fdimensions[i] for i in range(nd)]
    )
    return cgpt.coordinates_from_cartesian_view(top, bottom, grid.cb.cb_mask, cb.tag, "lexicographic")
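# map_pos above resolves the different key types accepted by lattice indexing;
# the corresponding user-facing forms (all seen elsewhere in this file) are:
import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
rng = g.random("index-demo")
l = rng.cnormal(g.complex(grid))

v0 = l[0, 0, 0, 0]          # single site
data = l[g.coordinates(l)]  # explicit coordinate list
local = l[:]                # this rank's local sites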
def mk_qlat_gpt_copy_plan(ctype, total_site, multiplicity, tag):
    geo = q.Geometry(total_site, multiplicity)
    f_gpt = mk_gpt_field(ctype, geo)
    f_qlat = q.Field(ctype, geo)
    lexicographic_coordinates = g.coordinates(f_gpt)
    buf = f_qlat.mview()
    if tag == "qlat_from_gpt":
        qlat_from_gpt = g.copy_plan(buf, f_gpt)
        qlat_from_gpt.destination += g.global_memory_view(
            f_gpt.grid, [[f_gpt.grid.processor, buf, 0, buf.nbytes]]
        )
        qlat_from_gpt.source += f_gpt.view[lexicographic_coordinates]
        qlat_from_gpt = qlat_from_gpt(local_only=True)
        return qlat_from_gpt
    elif tag == "gpt_from_qlat":
        gpt_from_qlat = g.copy_plan(f_gpt, buf)
        gpt_from_qlat.source += g.global_memory_view(
            f_gpt.grid, [[f_gpt.grid.processor, buf, 0, buf.nbytes]]
        )
        gpt_from_qlat.destination += f_gpt.view[lexicographic_coordinates]
        gpt_from_qlat = gpt_from_qlat(local_only=True)
        return gpt_from_qlat
    else:
        q.displayln_info(tag)
        raise Exception("mk_qlat_gpt_copy_plan")
def separate_indices(x, st):
    pos = gpt.coordinates(x)
    cb = x.checkerboard()
    assert st is not None
    result_otype = st[-1]()
    if result_otype is None:
        return x
    ndim = x.otype.shape[st[0]]
    rank = len(st) - 1
    islice = [slice(None, None, None) for i in range(len(x.otype.shape))]
    ivec = [0] * rank
    result = {}
    for i in range(ndim**rank):
        idx = i
        for j in range(rank):
            c = idx % ndim
            islice[st[j]] = c
            ivec[j] = c
            idx //= ndim
        v = gpt.lattice(x.grid, result_otype)
        v.checkerboard(cb)
        v[pos] = x[(pos,) + tuple(islice)]
        result[tuple(ivec)] = v
    return result
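# separate_indices/merge_indices power the user-facing component splitters.
# Assuming the color wrappers g.separate_color / g.merge_color are built on
# them (these wrapper names are not shown in this file), a round trip over the
# color indices of a 3x3 color-matrix field would look like:
import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
rng = g.random("color-demo")
V = rng.cnormal(g.mcolor(grid))

components = g.separate_color(V)  # dict keyed by (row, col) color indices
W = g.lattice(V)
g.merge_color(W, components)
assert g.norm2(W - V) / g.norm2(V) < 1e-28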
else:
    work_dir = "."

# load configuration
# U = g.load("/hpcgpfs01/work/clehner/configs/32IDfine/ckpoint_lat.200")
# assert abs(g.qcd.gauge.plaquette(U) - float(U[0].metadata["PLAQUETTE"])) < 1e-9

# Show metadata of field
# g.message("Metadata", U[0].metadata)

rng = g.random("test")
U = g.qcd.gauge.random(g.grid([8, 8, 8, 16], g.double), rng)

# create a sparse sub-domain and a sparse lattice S with 1% of points
sdomain = g.domain.sparse(
    U[0].grid,
    rng.choice(g.coordinates(U[0]), int(0.01 * U[0].grid.gsites / U[0].grid.Nprocessors)),
)

# test sparse domain
S = sdomain.lattice(U[0].otype)
sdomain.project(S, U[0])
U0prime = g.lattice(U[0])
U0prime[:] = 0
sdomain.promote(U0prime, S)
assert (
    np.linalg.norm(U0prime[sdomain.local_coordinates] - U[0][sdomain.local_coordinates]) < 1e-14
)

s_slice = sdomain.slice(S, 3)

# save in default gpt format
g.save(
    f"{work_dir}/out",
    {
rng = g.random("test") l_dp = rng.cnormal(g.vcolor(grid_dp)) l_sp = g.convert(l_dp, g.single) # and convert precision l_dp_prime = g.convert(l_sp, g.double) eps2 = g.norm2(l_dp - l_dp_prime) / g.norm2(l_dp) assert eps2 < 1e-14 eps2 = g.norm2(l_dp[0, 0, 0, 0] - l_sp[0, 0, 0, 0]) assert eps2 < 1e-14 ################################################################################ # Test mview ################################################################################ c = g.coordinates(l_dp) x = l_dp[c] mv = g.mview(x) assert mv.itemsize == 1 and mv.shape[0] == len(mv) assert sys.getrefcount(x) == 3 del mv assert sys.getrefcount(x) == 2 ################################################################################ # Test pinning ################################################################################ l_v = g.complex(grid_sp) pin = g.pin(l_v, g.accelerator) del l_v del pin
def create_links(A, fmat, basis, params):
    # NOTE: we expect the blocks in the basis vectors
    # to already be orthogonalized!

    # parameters
    make_hermitian = params["make_hermitian"]
    save_links = params["save_links"]
    assert not (make_hermitian and not save_links)

    # verbosity
    verbose = gpt.default.is_verbose("coarsen")

    # setup timings
    t = gpt.timer("coarsen")
    t("setup")

    # get grids
    f_grid = basis[0].grid
    c_grid = A[0].grid

    # directions/displacements we coarsen
    dirs = [1, 2, 3, 4] if f_grid.nd == 5 else [0, 1, 2, 3]
    disp = +1
    dirdisps_full = list(zip(dirs * 2, [+1] * 4 + [-1] * 4))
    dirdisps_forward = list(zip(dirs, [disp] * 4))
    nhops = len(dirdisps_full)
    selflink = nhops

    # setup fields
    Mvr = [gpt.lattice(basis[0]) for i in range(nhops)]
    tmp = gpt.lattice(basis[0])
    oproj = gpt.vcomplex(c_grid, len(basis))
    selfproj = gpt.vcomplex(c_grid, len(basis))

    # setup masks
    onemask, blockevenmask, blockoddmask = (
        gpt.complex(f_grid),
        gpt.complex(f_grid),
        gpt.complex(f_grid),
    )
    dirmasks = [gpt.complex(f_grid) for p in range(nhops)]

    # auxiliary stuff needed for masks
    t("masks")
    onemask[:] = 1.0
    coor = gpt.coordinates(blockevenmask)
    block = numpy.array(f_grid.ldimensions) / numpy.array(c_grid.ldimensions)
    block_cb = coor[:, :] // block[:]

    # fill masks for sites within even/odd blocks
    gpt.coordinate_mask(blockevenmask, numpy.sum(block_cb, axis=1) % 2 == 0)
    blockoddmask @= onemask - blockevenmask

    # fill masks for sites on borders of blocks
    dirmasks_forward_np = coor[:, :] % block[:] == block[:] - 1
    dirmasks_backward_np = coor[:, :] % block[:] == 0
    for mu in dirs:
        gpt.coordinate_mask(dirmasks[mu], dirmasks_forward_np[:, mu])
        gpt.coordinate_mask(dirmasks[mu + 4], dirmasks_backward_np[:, mu])

    # save applications of matrix and coarsening if possible
    dirdisps = dirdisps_forward if save_links else dirdisps_full

    # create block maps
    t("blockmap")
    dirbms = [gpt.block.map(c_grid, basis, dirmasks[p]) for p, (mu, fb) in enumerate(dirdisps)]
    fullbm = gpt.block.map(c_grid, basis)

    for i, vr in enumerate(basis):
        # apply directional hopping terms
        # this triggers len(dirdisps) comms -> TODO expose DhopdirAll from Grid
        # BUT problem with vector<Lattice<...>> in rhs
        t("apply_hop")
        [fmat.Mdir(*dirdisp)(Mvr[p], vr) for p, dirdisp in enumerate(dirdisps)]

        # coarsen directional terms + write to link
        for p, (mu, fb) in enumerate(dirdisps):
            t("coarsen_hop")
            dirbms[p].project(oproj, Mvr[p])
            t("copy_hop")
            A[p][:, :, :, :, :, i] = oproj[:]

        # fast diagonal term: apply full matrix to both block cbs separately and discard hops into other cb
        t("apply_self")
        tmp @= (
            blockevenmask * fmat * vr * blockevenmask
            + blockoddmask * fmat * vr * blockoddmask
        )

        # coarsen diagonal term
        t("coarsen_self")
        fullbm.project(selfproj, tmp)

        # write to self link
        t("copy_self")
        A[selflink][:, :, :, :, :, i] = selfproj[:]

        if verbose:
            gpt.message("coarsen: done with vector %d" % i)

    # communicate opposite links
    if save_links:
        t("comm")
        communicate_links(A, dirdisps_forward, make_hermitian)

    t()
    if verbose:
        gpt.message(t)
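# create_links relies on gpt's block-map machinery. A minimal project/promote
# sketch with a random basis, assuming g.block.map as used above and its
# orthonormalize method (grid sizes and basis dimensions are illustration
# values only):
import gpt as g

fine = g.grid([8, 8, 8, 16], g.double)
coarse = g.grid([4, 4, 4, 8], g.double)
rng = g.random("block-demo")
basis = [rng.cnormal(g.vcomplex(fine, 8)) for _ in range(4)]

bm = g.block.map(coarse, basis)
bm.orthonormalize()                 # block-orthonormalize the basis first
coarse_vec = g.vcomplex(coarse, len(basis))
bm.project(coarse_vec, basis[0])    # fine -> coarse
fine_vec = g.lattice(basis[0])
bm.promote(fine_vec, coarse_vec)    # coarse -> fine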
# Show metadata of field
g.message("Metadata", U[0].metadata)

# to single precision
# U = g.convert(U, g.single)

# save in default gpt format
g.save(
    "out",
    {
        "va\nl": [
            0,
            1,
            3,
            "tes\n\0t",
            3.123456789123456789,
            1.123456789123456789e-7,
            1 + 3.1231251251234123413j,
        ],  # fundamental data types
        "np": g.coordinates(U[0].grid),  # write numpy array from root node
        "U": U,  # write list of lattices
    },
)

# save in custom gpt format with different mpi distribution of local views
g.save(
    "out2",
    {
        "val": [
            0,
            1,
            3,
            "test",
            3.123456789123456789,
            1.123456789123456789e-7,
            1 + 3.1231251251234123413j,
        ],  # fundamental data types
        "np": g.coordinates(U[0].grid),  # write numpy array from root node
        "U":
import gpt as g
import numpy as np
import sys, cgpt

# grid
L = [16, 16, 16, 32]
grid_dp = g.grid(L, g.double)
grid_sp = g.grid(L, g.single)

# test fields
l_dp = g.random("test").cnormal(g.vcolor(grid_dp))
l_sp = g.convert(l_dp, g.single)

################################################################################
# Test mview
################################################################################
c = g.coordinates(l_dp)
x = l_dp[c]
mv = g.mview(x)
assert mv.itemsize == 1 and mv.shape[0] == len(mv)
assert sys.getrefcount(x) == 3
del mv
assert sys.getrefcount(x) == 2

################################################################################
# Test assignments
################################################################################
pos = l_dp.mview_coordinates()
lhs = g.lattice(l_dp)

def assign_copy():
def coordinates(src, position, spacing):
    coor = gpt.coordinates(src)
    return coor[np.sum(np.mod(coor - position, spacing), axis=1) == 0]
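# Usage example for the helper above: select every fourth site in each
# direction, anchored at the origin, e.g. as a grid of point-source positions.
import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
src = g.complex(grid)
pos = coordinates(src, position=[0, 0, 0, 0], spacing=[4, 4, 4, 4])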