def __setitem__(self, key, value):
    # unpack cache
    cache, key = unpack_cache_key(key)
    cache_key = None if cache is None else "set"

    # short code path to zero lattice
    if type(key) == slice and key == slice(None, None, None):
        if gpt.util.is_num(value):
            for o in self.v_obj:
                cgpt.lattice_set_to_number(o, value)
            return
        cache_key = f"{self.otype.__name__}_{self.checkerboard().__name__}_{self.grid.describe()}"
        cache = lattice.cache

    # general code path, map key
    pos, tidx, shape = gpt.map_key(self, key)
    n_pos = len(pos)

    # convert input to proper numpy array
    value = gpt.util.tensor_to_value(value, dtype=self.grid.precision.complex_dtype)
    if value is None:
        value = memoryview(bytearray())

    # needed bytes and optional cyclic upscaling
    nbytes_needed = n_pos * numpy.prod(shape) * self.grid.precision.nbytes * 2
    value = cgpt.copy_cyclic_upscale(value, nbytes_needed)

    # create plan
    if cache_key is None or cache_key not in cache:
        plan = gpt.copy_plan(self, value)
        plan.destination += gpt.lattice_view(self, pos, tidx)
        plan.source += gpt.global_memory_view(
            self.grid,
            [[self.grid.processor, value, 0, value.nbytes]] if value.nbytes > 0 else None,
        )

        # skip optimization if we only use it once
        xp = plan(
            local_only=isinstance(pos, gpt.core.local_coordinates),
            skip_optimize=cache_key is None,
        )
        if cache_key is not None:
            cache[cache_key] = xp
    else:
        xp = cache[cache_key]

    xp(self, value)
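# Usage sketch for __setitem__ (illustrative only, kept as a comment; it
# assumes the public gpt entry points gpt.grid, gpt.double, gpt.complex and
# gpt.coordinates, and that this class is exposed as gpt.lattice). A full-slice
# numeric assignment takes the fast cgpt fill path above; coordinate keys go
# through the copy-plan path, and passing a dict as the first key element
# caches the plan for reuse:
#
#   grid = gpt.grid([4, 4, 4, 8], gpt.double)
#   c = gpt.complex(grid)
#   c[:] = 0                      # fast path: cgpt.lattice_set_to_number
#   pos = gpt.coordinates(grid)   # local coordinates of this rank
#   data = numpy.zeros((len(pos), 1), dtype=grid.precision.complex_dtype)
#   c[pos] = data                 # general path: builds and executes a copy plan
#   plan_cache = {}
#   c[plan_cache, pos] = data     # dict as first key element: plan is cached and reused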
def __getitem__(self, key):
    # unpack cache
    cache, key = unpack_cache_key(key)
    cache_key = None if cache is None else "get"

    # general code path, map key
    pos, tidx, shape = gpt.map_key(self, key)
    n_pos = len(pos)

    # create target
    value = cgpt.ndarray((n_pos, *shape), self.grid.precision.complex_dtype)

    # create plan
    if cache_key is None or cache_key not in cache:
        plan = gpt.copy_plan(value, self)
        plan.destination += gpt.global_memory_view(
            self.grid,
            [[self.grid.processor, value, 0, value.nbytes]] if value.nbytes > 0 else None,
        )
        plan.source += gpt.lattice_view(self, pos, tidx)
        xp = plan()
        if cache_key is not None:
            cache[cache_key] = xp
    else:
        xp = cache[cache_key]

    xp(value, self)

    # if only a single element is returned and we have the full shape,
    # wrap in a tensor
    if len(value) == 1 and shape == self.otype.shape:
        return gpt.util.value_to_tensor(value[0], self.otype)
    return value
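# Usage sketch for __getitem__ (illustrative only, same assumptions as the
# sketch above). A single-site key with the full otype shape is wrapped via
# value_to_tensor; multi-site keys return the raw coordinate-major array, and
# a dict as the first key element reuses a cached copy plan across reads:
#
#   v = c[0, 0, 0, 0]         # single site, full shape -> tensor-wrapped value
#   arr = c[pos]              # many sites -> array of shape (len(pos), *otype.shape)
#   read_cache = {}
#   arr = c[read_cache, pos]  # repeated reads reuse the cached plan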
def __getitem__(self, key):
    # map key to coordinates and tensor indices; the shape is not needed
    # for a view, so it is discarded
    pos, tidx, shape = gpt.map_key(self.parent, key)
    return gpt.lattice_view(self.parent, pos, tidx)
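# Usage sketch (illustrative only): assuming this constructor object is exposed
# as lattice.view, with the owning lattice stored in self.parent, the views it
# returns can be fed directly into a copy plan, mirroring the plan construction
# in __setitem__ and __getitem__ above:
#
#   plan = gpt.copy_plan(dst, src)
#   plan.destination += dst.view[pos]
#   plan.source += src.view[pos]
#   xp = plan()
#   xp(dst, src)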