def test_conditional_dimension(self):
    """Test that different ConditionalDimensions have different hash value."""
    d0 = Dimension(name='d')
    s0 = Scalar(name='s')
    d1 = Dimension(name='d', spacing=s0)

    cd0 = ConditionalDimension(name='cd', parent=d0, factor=4)
    cd1 = ConditionalDimension(name='cd', parent=d0, factor=5)
    assert cd0 is not cd1
    assert hash(cd0) != hash(cd1)

    cd2 = ConditionalDimension(name='cd', parent=d0, factor=4, indirect=True)
    assert hash(cd0) != hash(cd2)

    cd3 = ConditionalDimension(name='cd', parent=d1, factor=4)
    assert hash(cd0) != hash(cd3)

    s1 = Scalar(name='s', dtype=np.int32)
    cd4 = ConditionalDimension(name='cd', parent=d0, factor=4, condition=s0 > 3)
    assert hash(cd0) != hash(cd4)

    cd5 = ConditionalDimension(name='cd', parent=d0, factor=4, condition=s1 > 3)
    assert hash(cd0) != hash(cd5)
    assert hash(cd4) != hash(cd5)
def __init__(self, shape, extent=None, origin=None, dimensions=None,
             time_dimension=None, dtype=np.float32, subdomains=None,
             comm=None, topology=None):
    shape = as_tuple(shape)

    # Create or pull the SpaceDimensions
    if dimensions is None:
        ndim = len(shape)
        assert ndim <= 3
        dim_names = self._default_dimensions[:ndim]
        dim_spacing = tuple(Scalar(name='h_%s' % n, dtype=dtype, is_const=True)
                            for n in dim_names)
        dimensions = tuple(SpaceDimension(name=n, spacing=s)
                           for n, s in zip(dim_names, dim_spacing))
    else:
        for d in dimensions:
            if not d.is_Space:
                raise ValueError("Cannot create Grid with Dimension `%s` "
                                 "since it's not a SpaceDimension" % d)
            if d.is_Derived and not d.is_Conditional:
                raise ValueError("Cannot create Grid with derived Dimension `%s` "
                                 "of type `%s`" % (d, type(d)))

    super().__init__(shape, dimensions, dtype)

    # Create a Distributor, used internally to implement domain decomposition
    # by all Functions defined on this Grid
    self._distributor = Distributor(shape, dimensions, comm, topology)

    # The physical extent
    self._extent = as_tuple(extent or tuple(1. for _ in self.shape))

    # Initialize SubDomains
    subdomains = tuple(i for i in (Domain(), Interior(), *as_tuple(subdomains)))
    for counter, i in enumerate(subdomains):
        i.__subdomain_finalize__(self, counter=counter)
    self._subdomains = subdomains

    self._origin = as_tuple(origin or tuple(0. for _ in self.shape))
    self._origin_symbols = tuple(Scalar(name='o_%s' % d.name, dtype=dtype,
                                        is_const=True)
                                 for d in self.dimensions)

    # Sanity check
    assert (self.dim == len(self.origin) == len(self.extent) == len(self.spacing))

    # Store or create default symbols for time and stepping dimensions
    if time_dimension is None:
        spacing = Scalar(name='dt', dtype=dtype, is_const=True)
        self._time_dim = TimeDimension(name='time', spacing=spacing)
        self._stepping_dim = SteppingDimension(name='t', parent=self.time_dim)
    elif isinstance(time_dimension, TimeDimension):
        self._time_dim = time_dimension
        self._stepping_dim = SteppingDimension(name='%s_s' % self.time_dim.name,
                                               parent=self.time_dim)
    else:
        raise ValueError("`time_dimension` must be None or of type TimeDimension")
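# A short usage sketch (assuming the standard devito API) of the constructor
# above: with no `dimensions` argument, SpaceDimensions and their spacing
# symbols are created automatically, and default time/stepping dimensions are
# attached to the Grid.
from devito import Grid

grid = Grid(shape=(10, 10), extent=(1., 1.))
print(grid.dimensions)                   # (x, y), created with spacings h_x, h_y
print(grid.dimensions[0].spacing)        # h_x
print(grid.time_dim, grid.stepping_dim)  # time, t
print(grid.origin)                       # defaults to (0., 0.)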
def _symbolic_thickness(cls, name):
    return (Scalar(name="%s_ltkn" % name, dtype=np.int32,
                   is_const=True, nonnegative=True),
            Scalar(name="%s_rtkn" % name, dtype=np.int32,
                   is_const=True, nonnegative=True))
def test_dimension(self):
    """Test that different Dimensions have different hash value."""
    d0 = Dimension(name='d')
    s0 = Scalar(name='s')
    d1 = Dimension(name='d', spacing=s0)
    assert hash(d0) != hash(d1)

    s1 = Scalar(name='s', dtype=np.int32)
    d2 = Dimension(name='d', spacing=s1)
    assert hash(d1) != hash(d2)

    d3 = Dimension(name='d', spacing=Constant(name='s1'))
    assert hash(d3) != hash(d0)
    assert hash(d3) != hash(d1)
def test_scalar(self):
    """
    Test that Scalars with same name but different attributes do not alias to
    the same Scalar. Conversely, if the name and the attributes are the same,
    they must alias to the same Scalar.
    """
    s0 = Scalar(name='s0')
    s1 = Scalar(name='s0')
    assert s0 is s1

    s2 = Scalar(name='s0', dtype=np.int32)
    assert s2 is not s1

    s3 = Scalar(name='s0', is_const=True)
    assert s3 is not s1
def interpolate(self, expr, offset=0, increment=False, self_subs={}):
    """
    Generate equations interpolating an arbitrary expression into ``self``.

    Parameters
    ----------
    expr : expr-like
        Input expression to interpolate.
    offset : int, optional
        Additional offset from the boundary.
    increment: bool, optional
        If True, generate increments (Inc) rather than assignments (Eq).
    """
    variables = list(retrieve_function_carriers(expr))

    # List of indirection indices for all adjacent grid points
    idx_subs, eqns = self._interpolation_indices(variables, offset)

    # Substitute coordinate base symbols into the interpolation coefficients
    args = [expr.subs(v_sub) * b.subs(v_sub)
            for b, v_sub in zip(self._interpolation_coeffs, idx_subs)]

    # Accumulate point-wise contributions into a temporary
    rhs = Scalar(name='sum', dtype=self.dtype)
    summands = [Eq(rhs, 0.)] + [Inc(rhs, i) for i in args]

    # Write/Incr `self`
    lhs = self.subs(self_subs)
    last = [Inc(lhs, rhs)] if increment else [Eq(lhs, rhs)]

    return eqns + summands + last
def test_conditional_dimension(self):
    """
    Test that ConditionalDimensions with same name but different attributes do
    not alias to the same ConditionalDimension. Conversely, if the name and the
    attributes are the same, they must alias to the same ConditionalDimension.
    """
    i = Dimension(name='i')
    ci0 = ConditionalDimension(name='ci', parent=i, factor=4)
    ci1 = ConditionalDimension(name='ci', parent=i, factor=4)
    assert ci0 is ci1

    ci2 = ConditionalDimension(name='ci', parent=i, factor=8)
    assert ci2 is not ci1

    ci3 = ConditionalDimension(name='ci', parent=i, factor=4, indirect=True)
    assert ci3 is not ci1

    s = Scalar(name='s')
    ci4 = ConditionalDimension(name='ci', parent=i, factor=4, condition=s > 3)
    assert ci4 is not ci1

    ci5 = ConditionalDimension(name='ci', parent=i, factor=4, condition=s > 3)
    assert ci5 is ci4
def test_clear_cache_with_alive_symbols(self, operate_on_empty_cache,
                                        nx=1000, ny=1000):
    """
    Test that `clear_cache` doesn't affect caching if an object is still alive.
    """
    grid = Grid(shape=(nx, ny), dtype=np.float64)

    f0 = Function(name='f', grid=grid, space_order=2)
    f1 = Function(name='f', grid=grid, space_order=2)

    # Obviously:
    assert f0 is not f1

    # And clearly, both still alive after a `clear_cache`
    clear_cache()
    assert f0 is not f1
    assert f0.grid.dimensions[0] is grid.dimensions[0]

    # Now we try with symbols
    s0 = Scalar(name='s')
    s1 = Scalar(name='s')

    # Clearly:
    assert s1 is s0

    clear_cache()

    s2 = Scalar(name='s')

    # s2 must still be s0/s1, even after a clear_cache, as s0/s1 are both alive!
    assert s2 is s1

    del s0
    del s1

    s3 = Scalar(name='s')

    # And obviously, still:
    assert s3 is s2

    cache_size = len(_SymbolCache)
    del s2
    del s3

    clear_cache()

    assert len(_SymbolCache) == cache_size - 1
def test_sub_dimension(self):
    """Test that different SubDimensions have different hash value."""
    d0 = Dimension(name='d')
    d1 = Dimension(name='d', spacing=Scalar(name='s'))

    di0 = SubDimension.middle('di', d0, 1, 1)
    di1 = SubDimension.middle('di', d1, 1, 1)
    assert hash(di0) != hash(d0)
    assert hash(di0) != hash(di1)

    dl0 = SubDimension.left('dl', d0, 2)
    assert hash(dl0) != hash(di0)
def test_dimension_cache():
    """
    Test that :class:`Dimension`s with same name but different attributes
    do not alias to the same Dimension.
    """
    d0 = Dimension(name='d')
    d1 = Dimension(name='d')
    assert d0 is d1

    s0 = Scalar(name='s0')
    s1 = Scalar(name='s1')

    d2 = Dimension(name='d', spacing=s0)
    d3 = Dimension(name='d', spacing=s1)
    assert d2 is not d3

    d4 = Dimension(name='d', spacing=s1)
    assert d3 is d4

    d5 = Dimension(name='d', spacing=Constant(name='s1'))
    assert d2 is not d5
def test_dimension(self):
    """
    Test that Dimensions with same name but different attributes do not alias
    to the same Dimension. Conversely, if the name and the attributes are the
    same, they must alias to the same Dimension.
    """
    d0 = Dimension(name='d')
    d1 = Dimension(name='d')
    assert d0 is d1

    s0 = Scalar(name='s0')
    s1 = Scalar(name='s1')

    d2 = Dimension(name='d', spacing=s0)
    d3 = Dimension(name='d', spacing=s1)
    assert d2 is not d3

    d4 = Dimension(name='d', spacing=s1)
    assert d3 is d4

    d5 = Dimension(name='d', spacing=Constant(name='s1'))
    assert d2 is not d5
def _position_map(self):
    """
    Symbols map for the position of the sparse points relative to the grid
    origin.

    Notes
    -----
    The expression `(coord - origin)/spacing` could also be computed in the
    mathematically equivalent expanded form `coord/spacing - origin/spacing`.
    This particular form is problematic when a sparse point is in close
    proximity of the grid origin, since due to a larger machine precision
    error it may cause a +-1 error in the computation of the position. We
    mitigate this problem by computing the positions individually (hence the
    need for a position map).
    """
    symbols = [Scalar(name='pos%s' % d, dtype=self.dtype)
               for d in self.grid.dimensions]
    return OrderedDict([(c - o, p) for p, c, o in
                        zip(symbols, self._coordinate_symbols, self.grid.origin)])
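# A minimal numerical illustration of the rounding concern described in the
# Notes above. The values below are made up for demonstration; whether the two
# forms actually round floor() to different grid cells depends on the specific
# coordinate, origin, spacing and dtype.
import numpy as np

dtype = np.float32
origin = dtype(855.3)              # hypothetical grid origin along one dimension
spacing = dtype(0.7)               # hypothetical grid spacing
coord = origin + dtype(2 * 0.7)    # a sparse point two cells away from the origin

contracted = (coord - origin) / spacing        # form used by _position_map
expanded = coord / spacing - origin / spacing  # mathematically equivalent form

# Near a cell boundary, the extra rounding incurred by the expanded form can
# shift floor() by one, i.e. a +-1 error in the computed grid position.
print(int(np.floor(contracted)), int(np.floor(expanded)))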
def interpolate(self, expr, offset=0, increment=False, self_subs={}):
    """
    Generate equations interpolating an arbitrary expression into ``self``.

    Parameters
    ----------
    expr : expr-like
        Input expression to interpolate.
    offset : int, optional
        Additional offset from the boundary.
    increment: bool, optional
        If True, generate increments (Inc) rather than assignments (Eq).
    """
    # Derivatives must be evaluated before the introduction of indirect accesses
    try:
        expr = expr.evaluate
    except AttributeError:
        # E.g., a generic SymPy expression or a number
        pass

    variables = list(retrieve_function_carriers(expr))

    # List of indirection indices for all adjacent grid points
    idx_subs, temps = self._interpolation_indices(variables, offset)

    # Substitute coordinate base symbols into the interpolation coefficients
    args = [expr.xreplace(v_sub) * b.xreplace(v_sub)
            for b, v_sub in zip(self._interpolation_coeffs, idx_subs)]

    # Accumulate point-wise contributions into a temporary
    rhs = Scalar(name='sum', dtype=self.dtype)
    summands = [Eq(rhs, 0., implicit_dims=self.dimensions)]
    summands.extend([Inc(rhs, i, implicit_dims=self.dimensions) for i in args])

    # Write/Incr `self`
    lhs = self.subs(self_subs)
    last = [Inc(lhs, rhs)] if increment else [Eq(lhs, rhs)]

    return temps + summands + last
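# A minimal usage sketch (the names here are made up) of the interpolate()
# routine above, assuming the standard devito API: the returned list of
# equations (temps + summands + last) is handed to an Operator, which performs
# the actual interpolation when applied.
from devito import Grid, Function, SparseFunction, Operator

grid = Grid(shape=(11, 11))
f = Function(name='f', grid=grid)
f.data[:] = 1.

sf = SparseFunction(name='sf', grid=grid, npoint=1)
sf.coordinates.data[0, :] = (0.45, 0.55)   # an arbitrary point inside the domain

op = Operator(sf.interpolate(expr=f))
op.apply()
print(sf.data)   # expected to hold the value of `f` interpolated at the point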
def test_symbols(self):
    """
    Test that ``Symbol(name='s') != Scalar(name='s') != Dimension(name='s')``.
    They all:

        * rely on the same caching mechanism;
        * boil down to creating a sympy.Symbol;
        * are created with the same args/kwargs (``name='s'``).
    """
    sy = Symbol(name='s')
    sc = Scalar(name='s')
    d = Dimension(name='s')

    assert sy is not sc
    assert sc is not d
    assert sy is not d

    assert isinstance(sy, Symbol)
    assert isinstance(sc, Scalar)
    assert isinstance(d, Dimension)
def callback():
    # Derivatives must be evaluated before the introduction of indirect accesses
    try:
        _expr = expr.evaluate
    except AttributeError:
        # E.g., a generic SymPy expression or a number
        _expr = expr

    variables = list(retrieve_function_carriers(_expr))

    # Need to get origin of the field in case it is staggered
    # TODO: handle each variable's staggering separately
    field_offset = variables[0].origin

    # List of indirection indices for all adjacent grid points
    idx_subs, temps = self._interpolation_indices(variables, offset,
                                                  field_offset=field_offset)

    # Substitute coordinate base symbols into the interpolation coefficients
    args = [_expr.xreplace(v_sub) * b.xreplace(v_sub)
            for b, v_sub in zip(self._interpolation_coeffs, idx_subs)]

    # Accumulate point-wise contributions into a temporary
    rhs = Scalar(name='sum', dtype=self.sfunction.dtype)
    summands = [Eq(rhs, 0., implicit_dims=self.sfunction.dimensions)]
    summands.extend([Inc(rhs, i, implicit_dims=self.sfunction.dimensions)
                     for i in args])

    # Write/Incr `self`
    lhs = self.sfunction.subs(self_subs)
    last = [Inc(lhs, rhs)] if increment else [Eq(lhs, rhs)]

    return temps + summands + last
def symbolic_size(self):
    """Symbolic size of the Dimension."""
    return Scalar(name=self.size_name, dtype=np.int32, is_const=True)
def step(self):
    if self._step is not None:
        return self._step
    else:
        return Scalar(name=self.size_name, dtype=np.int32, is_const=True)
def symbolic_flag(self):
    return Scalar(name='flag', dtype=np.int32)
def symbolic_id(self):
    return Scalar(name='id', dtype=np.int32)
def symbolic_base(self):
    return Scalar(name=self.name, dtype=None)
def __new_stage2__(cls, name, spacing=None):
    newobj = sympy.Symbol.__xnew__(cls, name)
    newobj._spacing = spacing or Scalar(name='h_%s' % name, is_const=True)
    return newobj
def _point_symbols(self):
    """Symbol for coordinate value in each dimension of the point."""
    return tuple(Scalar(name='p%s' % d, dtype=self.dtype)
                 for d in self.grid.dimensions)
def symbolic_min(self):
    return Scalar(name=self.min_name, dtype=np.int32)
def __init_finalize__(self, name, spacing=None, default_value=None):
    self._spacing = spacing or Scalar(name='h_%s' % name, is_const=True)
    self._default_value = default_value or 0
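# A small usage sketch (assuming the standard devito API) of the default
# spacing mechanism implemented above: when no `spacing` is passed, the
# Dimension gets a const Scalar named `h_<name>`; otherwise the user-provided
# symbol is used, as in the caching tests earlier in this section.
from devito import Dimension, SpaceDimension, Constant

x = Dimension(name='x')
print(x.spacing)                 # h_x, created by default

dx = Constant(name='dx')
y = SpaceDimension(name='y', spacing=dx)
print(y.spacing)                 # dx, the user-provided spacing symbol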
def symbolic_max(self):
    return Scalar(name=self.max_name, dtype=np.int32)
def symbolic_max(self):
    """Symbol defining the maximum point of the Dimension."""
    return Scalar(name=self.max_name, dtype=np.int32, is_const=True)
def __init_finalize__(self, name, spacing=None):
    self._spacing = spacing or Scalar(name='h_%s' % name, is_const=True)
def forward(self, src=None, rec=None, u=None, v=None, vp=None,
            epsilon=None, delta=None, theta=None, phi=None,
            save=False, kernel='centered', **kwargs):
    """
    Forward modelling function that creates the necessary data objects for
    running a forward modelling operator.

    Parameters
    ----------
    geometry : AcquisitionGeometry
        Geometry object that contains the source (SparseTimeFunction) and
        receivers (SparseTimeFunction) and their position.
    u : TimeFunction, optional
        The computed wavefield first component.
    v : TimeFunction, optional
        The computed wavefield second component.
    vp : Function or float, optional
        The time-constant velocity.
    epsilon : Function or float, optional
        The time-constant first Thomsen parameter.
    delta : Function or float, optional
        The time-constant second Thomsen parameter.
    theta : Function or float, optional
        The time-constant dip angle (radians).
    phi : Function or float, optional
        The time-constant azimuth angle (radians).
    save : bool, optional
        Whether or not to save the entire (unrolled) wavefield.
    kernel : str, optional
        Type of discretization, 'centered' or 'staggered'.

    Returns
    -------
    Receiver, wavefield and performance summary.
    """
    if kernel == 'staggered':
        time_order = 1
        dims = self.model.space_dimensions
        stagg_u = (-dims[-1])
        stagg_v = (-dims[0], -dims[1]) if self.model.grid.dim == 3 else (-dims[0])
    else:
        time_order = 2
        stagg_u = stagg_v = None

    # Source term is read-only, so re-use the default
    src = src or self.geometry.src
    # Create a new receiver object to store the result
    rec = rec or Receiver(name='rec', grid=self.model.grid,
                          time_range=self.geometry.time_axis,
                          coordinates=self.geometry.rec_positions)

    # Create the forward wavefields if not provided
    if u is None:
        u = TimeFunction(name='u', grid=self.model.grid, staggered=stagg_u,
                         save=self.geometry.nt if save else None,
                         time_order=time_order, space_order=self.space_order)
    if v is None:
        v = TimeFunction(name='v', grid=self.model.grid, staggered=stagg_v,
                         save=self.geometry.nt if save else None,
                         time_order=time_order, space_order=self.space_order)

    print("Initial Norm u", norm(u))
    print("Initial Norm v", norm(v))

    if kernel == 'staggered':
        vx, vz, vy = particle_velocity_fields(self.model, self.space_order)
        kwargs["vx"] = vx
        kwargs["vz"] = vz
        if vy is not None:
            kwargs["vy"] = vy

    # Pick vp and Thomsen parameters from model unless explicitly provided
    kwargs.update(self.model.physical_params(vp=vp, epsilon=epsilon, delta=delta,
                                             theta=theta, phi=phi))
    if self.model.dim < 3:
        kwargs.pop('phi', None)

    # Execute operator and return wavefield and receiver data
    op = self.op_fwd(kernel, save)
    print(kwargs)
    summary = op.apply(src=src, u=u, v=v, dt=kwargs.pop('dt', self.dt), **kwargs)

    regnormu = norm(u)
    regnormv = norm(v)
    print("Norm u:", regnormu)
    print("Norm v:", regnormv)

    if 0:
        cmap = plt.cm.get_cmap("viridis")
        values = u.data[0, :, :, :]
        vistagrid = pv.UniformGrid()
        vistagrid.dimensions = np.array(values.shape) + 1
        vistagrid.spacing = (1, 1, 1)
        vistagrid.origin = (0, 0, 0)  # The bottom left corner of the data set
        vistagrid.cell_arrays["values"] = values.flatten(order="F")
        vistaslices = vistagrid.slice_orthogonal()
        vistagrid.plot(show_edges=True)
        vistaslices.plot(cmap=cmap)

    print("=========================================")

    s_u = TimeFunction(name='s_u', grid=self.model.grid,
                       space_order=self.space_order, time_order=1)
    s_v = TimeFunction(name='s_v', grid=self.model.grid,
                       space_order=self.space_order, time_order=1)

    src_u = src.inject(field=s_u.forward,
                       expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)
    src_v = src.inject(field=s_v.forward,
                       expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)

    op_f = Operator([src_u, src_v])
    op_f.apply(src=src, dt=kwargs.pop('dt', self.dt))

    print("Norm s_u", norm(s_u))
    print("Norm s_v", norm(s_v))

    # Get the nonzero indices
    nzinds = np.nonzero(s_u.data[0])  # nzinds is a tuple
    assert len(nzinds) == len(self.model.grid.shape)

    shape = self.model.grid.shape
    x, y, z = self.model.grid.dimensions
    time = self.model.grid.time_dim
    t = self.model.grid.stepping_dim

    source_mask = Function(name='source_mask', shape=self.model.grid.shape,
                           dimensions=(x, y, z), space_order=0, dtype=np.int32)
    source_id = Function(name='source_id', shape=shape, dimensions=(x, y, z),
                         space_order=0, dtype=np.int32)
    print("source_id data indexes start from 0 now !!!")

    # source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(1, len(nzinds[0])+1))
    source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(len(nzinds[0])))
    source_mask.data[nzinds[0], nzinds[1], nzinds[2]] = 1

    # plot3d(source_mask.data, model)
    # import pdb; pdb.set_trace()

    print("Number of unique affected points is: %d" % len(nzinds[0]))

    # Assert that first and last index are as expected
    assert (source_id.data[nzinds[0][0], nzinds[1][0], nzinds[2][0]] == 0)
    assert (source_id.data[nzinds[0][-1], nzinds[1][-1], nzinds[2][-1]] ==
            len(nzinds[0]) - 1)
    assert (source_id.data[nzinds[0][len(nzinds[0]) - 1], nzinds[1][len(nzinds[0]) - 1],
                           nzinds[2][len(nzinds[0]) - 1]] == len(nzinds[0]) - 1)
    assert (np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(source_mask.data)))
    assert (np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(s_u.data[0])))

    print("-At this point source_mask and source_id have been populated correctly-")

    nnz_shape = (self.model.grid.shape[0], self.model.grid.shape[1])
    nnz_sp_source_mask = Function(name='nnz_sp_source_mask', shape=(list(nnz_shape)),
                                  dimensions=(x, y), space_order=0, dtype=np.int32)
    nnz_sp_source_mask.data[:, :] = source_mask.data[:, :, :].sum(2)

    inds = np.where(source_mask.data == 1.)
    print("Grid - source positions:", inds)
    maxz = len(np.unique(inds[-1]))
    # Change only 3rd dim
    sparse_shape = (self.model.grid.shape[0], self.model.grid.shape[1], maxz)

    assert (len(nnz_sp_source_mask.dimensions) == (len(source_mask.dimensions) - 1))

    # Note: sparse_source_id is not needed as long as sparse info is kept in mask
    # sp_source_id.data[inds[0], inds[1], :] = inds[2][:maxz]

    id_dim = Dimension(name='id_dim')
    b_dim = Dimension(name='b_dim')

    save_src_u = TimeFunction(name='save_src_u',
                              shape=(src.shape[0], nzinds[1].shape[0]),
                              dimensions=(src.dimensions[0], id_dim))
    save_src_v = TimeFunction(name='save_src_v',
                              shape=(src.shape[0], nzinds[1].shape[0]),
                              dimensions=(src.dimensions[0], id_dim))

    save_src_u_term = src.inject(field=save_src_u[src.dimensions[0], source_id],
                                 expr=src * self.model.grid.time_dim.spacing**2 /
                                 self.model.m)
    save_src_v_term = src.inject(field=save_src_v[src.dimensions[0], source_id],
                                 expr=src * self.model.grid.time_dim.spacing**2 /
                                 self.model.m)

    print("Injecting to empty grids")
    op1 = Operator([save_src_u_term, save_src_v_term])
    op1.apply(src=src, dt=kwargs.pop('dt', self.dt))
    print("Injecting to empty grids finished")

    sp_zi = Dimension(name='sp_zi')

    sp_source_mask = Function(name='sp_source_mask', shape=(list(sparse_shape)),
                              dimensions=(x, y, sp_zi), space_order=0, dtype=np.int32)

    # Now holds IDs
    sp_source_mask.data[inds[0], inds[1], :] = tuple(inds[-1][:len(np.unique(inds[-1]))])

    assert (np.count_nonzero(sp_source_mask.data) == len(nzinds[0]))
    assert (len(sp_source_mask.dimensions) == 3)

    # import pdb; pdb.set_trace()

    zind = Scalar(name='zind', dtype=np.int32)
    xb_size = Scalar(name='xb_size', dtype=np.int32)
    yb_size = Scalar(name='yb_size', dtype=np.int32)
    x0_blk0_size = Scalar(name='x0_blk0_size', dtype=np.int32)
    y0_blk0_size = Scalar(name='y0_blk0_size', dtype=np.int32)

    block_sizes = Function(name='block_sizes', shape=(4, ), dimensions=(b_dim, ),
                           space_order=0, dtype=np.int32)

    bsizes = (8, 8, 32, 32)
    block_sizes.data[:] = bsizes

    # eqxb = Eq(xb_size, block_sizes[0])
    # eqyb = Eq(yb_size, block_sizes[1])
    # eqxb2 = Eq(x0_blk0_size, block_sizes[2])
    # eqyb2 = Eq(y0_blk0_size, block_sizes[3])

    eq0 = Eq(sp_zi.symbolic_max, nnz_sp_source_mask[x, y] - 1,
             implicit_dims=(time, x, y))
    # eq1 = Eq(zind, sp_source_mask[x, sp_zi], implicit_dims=(time, x, sp_zi))
    eq1 = Eq(zind, sp_source_mask[x, y, sp_zi], implicit_dims=(time, x, y, sp_zi))

    inj_u = source_mask[x, y, zind] * save_src_u[time, source_id[x, y, zind]]
    inj_v = source_mask[x, y, zind] * save_src_v[time, source_id[x, y, zind]]

    eq_u = Inc(u.forward[t + 1, x, y, zind], inj_u, implicit_dims=(time, x, y, sp_zi))
    eq_v = Inc(v.forward[t + 1, x, y, zind], inj_v, implicit_dims=(time, x, y, sp_zi))

    # The additional time-tiling equations
    # tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u, eq_v)

    performance_map = np.array([[0, 0, 0, 0, 0]])

    bxstart = 4
    bxend = 17
    bystart = 4
    byend = 17
    bstep = 16

    txstart = 8
    txend = 9
    tystart = 8
    tyend = 9
    tstep = 16

    # Temporal autotuning
    for tx in range(txstart, txend, tstep):
        # import pdb; pdb.set_trace()
        for ty in range(tystart, tyend, tstep):
            for bx in range(bxstart, bxend, bstep):
                for by in range(bystart, byend, bstep):
                    block_sizes.data[:] = [tx, ty, bx, by]

                    eqxb = Eq(xb_size, block_sizes[0])
                    eqyb = Eq(yb_size, block_sizes[1])
                    eqxb2 = Eq(x0_blk0_size, block_sizes[2])
                    eqyb2 = Eq(y0_blk0_size, block_sizes[3])

                    u.data[:] = 0
                    v.data[:] = 0
                    print("-----")
                    tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u, eq_v)

                    op_tt = self.op_fwd(kernel, save, tteqs)
                    summary_tt = op_tt.apply(u=u, v=v, dt=kwargs.pop('dt', self.dt),
                                             **kwargs)

                    norm_tt_u = norm(u)
                    norm_tt_v = norm(v)
                    print("Norm u:", regnormu)
                    print("Norm v:", regnormv)
                    print("Norm(tt_u):", norm_tt_u)
                    print("Norm(tt_v):", norm_tt_v)
                    print("===Temporal blocking======================================")

                    performance_map = np.append(
                        performance_map,
                        [[tx, ty, bx, by, summary_tt.globals['fdlike'].gflopss]], 0)

                print(performance_map)

            # tids = np.unique(performance_map[:, 0])
            # for tid in tids:
            bids = np.where((performance_map[:, 0] == tx) &
                            (performance_map[:, 1] == ty))
            bx_data = np.unique(performance_map[bids, 2])
            by_data = np.unique(performance_map[bids, 3])
            gptss_data = performance_map[bids, 4]
            gptss_data = gptss_data.reshape(len(bx_data), len(by_data))

            fig, ax = plt.subplots()
            im = ax.imshow(gptss_data)
            pause(2)

            # We want to show all ticks...
            ax.set_xticks(np.arange(len(bx_data)))
            ax.set_yticks(np.arange(len(by_data)))
            # ... and label them with the respective list entries
            ax.set_xticklabels(bx_data)
            ax.set_yticklabels(by_data)

            ax.set_title("Gpts/s for fixed tile size. (Sweeping block sizes)")
            fig.tight_layout()
            fig.colorbar(im, ax=ax)
            # ax = sns.heatmap(gptss_data, linewidth=0.5)
            plt.savefig(str(shape[0]) + str(np.int32(tx)) + str(np.int32(ty)) + ".pdf")

    if 0:
        cmap = plt.cm.get_cmap("viridis")
        values = u.data[0, :, :, :]
        vistagrid = pv.UniformGrid()
        vistagrid.dimensions = np.array(values.shape) + 1
        vistagrid.spacing = (1, 1, 1)
        vistagrid.origin = (0, 0, 0)  # The bottom left corner of the data set
        vistagrid.cell_arrays["values"] = values.flatten(order="F")
        vistaslices = vistagrid.slice_orthogonal()
        vistagrid.plot(show_edges=True)
        vistaslices.plot(cmap=cmap)

    return rec, u, v, summary
sp_source_mask = Function(name='sp_source_mask', shape=(list(sparse_shape)),
                          dimensions=(x, y, sp_zi), space_order=0, dtype=np.int32)

# Now holds IDs
sp_source_mask.data[inds[0], inds[1], :] = tuple(inds[2][:len(np.unique(inds[2]))])

assert (np.count_nonzero(sp_source_mask.data) == len(nzinds[0]))
assert (len(sp_source_mask.dimensions) == 3)

t = model.grid.stepping_dim

zind = Scalar(name='zind', dtype=np.int32)
xb_size = Scalar(name='xb_size', dtype=np.int32)
yb_size = Scalar(name='yb_size', dtype=np.int32)
x0_blk0_size = Scalar(name='x0_blk0_size', dtype=np.int32)
y0_blk0_size = Scalar(name='y0_blk0_size', dtype=np.int32)

eq0 = Eq(sp_zi.symbolic_max, nnz_sp_source_mask[x, y] - 1,
         implicit_dims=(time, x, y))
eq1 = Eq(zind, sp_source_mask[x, y, sp_zi],
         implicit_dims=(time, x, y, sp_zi))

myexpr = source_mask[x, y, zind] * save_src[time, source_id[x, y, zind]]
eq2 = Inc(usol.forward[t + 1, x, y, zind], myexpr,
          implicit_dims=(time, x, y, sp_zi))
def symbolic_size(self):
    return Scalar(name=self.size_name, dtype=np.int32)