def test_fd_space_staggered(self, space_order, stagger):
    """
    This test compares the discrete finite-difference scheme against polynomials.
    For a given order p, the finite-difference scheme should be exact for
    polynomials of order p.
    """
    # dummy axis dimension
    nx = 100
    xx = np.linspace(-1, 1, nx)
    dx = xx[1] - xx[0]
    # Symbolic data
    grid = Grid(shape=(nx,), dtype=np.float32)
    x = grid.dimensions[0]

    # Location of the staggered function
    if stagger == left:
        off = -.5
        side = -x
        xx2 = xx - off * dx
    elif stagger == right:
        off = .5
        side = x
        xx2 = xx - off * dx
    else:
        off = 0
        side = NODE
        xx2 = xx

    u = Function(name="u", grid=grid, space_order=space_order, staggered=(side,))
    du = Function(name="du", grid=grid, space_order=space_order)
    # Define polynomial with exact fd
    coeffs = np.ones((space_order - 1,), dtype=np.float32)
    polynome = sum([coeffs[i] * x**i for i in range(0, space_order - 1)])
    polyvalues = np.array([polynome.subs(x, xi) for xi in xx2], np.float32)
    # Fill original data with the polynomial values
    u.data[:] = polyvalues
    # True derivative of the polynome
    Dpolynome = diff(polynome)
    Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx], np.float32)
    # FD derivative, symbolic
    u_deriv = generic_derivative(u, deriv_order=1, fd_order=space_order,
                                 dim=x, stagger=stagger)
    # Compute numerical FD
    stencil = Eq(du, u_deriv)
    op = Operator(stencil, subs={x.spacing: dx})
    op.apply()

    # Check exactness of the numerical derivative, excluding the
    # space_order-wide border region
    space_border = space_order
    error = abs(du.data[space_border:-space_border] -
                Dpolyvalues[space_border:-space_border])
    assert np.isclose(np.mean(error), 0., atol=1e-3)
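
# A minimal NumPy-only sketch of the property the test above relies on: an
# order-p central finite difference is exact (up to round-off) for polynomials
# of degree <= p. Shown here for p = 2 using np.gradient; purely illustrative
# and independent of Devito.
import numpy as np

_x = np.linspace(-1, 1, 101)
_dx = _x[1] - _x[0]
_p = 1 + _x + _x**2                 # degree-2 polynomial
_dp_exact = 1 + 2 * _x              # analytic derivative
_dp_fd = np.gradient(_p, _dx)       # 2nd-order central difference at interior points
assert np.allclose(_dp_fd[1:-1], _dp_exact[1:-1], atol=1e-10)
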
def test_fd_space_staggered(self, space_order, stagger):
    """
    This test compares the discrete finite-difference scheme against polynomials.
    For a given order p, the finite-difference scheme should be exact for
    polynomials of order p.

    :param space_order: space order of the finite-difference stencil
    :param stagger: staggering of the finite-difference stencil
    """
    clear_cache()
    # dummy axis dimension
    nx = 100
    xx = np.linspace(-1, 1, nx)
    dx = xx[1] - xx[0]
    # Symbolic data
    grid = Grid(shape=(nx,), dtype=np.float32)
    x = grid.dimensions[0]

    # Location of the staggered function
    if stagger == left:
        off = -.5
        side = -x
        xx2 = xx - off * dx
    elif stagger == right:
        off = .5
        side = x
        xx2 = xx[:-1] - off * dx
    else:
        off = 0
        side = NODE
        xx2 = xx

    u = Function(name="u", grid=grid, space_order=space_order, staggered=(side,))
    du = Function(name="du", grid=grid, space_order=space_order)
    # Define polynomial with exact fd
    coeffs = np.ones((space_order-1,), dtype=np.float32)
    polynome = sum([coeffs[i]*x**i for i in range(0, space_order-1)])
    polyvalues = np.array([polynome.subs(x, xi) for xi in xx2], np.float32)
    # Fill original data with the polynomial values
    u.data[:] = polyvalues
    # True derivative of the polynome
    Dpolynome = diff(polynome)
    Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx], np.float32)
    # FD derivative, symbolic
    u_deriv = generic_derivative(u, deriv_order=1, fd_order=space_order,
                                 dim=x, stagger=stagger)
    # Compute numerical FD
    stencil = Eq(du, u_deriv)
    op = Operator(stencil, subs={x.spacing: dx})
    op.apply()

    # Check exactness of the numerical derivative, excluding the
    # space_order-wide border region
    space_border = space_order
    error = abs(du.data[space_border:-space_border] -
                Dpolyvalues[space_border:-space_border])
    assert np.isclose(np.mean(error), 0., atol=1e-3)
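
# A hedged sketch of the pytest parametrization that typically drives a test
# like the one above; the chosen orders, the stagger values, and the import
# location of `left`, `right`, `centered` are assumptions and may need
# adjusting to the Devito version in use.
import pytest
from devito import left, right, centered  # assumed import location

@pytest.mark.parametrize('space_order', [2, 4, 6])
@pytest.mark.parametrize('stagger', [centered, left, right])
def test_fd_space_staggered(self, space_order, stagger):
    ...  # body as above, defined as a method of the test class
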
def gaussian_smooth(f, sigma=1, truncate=4.0, mode='reflect'):
    """
    Gaussian smooth function.

    Parameters
    ----------
    f : Function
        The left-hand side of the smoothing kernel, that is, the smoothed Function.
    sigma : float, optional
        Standard deviation. Default is 1.
    truncate : float, optional
        Truncate the filter at this many standard deviations. Default is 4.0.
    mode : str, optional
        The function initialisation mode. 'constant' and 'reflect' are
        accepted. Default mode is 'reflect'.
    """
    class ObjectiveDomain(dv.SubDomain):

        name = 'objective_domain'

        def __init__(self, lw):
            super(ObjectiveDomain, self).__init__()
            self.lw = lw

        def define(self, dimensions):
            return {d: ('middle', l, l) for d, l in zip(dimensions, self.lw)}

    def create_gaussian_weights(sigma, lw):
        weights = [w/w.sum() for w in (np.exp(-0.5/s**2*(np.linspace(-l, l, 2*l+1))**2)
                   for s, l in zip(sigma, lw))]
        processed = []
        for w in weights:
            temp = list(w)
            while len(temp) < 2*max(lw)+1:
                temp.insert(0, 0)
                temp.append(0)
            processed.append(np.array(temp))
        return as_tuple(processed)

    def fset(f, g):
        indices = [slice(l, -l, 1) for _, l in zip(g.dimensions, lw)]
        slices = (slice(None, None, 1), )*g.ndim
        if isinstance(f, np.ndarray):
            f[slices] = g.data[tuple(indices)]
        elif isinstance(f, dv.Function):
            f.data[slices] = g.data[tuple(indices)]
        else:
            raise NotImplementedError

    try:
        # NOTE: required if input is an np.array
        dtype = f.dtype.type
        shape = f.shape
    except AttributeError:
        dtype = f.dtype
        shape = f.shape_global

    # TODO: Add s = 0 dim skip option
    lw = tuple(int(truncate*float(s) + 0.5) for s in as_tuple(sigma))

    if len(lw) == 1 and len(lw) < f.ndim:
        lw = f.ndim*(lw[0], )
        sigma = f.ndim*(as_tuple(sigma)[0], )
    elif len(lw) == f.ndim:
        sigma = as_tuple(sigma)
    else:
        raise ValueError("`sigma` must be an integer or a tuple of length"
                         " `f.ndim`.")

    # Create the padded grid:
    objective_domain = ObjectiveDomain(lw)
    shape_padded = tuple([np.array(s) + 2*l for s, l in zip(shape, lw)])
    grid = dv.Grid(shape=shape_padded, subdomains=objective_domain)

    f_c = dv.Function(name='f_c', grid=grid, space_order=2*max(lw),
                      coefficients='symbolic', dtype=dtype)
    f_o = dv.Function(name='f_o', grid=grid, dtype=dtype)

    weights = create_gaussian_weights(sigma, lw)

    mapper = {}
    for d, l, w in zip(f_c.dimensions, lw, weights):
        lhs = []
        rhs = []
        options = []

        lhs.append(f_o)
        rhs.append(dv.generic_derivative(f_c, d, 2*l, 1))
        coeffs = dv.Coefficient(1, f_c, d, w)
        options.append({'coefficients': dv.Substitutions(coeffs),
                        'subdomain': grid.subdomains['objective_domain']})

        lhs.append(f_c)
        rhs.append(f_o)
        options.append({'subdomain': grid.subdomains['objective_domain']})

        mapper[d] = {'lhs': lhs, 'rhs': rhs, 'options': options}

    # Note: we force the smoother to run on the host, as there's generally not
    # enough parallelism to be performant on a device
    platform = 'cpu64'

    initialize_function(f_c, f, lw, mapper=mapper, mode='reflect', name='smooth',
                        platform=platform)
    fset(f, f_c)

    return f
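
# A minimal usage sketch for the gaussian_smooth above, assuming Devito is
# installed and this function is importable; the grid size and sigma are
# arbitrary illustrations. The smoothing is applied in place and f is returned.
import numpy as np
import devito as dv

grid = dv.Grid(shape=(101, 101))
f = dv.Function(name='f', grid=grid)
f.data[:] = 0.
f.data[50, 50] = 1.           # a delta spike to be smeared out
gaussian_smooth(f, sigma=2)   # a per-dimension sigma can also be passed as a tuple
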
def gaussian_smooth(f, sigma=1, _order=4, mode='reflect'):
    """
    Gaussian smooth function.
    """
    class ObjectiveDomain(dv.SubDomain):

        name = 'objective_domain'

        def __init__(self, lw):
            super(ObjectiveDomain, self).__init__()
            self.lw = lw

        def define(self, dimensions):
            return {d: ('middle', self.lw, self.lw) for d in dimensions}

    def fset(f, g):
        indices = [slice(lw, -lw, 1) for _ in g.grid.dimensions]
        slices = (slice(None, None, 1), )*len(g.grid.dimensions)
        if isinstance(f, np.ndarray):
            f[slices] = g.data[tuple(indices)]
        elif isinstance(f, dv.Function):
            f.data[slices] = g.data[tuple(indices)]
        else:
            raise NotImplementedError

    lw = int(_order*sigma + 0.5)

    # Create the padded grid:
    objective_domain = ObjectiveDomain(lw)
    try:
        shape_padded = np.array(f.grid.shape) + 2*lw
    except AttributeError:
        shape_padded = np.array(f.shape) + 2*lw
    grid = dv.Grid(shape=shape_padded, subdomains=objective_domain)

    f_c = dv.Function(name='f_c', grid=grid, space_order=2*lw,
                      coefficients='symbolic', dtype=np.int32)
    f_o = dv.Function(name='f_o', grid=grid, coefficients='symbolic', dtype=np.int32)

    weights = np.exp(-0.5/sigma**2*(np.linspace(-lw, lw, 2*lw+1))**2)
    weights = weights/weights.sum()

    mapper = {}
    for d in f_c.dimensions:
        lhs = []
        rhs = []
        options = []

        lhs.append(f_o)
        rhs.append(dv.generic_derivative(f_c, d, 2*lw, 1))
        coeffs = dv.Coefficient(1, f_c, d, weights)
        options.append({'coefficients': dv.Substitutions(coeffs),
                        'subdomain': grid.subdomains['objective_domain']})

        lhs.append(f_c)
        rhs.append(f_o)
        options.append({'subdomain': grid.subdomains['objective_domain']})

        mapper[d] = {'lhs': lhs, 'rhs': rhs, 'options': options}

    initialize_function(f_c, f.data[:], lw, mapper=mapper, mode='reflect',
                        name='smooth')
    fset(f, f_c)

    return f
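
# For intuition, the padding width used above, lw = int(_order*sigma + 0.5),
# mirrors the truncation rule of scipy.ndimage.gaussian_filter, so smoothing a
# plain NumPy array with SciPy gives a reference result to sanity-check
# against. SciPy is an assumption here and is not used by the code above.
import numpy as np
from scipy.ndimage import gaussian_filter

a = np.zeros((64, 64), dtype=np.float32)
a[32, 32] = 1.0
reference = gaussian_filter(a, sigma=2, truncate=4, mode='reflect')
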