def dense_assembler(device_interface, operator_descriptor, domain, dual_to_range, parameters, result):
    """Numba based dense assembler.

    Fill ``result`` in place with the regular (non-singular) part of the
    dense discretisation of the boundary operator described by
    ``operator_descriptor``, dispatching one Numba kernel launch per
    color group of test elements.

    Parameters
    ----------
    device_interface : object
        Not referenced in this function; kept for signature compatibility
        with the other assembler backends.
    operator_descriptor : descriptor object
        Supplies the kernel selection, kernel options and (nominally) the
        precision of the operator.
    domain : Space
        Trial function space.
    dual_to_range : Space
        Test function space.
    parameters : ParameterList
        Supplies the regular quadrature order.
    result : ndarray
        Output array the selected Numba kernel writes into.
    """
    from bempp.core.numba_kernels import select_numba_kernels
    from bempp.api.utils.helpers import get_type
    from bempp.api.integration.triangle_gauss import rule

    # Pick the assembly loop and the scalar kernel matching the operator.
    (
        numba_assembly_function_regular,
        numba_kernel_function_regular,
    ) = select_numba_kernels(operator_descriptor, mode="regular")

    order = parameters.quadrature.regular
    quad_points, quad_weights = rule(order)

    # Perform Numba assembly always in double precision
    # precision = operator_descriptor.precision
    precision = "double"
    data_type = get_type(precision).real

    # Elements come grouped by "color"; ``*_color_indexptr`` delimits each
    # group inside ``*_indices`` (presumably so that groups can be processed
    # without write conflicts — coloring semantics live in the space class).
    test_indices, test_color_indexptr = dual_to_range.get_elements_by_color()
    trial_indices, trial_color_indexptr = domain.get_elements_by_color()
    number_of_test_colors = len(test_color_indexptr) - 1
    # number_of_trial_colors = len(trial_color_indexptr) - 1
    # rows = dual_to_range.global_dof_count
    # cols = domain.global_dof_count
    nshape_test = dual_to_range.number_of_shape_functions
    nshape_trial = domain.number_of_shape_functions
    grids_identical = domain.grid == dual_to_range.grid

    for test_color_index in range(number_of_test_colors):
        # One kernel launch per test color; the slice selects the test
        # elements belonging to the current color group. All trial
        # elements are passed at once.
        numba_assembly_function_regular(
            dual_to_range.grid.data(precision),
            domain.grid.data(precision),
            nshape_test,
            nshape_trial,
            test_indices[test_color_indexptr[test_color_index]:
                         test_color_indexptr[1 + test_color_index]],
            trial_indices,
            dual_to_range.local_multipliers.astype(data_type),
            domain.local_multipliers.astype(data_type),
            dual_to_range.local2global,
            domain.local2global,
            dual_to_range.normal_multipliers,
            domain.normal_multipliers,
            quad_points.astype(data_type),
            quad_weights.astype(data_type),
            numba_kernel_function_regular,
            _np.array(operator_descriptor.options, dtype=data_type),
            grids_identical,
            dual_to_range.shapeset.evaluate,
            domain.shapeset.evaluate,
            result,
        )
def potential_assembler(device_interface, space, operator_descriptor, points, parameters):
    """Return an evaluator function to evaluate a potential.

    Builds a closure that maps a coefficient vector ``x`` to the values of
    the potential at ``points`` using the Numba potential kernels.

    Parameters
    ----------
    device_interface : object
        Not referenced here; kept for signature compatibility with the
        other assembler backends.
    space : Space
        Function space of the density.
    operator_descriptor : descriptor object
        Supplies kernel selection, kernel options, complexity flag and
        kernel dimension.
    points : ndarray
        3 x N array of evaluation points.
    parameters : ParameterList
        Supplies the regular quadrature order.

    Returns
    -------
    callable
        ``evaluator(x)`` returning the potential values for coefficients
        ``x``.
    """
    from bempp.core.numba_kernels import select_numba_kernels
    from bempp.api.integration.triangle_gauss import rule
    from bempp.api.utils.helpers import get_type

    (numba_assembly_function, numba_kernel_function_regular) = select_numba_kernels(
        operator_descriptor, mode="potential"
    )

    quad_points, quad_weights = rule(parameters.quadrature.regular)

    # Perform Numba assembly always in double precision
    # precision = operator_descriptor.precision
    precision = "double"

    dtype = _np.dtype(get_type(precision).real)
    if operator_descriptor.is_complex:
        result_type = _np.dtype(get_type(precision).complex)
    else:
        result_type = dtype

    kernel_dimension = operator_descriptor.kernel_dimension

    # Hoist every coefficient-independent conversion out of the evaluator.
    # The closure may be called many times; previously the quadrature rule
    # was re-converted (via a dtype *string*) on every call.
    points_transformed = points.astype(dtype)
    quad_points_transformed = quad_points.astype(dtype)
    quad_weights_transformed = quad_weights.astype(dtype)
    grid_data = space.grid.data(precision)
    kernel_parameters = _np.array(operator_descriptor.options, dtype=dtype)

    def evaluator(x):
        """Actually evaluate the potential."""
        return numba_assembly_function(
            dtype,
            result_type,
            kernel_dimension,
            points_transformed,
            x.astype(result_type),
            grid_data,
            quad_points_transformed,
            quad_weights_transformed,
            space.number_of_shape_functions,
            space.shapeset.evaluate,
            numba_kernel_function_regular,
            kernel_parameters,
            space.normal_multipliers,
            space.support_elements,
        )

    return evaluator
def compute_rwg_basis_transform(space, quadrature_order):
    """Compute the transformation matrices for RWG basis functions.

    Returns a pair of lists ``(forward, transpose)`` with one linear
    operator per spatial component. Each forward operator maps space
    coefficients to basis evaluations at the quadrature points; the
    second list contains the corresponding transposed operators.
    """
    from bempp.api.integration.triangle_gauss import rule
    from scipy.sparse import coo_matrix
    from scipy.sparse.linalg import aslinearoperator
    from bempp.api.space.shapesets import _rwg0_shapeset_evaluate
    from bempp.api.space.maxwell_spaces import _numba_rwg0_evaluate

    grid_data = space.grid.data("double")
    nelements = space.grid.number_of_elements
    points, weights = rule(quadrature_order)
    point_count = len(weights)
    grid_dofs = space.localised_space.grid_dof_count

    data, iind, jind = compute_rwg_basis_transform_impl(
        grid_data,
        _rwg0_shapeset_evaluate,
        _numba_rwg0_evaluate,
        space.support_elements,
        space.localised_space.local_multipliers,
        space.normal_multipliers,
        points,
        weights,
    )

    # Shared mapping operators, hoisted out of the component loop.
    localised_map = aslinearoperator(space.map_to_localised_space)
    dof_map = aslinearoperator(space.dof_transformation)
    localised_map_t = aslinearoperator(space.map_to_localised_space.T)
    dof_map_t = aslinearoperator(space.dof_transformation.T)

    forward = []
    transpose = []
    for component in range(3):
        values = data[component, :]
        fwd = coo_matrix(
            (values, (iind, jind)),
            shape=(point_count * nelements, grid_dofs),
        ).tocsr()
        rev = coo_matrix(
            (values, (jind, iind)),
            shape=(grid_dofs, point_count * nelements),
        ).tocsr()
        forward.append(aslinearoperator(fwd) @ localised_map @ dof_map)
        transpose.append(dof_map_t @ localised_map_t @ aslinearoperator(rev))

    return forward, transpose
def compute_p1_curl_transformation(space, quadrature_order):
    """
    Compute the transformation of P1 space coefficients to surface curl values.

    Returns two lists, curl_transforms and curl_transforms_transpose. The
    jth operator in curl_transforms maps coefficients of a P1 function
    space (or an extended space built on P1 type spaces) to the jth
    component of the surface curl at the quadrature points, scaled by the
    quadrature weights and the integration element. The second list holds
    the transposed operators.
    """
    from bempp.api.integration.triangle_gauss import rule
    from scipy.sparse import coo_matrix
    from scipy.sparse.linalg import aslinearoperator

    grid_data = space.grid.data("double")
    nelements = space.grid.number_of_elements
    points, weights = rule(quadrature_order)
    point_count = len(weights)
    grid_dofs = space.localised_space.grid_dof_count

    data, iind, jind = compute_p1_curl_transformation_impl(
        grid_data,
        space.support_elements,
        space.normal_multipliers,
        points,
        weights,
    )

    # Shared mapping operators, hoisted out of the component loop.
    localised_map = aslinearoperator(space.map_to_localised_space)
    dof_map = aslinearoperator(space.dof_transformation)
    localised_map_t = aslinearoperator(space.map_to_localised_space.T)
    dof_map_t = aslinearoperator(space.dof_transformation.T)

    curl_transforms = []
    curl_transforms_transpose = []
    for component in range(3):
        values = data[component, :]
        fwd = coo_matrix(
            (values, (iind, jind)),
            shape=(point_count * nelements, grid_dofs),
        ).tocsr()
        rev = coo_matrix(
            (values, (jind, iind)),
            shape=(grid_dofs, point_count * nelements),
        ).tocsr()
        curl_transforms.append(aslinearoperator(fwd) @ localised_map @ dof_map)
        curl_transforms_transpose.append(
            dof_map_t @ localised_map_t @ aslinearoperator(rev)
        )

    return curl_transforms, curl_transforms_transpose
def map_space_to_points(space, quadrature_order=None, return_transpose=False):
    """Return mapper from grid coeffs to point evaluations.

    Builds a linear operator taking space coefficients to function values
    at the quadrature points of every grid element (or the transposed
    operator when ``return_transpose`` is True). Falls back to the global
    regular quadrature order when none is given.
    """
    import bempp.api
    from scipy.sparse import coo_matrix
    from scipy.sparse.linalg import aslinearoperator
    from bempp.api.integration.triangle_gauss import rule

    if quadrature_order is None:
        quadrature_order = bempp.api.GLOBAL_PARAMETERS.quadrature.regular

    local_points, weights = rule(quadrature_order)
    points_per_element = local_points.shape[1]
    total_point_count = points_per_element * space.grid.number_of_elements

    localised = space.localised_space
    data, global_indices, vertex_indices = map_space_to_points_impl(
        space.grid.data("double"),
        localised.local2global,
        localised.local_multipliers,
        localised.normal_multipliers,
        space.support_elements,
        space.numba_evaluate,
        space.shapeset.evaluate,
        local_points,
        weights,
        space.number_of_shape_functions,
    )

    if return_transpose:
        transform = coo_matrix(
            (data, (global_indices, vertex_indices)),
            shape=(localised.grid_dof_count, total_point_count),
        )
        return (
            aslinearoperator(space.dof_transformation.T)
            @ aslinearoperator(space.map_to_localised_space.T)
            @ aslinearoperator(transform)
        )

    transform = coo_matrix(
        (data, (vertex_indices, global_indices)),
        shape=(total_point_count, localised.grid_dof_count),
    )
    return (
        aslinearoperator(transform)
        @ aslinearoperator(space.map_to_localised_space)
        @ aslinearoperator(space.dof_transformation)
    )
def integrate(self):
    """Integrate grid function over a grid.

    Delegates the actual quadrature to ``_integrate`` using the regular
    quadrature rule configured in the parameters.
    """
    from bempp.api.integration.triangle_gauss import rule

    space = self.space
    quad_points, quad_weights = rule(self._parameters.quadrature.regular)

    return _integrate(
        self.grid_coefficients,
        space.grid.data,
        space.support_elements,
        space.local2global,
        space.local_multipliers,
        space.normal_multipliers,
        space.numba_evaluate,
        space.shapeset.evaluate,
        quad_points,
        quad_weights,
        self.component_count,
        space.number_of_shape_functions,
    )
def potential_assembler(device_interface, space, operator_descriptor, points, parameters):
    """Assemble dense with OpenCL.

    Returns an ``evaluator(x)`` closure that computes the potential at
    ``points`` for a coefficient vector ``x`` using OpenCL kernels. The
    support elements are split into a vectorized "main" part (multiples
    of WORKGROUP_SIZE_POTENTIAL) and a scalar "remainder" part handled by
    a non-vectorized kernel.

    Parameters
    ----------
    device_interface : object
        Not referenced here; kept for signature compatibility.
    space : Space
        Function space of the density.
    operator_descriptor : descriptor object
        Kernel selection, options, complexity flag, precision and kernel
        dimension.
    points : ndarray
        3 x N array of evaluation points.
    parameters : ParameterList
        Supplies the regular quadrature order.
    """
    import bempp.api
    from bempp.api.integration.triangle_gauss import rule
    from bempp.api.utils.helpers import get_type
    from bempp.core.opencl_kernels import get_kernel_from_name
    from bempp.core.opencl_kernels import get_kernel_from_operator_descriptor
    from bempp.core.opencl_kernels import (
        default_context,
        default_device,
        get_vector_width,
    )

    # Select CPU or GPU execution from the global potential-device setting.
    if bempp.api.POTENTIAL_OPERATOR_DEVICE_TYPE == "gpu":
        device_type = "gpu"
    elif bempp.api.POTENTIAL_OPERATOR_DEVICE_TYPE == "cpu":
        device_type = "cpu"
    else:
        raise RuntimeError(
            f"Unknown device type {bempp.api.POTENTIAL_OPERATOR_DEVICE_TYPE}")

    mf = _cl.mem_flags
    ctx = default_context(device_type)
    device = default_device(device_type)
    quad_points, quad_weights = rule(parameters.quadrature.regular)
    precision = operator_descriptor.precision
    dtype = get_type(precision).real
    kernel_options = operator_descriptor.options
    kernel_dimension = operator_descriptor.kernel_dimension
    if operator_descriptor.is_complex:
        result_type = _np.dtype(get_type(precision).complex)
    else:
        result_type = dtype
    result_type = _np.dtype(result_type)
    indices = space.support_elements
    nelements = len(indices)
    vector_width = get_vector_width(precision, device_type=device_type)
    npoints = points.shape[1]
    # Split elements: ``main_size`` is a multiple of the workgroup size,
    # ``remainder_size`` is processed by a dedicated novec kernel.
    remainder_size = nelements % WORKGROUP_SIZE_POTENTIAL
    main_size = nelements - remainder_size
    main_kernel = None
    remainder_kernel = None
    sum_kernel = None
    # Compile-time options injected into the OpenCL source.
    options = {
        "NUMBER_OF_QUAD_POINTS": len(quad_weights),
        "SHAPESET": space.shapeset.identifier,
        "NUMBER_OF_SHAPE_FUNCTIONS": space.number_of_shape_functions,
        "WORKGROUP_SIZE": WORKGROUP_SIZE_POTENTIAL // vector_width,
    }
    if operator_descriptor.is_complex:
        options["COMPLEX_KERNEL"] = None
        options["COMPLEX_COEFFICIENTS"] = None
        options["COMPLEX_RESULT"] = None
    if main_size > 0:
        main_kernel = get_kernel_from_operator_descriptor(
            operator_descriptor, options, "potential", device_type=device_type)
        # Reduction kernel that sums the per-workgroup partial results.
        sum_kernel = get_kernel_from_name(
            "sum_for_potential_novec", options, precision,
            device_type=device_type)
    if remainder_size > 0:
        options["WORKGROUP_SIZE"] = remainder_size
        remainder_kernel = get_kernel_from_operator_descriptor(
            operator_descriptor,
            options,
            "potential",
            force_novec=True,
            device_type=device_type,
        )

    # Device buffers for all invariant inputs; only the coefficient buffer
    # is rewritten on each evaluator call.
    indices_buffer = _cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR,
                                hostbuf=indices)
    normals_buffer = _cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR,
                                hostbuf=space.normal_multipliers)
    points_buffer = _cl.Buffer(
        ctx,
        mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=points.ravel(order="F").astype(dtype),
    )
    grid_buffer = _cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=space.grid.as_array.astype(dtype))
    # elements_buffer = _cl.Buffer(
    #     ctx,
    #     mf.READ_ONLY | mf.COPY_HOST_PTR,
    #     hostbuf=space.grid.elements.ravel(order="F"),
    # )
    quad_points_buffer = _cl.Buffer(
        ctx,
        mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=quad_points.ravel(order="F").astype(dtype),
    )
    quad_weights_buffer = _cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR,
                                     hostbuf=quad_weights.astype(dtype))
    result_buffer = _cl.Buffer(ctx, mf.READ_WRITE,
                               size=result_type.itemsize * kernel_dimension *
                               npoints)
    coefficients_buffer = _cl.Buffer(ctx, mf.READ_ONLY,
                                     size=result_type.itemsize *
                                     space.map_to_full_grid.shape[0])
    if main_size > 0:
        # Scratch space: one partial result per workgroup, reduced later
        # by ``sum_kernel``.
        sum_size = (kernel_dimension * npoints *
                    (nelements // WORKGROUP_SIZE_POTENTIAL) *
                    result_type.itemsize)
        sum_buffer = _cl.Buffer(ctx, mf.READ_WRITE, size=sum_size)

    if not kernel_options:
        kernel_options = [0.0]
    kernel_options_array = _np.array(kernel_options, dtype=dtype)
    kernel_options_buffer = _cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR,
                                       hostbuf=kernel_options_array)

    def evaluator(x):
        """Evaluate a potential."""
        result = _np.empty(kernel_dimension * npoints, dtype=result_type)
        with _cl.CommandQueue(ctx, device=device) as queue:
            # Upload the new coefficients and zero the accumulators.
            _cl.enqueue_copy(queue, coefficients_buffer,
                             x.astype(result_type))
            _cl.enqueue_fill_buffer(
                queue, result_buffer, _np.uint8(0), 0,
                kernel_dimension * npoints * result_type.itemsize,
            )
            if main_size > 0:
                _cl.enqueue_fill_buffer(queue, sum_buffer, _np.uint8(0), 0,
                                        sum_size)
                queue.finish()
                main_kernel(
                    queue,
                    (npoints, main_size // vector_width),
                    (1, WORKGROUP_SIZE_POTENTIAL // vector_width),
                    grid_buffer,
                    indices_buffer,
                    normals_buffer,
                    points_buffer,
                    coefficients_buffer,
                    quad_points_buffer,
                    quad_weights_buffer,
                    sum_buffer,
                    kernel_options_buffer,
                )
                # Reduce the per-workgroup partial sums into the result.
                sum_kernel(
                    queue,
                    (kernel_dimension * npoints, ),
                    (1, ),
                    sum_buffer,
                    result_buffer,
                    _np.uint32(nelements // WORKGROUP_SIZE_POTENTIAL),
                )
            if remainder_size > 0:
                # Scalar kernel for the tail; offset past the main part.
                remainder_kernel(
                    queue,
                    (npoints, remainder_size),
                    (1, remainder_size),
                    grid_buffer,
                    indices_buffer,
                    normals_buffer,
                    points_buffer,
                    coefficients_buffer,
                    quad_points_buffer,
                    quad_weights_buffer,
                    result_buffer,
                    kernel_options_buffer,
                    global_offset=(0, main_size),
                )
            _cl.enqueue_copy(queue, result, result_buffer)
        return result

    return evaluator
def dense_assembler(device_interface, operator_descriptor, domain, dual_to_range, parameters, result):
    """Assemble dense with OpenCL.

    Fills ``result`` in place with the regular part of the dense
    discretisation of the boundary operator, launching one OpenCL kernel
    per (test color, trial color) block of elements.

    Parameters
    ----------
    device_interface : object
        Not referenced here; kept for signature compatibility.
    operator_descriptor : descriptor object
        Kernel selection, options, complexity flag and precision.
    domain : Space
        Trial function space.
    dual_to_range : Space
        Test function space.
    parameters : ParameterList
        Supplies the regular quadrature order.
    result : ndarray
        Output array copied back from the device at the end.
    """
    import bempp.api
    from bempp.api.integration.triangle_gauss import rule
    from bempp.api.utils.helpers import get_type
    from bempp.core.opencl_kernels import get_kernel_from_operator_descriptor
    from bempp.core.opencl_kernels import (
        default_context,
        default_device,
        get_vector_width,
    )

    if bempp.api.BOUNDARY_OPERATOR_DEVICE_TYPE == "gpu":
        device_type = "gpu"
    elif bempp.api.BOUNDARY_OPERATOR_DEVICE_TYPE == "cpu":
        device_type = "cpu"
    else:
        # Fix: report BOUNDARY_OPERATOR_DEVICE_TYPE here. The message
        # previously interpolated POTENTIAL_OPERATOR_DEVICE_TYPE, which is
        # a different setting from the one this branch actually tests.
        raise RuntimeError(
            f"Unknown device type {bempp.api.BOUNDARY_OPERATOR_DEVICE_TYPE}")

    mf = _cl.mem_flags
    ctx = default_context(device_type)
    device = default_device(device_type)
    precision = operator_descriptor.precision
    dtype = get_type(precision).real
    kernel_options = operator_descriptor.options

    quad_points, quad_weights = rule(parameters.quadrature.regular)

    # Elements come grouped by color; each (test color, trial color) pair
    # is assembled by a single kernel launch below.
    test_indices, test_color_indexptr = dual_to_range.get_elements_by_color()
    trial_indices, trial_color_indexptr = domain.get_elements_by_color()
    number_of_test_colors = len(test_color_indexptr) - 1
    number_of_trial_colors = len(trial_color_indexptr) - 1

    # Compile-time options injected into the OpenCL source.
    options = {
        "NUMBER_OF_QUAD_POINTS": len(quad_weights),
        "TEST": dual_to_range.shapeset.identifier,
        "TRIAL": domain.shapeset.identifier,
        "TRIAL_NUMBER_OF_ELEMENTS": domain.number_of_support_elements,
        "TEST_NUMBER_OF_ELEMENTS": dual_to_range.number_of_support_elements,
        "NUMBER_OF_TEST_SHAPE_FUNCTIONS":
            dual_to_range.number_of_shape_functions,
        "NUMBER_OF_TRIAL_SHAPE_FUNCTIONS": domain.number_of_shape_functions,
    }
    if operator_descriptor.is_complex:
        options["COMPLEX_KERNEL"] = None

    # Vectorized kernel for full vector lanes plus a scalar ("novec")
    # kernel for the remaining trial elements.
    main_kernel = get_kernel_from_operator_descriptor(
        operator_descriptor, options, "regular", device_type=device_type)
    remainder_kernel = get_kernel_from_operator_descriptor(
        operator_descriptor,
        options,
        "regular",
        force_novec=True,
        device_type=device_type,
    )

    # Device buffers for all inputs; everything is read-only except the
    # result buffer.
    test_indices_buffer = _cl.Buffer(
        ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=test_indices)
    trial_indices_buffer = _cl.Buffer(
        ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=trial_indices)
    test_normals_buffer = _cl.Buffer(
        ctx, mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=dual_to_range.normal_multipliers)
    trial_normals_buffer = _cl.Buffer(
        ctx, mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=domain.normal_multipliers)
    test_grid_buffer = _cl.Buffer(
        ctx,
        mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=dual_to_range.grid.as_array.astype(dtype),
    )
    trial_grid_buffer = _cl.Buffer(
        ctx, mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=domain.grid.as_array.astype(dtype))
    test_elements_buffer = _cl.Buffer(
        ctx,
        mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=dual_to_range.grid.elements.ravel(order="F"),
    )
    trial_elements_buffer = _cl.Buffer(
        ctx,
        mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=domain.grid.elements.ravel(order="F"),
    )
    test_local2global_buffer = _cl.Buffer(
        ctx, mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=dual_to_range.local2global)
    trial_local2global_buffer = _cl.Buffer(
        ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=domain.local2global)
    test_multipliers_buffer = _cl.Buffer(
        ctx,
        mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=dual_to_range.local_multipliers.astype(dtype),
    )
    trial_multipliers_buffer = _cl.Buffer(
        ctx,
        mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=domain.local_multipliers.astype(dtype),
    )
    quad_points_buffer = _cl.Buffer(
        ctx,
        mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=quad_points.ravel(order="F").astype(dtype),
    )
    quad_weights_buffer = _cl.Buffer(
        ctx, mf.READ_ONLY | mf.COPY_HOST_PTR,
        hostbuf=quad_weights.astype(dtype))
    result_buffer = _cl.Buffer(ctx, mf.READ_WRITE, size=result.nbytes)

    if not kernel_options:
        kernel_options = [0.0]
    kernel_options_array = _np.array(kernel_options, dtype=dtype)
    kernel_options_buffer = _cl.Buffer(
        ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=kernel_options_array)

    vector_width = get_vector_width(precision, device_type=device_type)

    def kernel_runner(
        queue,
        test_offset,
        trial_offset,
        test_number_of_indices,
        trial_number_of_indices,
    ):
        """Actually run the kernel for a given range."""
        # Trial elements that do not fill a whole vector lane go to the
        # scalar remainder kernel.
        remainder_size = trial_number_of_indices % vector_width
        main_size = trial_number_of_indices - remainder_size
        buffers = [
            test_indices_buffer,
            trial_indices_buffer,
            test_normals_buffer,
            trial_normals_buffer,
            test_grid_buffer,
            trial_grid_buffer,
            test_elements_buffer,
            trial_elements_buffer,
            test_local2global_buffer,
            trial_local2global_buffer,
            test_multipliers_buffer,
            trial_multipliers_buffer,
            quad_points_buffer,
            quad_weights_buffer,
            result_buffer,
            kernel_options_buffer,
            _np.int32(dual_to_range.global_dof_count),
            _np.int32(domain.global_dof_count),
            _np.uint8(domain.grid != dual_to_range.grid),
        ]
        if main_size > 0:
            main_kernel(
                queue,
                (test_number_of_indices, main_size // vector_width),
                (1, 1),
                *buffers,
                global_offset=(test_offset, trial_offset),
            )
        if remainder_size > 0:
            remainder_kernel(
                queue,
                (test_number_of_indices, remainder_size),
                (1, 1),
                *buffers,
                global_offset=(test_offset, trial_offset + main_size),
            )

    with _cl.CommandQueue(ctx, device=device) as queue:
        _cl.enqueue_fill_buffer(queue, result_buffer, _np.uint8(0), 0,
                                result.nbytes)
        for test_index in range(number_of_test_colors):
            test_offset = test_color_indexptr[test_index]
            n_test_indices = (test_color_indexptr[1 + test_index] -
                              test_color_indexptr[test_index])
            for trial_index in range(number_of_trial_colors):
                n_trial_indices = (trial_color_indexptr[1 + trial_index] -
                                   trial_color_indexptr[trial_index])
                trial_offset = trial_color_indexptr[trial_index]
                kernel_runner(queue, test_offset, trial_offset,
                              n_test_indices, n_trial_indices)
        _cl.enqueue_copy(queue, result, result_buffer)
def _setup(
    self,
    domain,
    dual_to_range,
    regular_order,
    singular_order,
    expansion_order=10,
    max_level=-1,
):
    """Setup the Fmm computation.

    Configures the exafmm Laplace backend, builds the source/target
    octree, records the leaf-node structure, creates the maps between
    space coefficients and quadrature points, and precomputes the FMM
    operators.

    Parameters
    ----------
    domain : Space
        Trial function space.
    dual_to_range : Space
        Test function space.
    regular_order : int
        Quadrature order for the regular rule.
    singular_order : int
        Quadrature order stored for singular integration.
    expansion_order : int
        Multipole expansion order passed to exafmm.
    max_level : int
        Maximum tree level; -1 means derive it via ``compute_max_level``.
    """
    import bempp.api
    from .common import map_space_to_points
    from .common import grid_to_points
    from .common import LeafNode
    from exafmm_laplace import init_sources
    from exafmm_laplace import init_targets
    from exafmm_laplace import build_tree
    from exafmm_laplace import build_list
    from bempp.api.integration.triangle_gauss import rule
    import exafmm_laplace

    self._domain = domain
    self._dual_to_range = dual_to_range
    self._regular_order = regular_order
    self._singular_order = singular_order
    self._expansion_order = expansion_order
    self._shape = (dual_to_range.global_dof_count, domain.global_dof_count)
    self._local_points, self._weights = rule(regular_order)

    if max_level == -1:
        max_level = compute_max_level(domain, dual_to_range)
    if max_level < 0:
        raise ValueError("Could not correctly determine maximum level.")

    # _NCRITICAL is presumably a module-level exafmm tuning constant —
    # defined outside this view.
    exafmm_laplace.configure(expansion_order, _NCRITICAL, max_level)

    # Quadrature-point clouds for both grids serve as FMM sources/targets.
    self._sources = grid_to_points(self._domain.grid, self._local_points)
    self._targets = grid_to_points(self._dual_to_range.grid,
                                   self._local_points)

    # Source charges start at zero; actual charges are supplied per
    # evaluation elsewhere.
    source_bodies = init_sources(
        self._sources, _np.zeros(len(self._sources), dtype=_np.float64)
    )
    target_bodies = init_targets(self._targets)

    build_tree(source_bodies, target_bodies)
    exafmm_nodes = build_list(True)

    with bempp.api.Timer() as t:
        # NOTE(review): self._leaf_nodes is assumed to be initialized
        # (e.g. in __init__) before this method runs — not visible here.
        for exafmm_node in exafmm_nodes:
            if not exafmm_node.is_leaf:
                continue
            self._leaf_nodes[exafmm_node.key] = LeafNode(
                exafmm_node.key,
                exafmm_node.isrcs,
                exafmm_node.itrgs,
                [node.key for node in exafmm_node.colleagues
                 if node is not None],
            )
    bempp.api.log(f"Time for node data structures: {t.interval}")

    with bempp.api.Timer() as t:
        self._source_transform = map_space_to_points(
            self._domain, self._local_points, self._weights, "source"
        )
    bempp.api.log(f"Time for domain map: {t.interval}")

    with bempp.api.Timer() as t:
        self._target_transform = map_space_to_points(
            self._dual_to_range,
            self._local_points,
            self._weights,
            "target",
            return_transpose=True,
        )
    bempp.api.log(f"Time for dual map: {t.interval}")

    self._compute_near_field_matrix()

    with bempp.api.Timer() as t:
        exafmm_laplace.precompute()
    bempp.api.log(f"Time for FMM precomputation. {t.interval}")
def __init__(
    self,
    space,
    dual_space=None,
    fun=None,
    coefficients=None,
    projections=None,
    parameters=None,
):
    """
    Construct a grid function.

    A grid function can be initialized in three different ways.

    1. By providing a Python callable. Any Python callable of the
       following form is valid.::

            callable(x,n,domain_index,result)

       Here, x, n, and result are all numpy arrays. x contains the
       current evaluation point, n the associated outward normal
       direction and result is a numpy array that will store the result
       of the Python callable. The variable domain_index stores the
       index of the subdomain on which x lies (default 0). This makes it
       possible to define different functions for different subdomains.

       The following example defines input data that is the inner
       product of the coordinate x with the normal direction n.::

            fun(x,n,domain_index,result):
                result[0] = np.dot(x,n)

    2. By providing a vector of coefficients at the nodes. This is
       preferable if the coefficients of the data are coming from an
       external code.

    3. By providing a vector of projection data and a corresponding
       dual space.

    Parameters
    ----------
    space : bempp.api.space.Space
        The space over which the GridFunction is defined.
    dual_space : bempp.api.Space
        A representation of the dual space. If not specified
        then space == dual_space is assumed (optional).
    fun : callable
        A Python function from which the GridFunction is constructed
        (optional).
    coefficients : np.ndarray
        A 1-dimensional array with the coefficients of the GridFunction
        at the interpolation points of the space (optional).
    projections : np.ndarray
        A 1-dimensional array with the projections of the GridFunction
        onto a dual space (optional).
    parameters : bempp.api.ParameterList
        A ParameterList object used for the assembly of
        the GridFunction (optional).

    Notes
    -----
    * Only one of projections, coefficients, or fun is allowed as
      parameter.

    Examples
    --------
    To create a GridFunction from a Python callable my_fun use

    >>> grid_function = GridFunction(space, fun=my_fun)

    To create a GridFunction from a vector of coefficients coeffs use

    >>> grid_function = GridFunction(space,coefficients=coeffs)

    To create a GridFunction from a vector of projections proj use

    >>> grid_function = GridFunction(
            space,dual_space=dual_space, projections=proj)

    """
    from bempp.api.utils.helpers import assign_parameters
    from bempp.api.space.space import return_compatible_representation

    self._space = None
    self._dual_space = None
    self._coefficients = None
    self._grid_coefficients = None
    self._projections = None
    self._representation = None

    if dual_space is None:
        dual_space = space

    self._space, self._dual_space = space, dual_space

    # Now check that space and dual are defined over same grid
    # with the same normal directions. If one space is barycentric,
    # need to take this into account.
    comp_domain, comp_dual = return_compatible_representation(space,
                                                              dual_space)
    self._comp_domain = comp_domain
    self._comp_dual = comp_dual

    if (
        not comp_domain.grid == comp_dual.grid
        or not _np.all(
            comp_domain.normal_multipliers == comp_dual.normal_multipliers
        )
    ):
        raise ValueError(
            "Space and dual space must be defined on the "
            + "same grid with same normal directions."
        )

    self._parameters = assign_parameters(parameters)

    # Exactly one data source must be supplied.
    if sum(1 for e in [fun, coefficients, projections] if e is not None) != 1:
        raise ValueError(
            "Exactly one of 'fun', 'coefficients' or 'projections' "
            + "must be nonzero."
        )

    if coefficients is not None:
        self._coefficients = coefficients
        self._representation = "primal"

    if projections is not None:
        self._projections = projections
        self._representation = "dual"

    if fun is not None:
        from bempp.api.integration.triangle_gauss import rule

        points, weights = rule(self._parameters.quadrature.regular)

        # ``bempp_type`` is attached by the bempp callable decorator and
        # determines the projection dtype.
        if fun.bempp_type == "real":
            dtype = "float64"
        else:
            dtype = "complex128"

        grid_projections = _np.zeros(comp_dual.grid_dof_count, dtype=dtype)

        # Create a Numba callable from the function
        _project_function(
            fun,
            comp_dual.grid.data,
            comp_dual.support_elements,
            comp_dual.local2global,
            comp_dual.local_multipliers,
            comp_dual.normal_multipliers,
            comp_dual.numba_evaluate,
            comp_dual.shapeset.evaluate,
            points,
            weights,
            comp_domain.codomain_dimension,
            grid_projections,
        )

        # Map grid-level projections to the dual space's global dofs.
        self._projections = comp_dual.dof_transformation.T @ grid_projections
        self._representation = "dual"
def from_grid(cls, source_grid, mode, wavenumber=None, target_grid=None, precision="double"):
    """
    Initialise an Exafmm instance from a given source and target grid.

    Parameters
    ----------
    source_grid : Grid object
        Grid for the source points.
    mode: string
        Fmm mode. One of 'laplace', 'helmholtz', or 'modified_helmholtz'
    wavenumber : real number
        For Helmholtz or modified Helmholtz the wavenumber.
    target_grid : Grid object
        An optional target grid. If not provided the source and target
        grid are assumed to be identical.
    precision : string
        Either 'single' or 'double'. Currently, the Fmm is always
        executed in double precision.
    """
    import bempp.api
    from bempp.api.integration.triangle_gauss import rule
    from bempp.api.fmm.helpers import get_local_interaction_operator
    import numpy as np

    quadrature_order = bempp.api.GLOBAL_PARAMETERS.quadrature.regular
    local_points, _ = rule(quadrature_order)

    if target_grid is None:
        target_grid = source_grid

    source_points = source_grid.map_to_point_cloud(
        quadrature_order, precision=precision
    )
    if target_grid != source_grid:
        target_points = target_grid.map_to_point_cloud(
            quadrature_order, precision=precision
        )
    else:
        target_points = source_points

    singular_correction = None
    if target_grid == source_grid:
        # Identical source and target grids require singular correction
        # terms. Determine the kernel parameter array and complexity flag
        # for the requested mode, then build the operator once.
        if mode == "laplace":
            kernel_parameters = np.array([], dtype="float64")
            complex_kernel = False
        elif mode == "helmholtz":
            kernel_parameters = np.array(
                [np.real(wavenumber), np.imag(wavenumber)], dtype="float64"
            )
            complex_kernel = True
        elif mode == "modified_helmholtz":
            kernel_parameters = np.array([wavenumber], dtype="float64")
            complex_kernel = False
        else:
            kernel_parameters = None
        if kernel_parameters is not None:
            singular_correction = get_local_interaction_operator(
                source_grid,
                local_points,
                mode,
                kernel_parameters,
                precision,
                complex_kernel,
            )

    fmm_parameters = bempp.api.GLOBAL_PARAMETERS.fmm
    return cls(
        source_points,
        target_points,
        mode,
        wavenumber=wavenumber,
        depth=fmm_parameters.depth,
        expansion_order=fmm_parameters.expansion_order,
        ncrit=fmm_parameters.ncrit,
        precision=precision,
        singular_correction=singular_correction,
    )
def test_vectorized_assembly():
    """Test vectorized assembly of grid functions.

    Checks two properties: (1) the quadrature information gathered by
    ``get_function_quadrature_information`` matches a per-element
    reference computation; (2) a vectorized callable produces the same
    projections as its non-vectorized counterpart.
    """
    from bempp.api.integration.triangle_gauss import rule
    from bempp.api.assembly.grid_function import get_function_quadrature_information

    grid = bempp.api.shapes.cube()
    # P1 space restricted to two segments with one swapped normal so the
    # support is a strict subset of the grid.
    p1_space = bempp.api.function_space(grid, "P", 1, segments=[1, 2],
                                        swapped_normals=[1])
    direction = np.array([1, 2, 3]) / np.sqrt(14)
    k = 2

    points, _ = rule(4)
    npoints = points.shape[1]
    grid_data = p1_space.grid.data("double")

    (
        global_points,
        global_normals,
        global_domain_indices,
    ) = get_function_quadrature_information(grid_data,
                                            p1_space.support_elements,
                                            p1_space.normal_multipliers,
                                            points)

    # Part 1: per-element reference check of points, normals and domain
    # indices. Quadrature data for support element ``index`` occupies the
    # slice [index * npoints, (index + 1) * npoints).
    for index, element in enumerate(p1_space.support_elements):
        element_global_points = grid_data.local2global(element, points)
        np.testing.assert_allclose(
            global_points[:, index * npoints:(1 + index) * npoints],
            element_global_points,
        )
        for local_index in range(npoints):
            np.testing.assert_allclose(
                global_normals[:, index * npoints + local_index],
                grid_data.normals[element] *
                p1_space.normal_multipliers[element],
            )
            assert (global_domain_indices[index * npoints + local_index] ==
                    grid_data.domain_indices[element])

    # Part 2: vectorized vs non-vectorized callables must agree.
    @bempp.api.callable(complex=True)
    def fun_non_vec(x, n, d, res):
        res[0] = np.dot(n, direction) * 1j * k * np.exp(
            1j * k * np.dot(x, direction))

    @bempp.api.callable(complex=True, vectorized=True)
    def fun_vec(x, n, d, res):
        res[0, :] = (np.dot(direction, n) * 1j * k *
                     np.exp(1j * k * np.dot(direction, x)))

    grid_fun_non_vec = bempp.api.GridFunction(p1_space, fun=fun_non_vec)
    grid_fun_vec = bempp.api.GridFunction(p1_space, fun=fun_vec)

    rel_diff = np.abs(grid_fun_non_vec.projections() -
                      grid_fun_vec.projections()) / np.abs(
                          grid_fun_non_vec.projections())
    assert np.max(rel_diff) < 1e-14
def _assemble(self):
    """Assemble the operator.

    Builds the sparse discrete operator for multiplication with the grid
    function, element by element over the common support of trial, test
    and function spaces.

    Returns
    -------
    SparseDiscreteBoundaryOperator
        The assembled sparse operator.
    """
    from bempp.api.space.space import return_compatible_representation
    from bempp.api.assembly.discrete_boundary_operator import (
        SparseDiscreteBoundaryOperator,
    )
    from bempp.api.integration.triangle_gauss import rule
    from scipy.sparse import coo_matrix
    import numpy as _np

    points, weights = rule(self._parameters.quadrature.regular)
    npoints = len(weights)

    comp_trial, comp_test, comp_fun = return_compatible_representation(
        self.domain, self.dual_to_range, self._grid_fun.space)
    grid = comp_trial.grid

    if self._mode == "component":
        op = _np.multiply
    elif self._mode == "inner":
        # Sum the componentwise product over the leading (component)
        # axis. Bug fix: the original indexed the bound method as
        # ``y.reshape[:, _np.newaxis, :]``, which raises TypeError; the
        # intended operation is broadcasting ``y`` with a new axis.
        op = lambda x, y: _np.sum(
            x * y[:, _np.newaxis, :], axis=0, keepdims=True)

    # Elements supported by trial, test and function space simultaneously.
    # (A previous redundant set-intersection computation of the same
    # quantity has been removed.)
    elements = _np.flatnonzero(
        comp_trial.support * comp_test.support * comp_fun.support)
    number_of_elements = len(elements)

    nshape_trial = comp_trial.shapeset.number_of_shape_functions
    nshape_test = comp_test.shapeset.number_of_shape_functions
    nshape = nshape_trial * nshape_test

    data = _np.zeros(number_of_elements * nshape)
    for index, elem_index in enumerate(elements):
        # Bug fix: ``integration_elements`` is indexed by the global
        # element number, so use ``elem_index`` here. The original used
        # the enumeration index, which is wrong whenever the common
        # support is a strict subset of the grid.
        scale_vals = (self._grid_fun.evaluate(elem_index, points) * weights *
                      grid.integration_elements[elem_index])
        domain_vals = comp_trial.evaluate(elem_index, points)
        trial_vals = op(domain_vals, scale_vals)
        test_vals = _np.conj(comp_test.evaluate(elem_index, points))
        # Contract over component and quadrature-point axes to obtain the
        # local (test shape x trial shape) interaction block.
        res = _np.tensordot(test_vals, trial_vals, axes=([0, 2], [0, 2]))
        data[nshape * index:nshape * (1 + index)] = res.ravel()

    # Local (row, col) index patterns replicated for all elements, shifted
    # by each element's offset in the raveled local2global maps.
    irange = _np.arange(nshape_test)
    jrange = _np.arange(nshape_trial)
    rows = _np.tile(_np.repeat(irange, nshape_trial),
                    number_of_elements) + _np.repeat(
                        elements * nshape_test, nshape)
    cols = _np.tile(_np.tile(jrange, nshape_test),
                    number_of_elements) + _np.repeat(
                        elements * nshape_trial, nshape)
    new_rows = comp_test.local2global.ravel()[rows]
    new_cols = comp_trial.local2global.ravel()[cols]

    nrows = comp_test.dof_transformation.shape[0]
    ncols = comp_trial.dof_transformation.shape[0]
    mat = coo_matrix((data, (new_rows, new_cols)),
                     shape=(nrows, ncols)).tocsr()

    if comp_trial.requires_dof_transformation:
        mat = mat @ self.domain.dof_transformation
    if comp_test.requires_dof_transformation:
        mat = self.dual_to_range.dof_transformation.T @ mat

    return SparseDiscreteBoundaryOperator(mat)