def _define_coordinate_dofs_lincomb(self, e, mt, tabledata, quadrature_rule, access):
    """Define x or J as a linear combination of coordinate dofs with given table data."""
    L = self.language

    # Get properties of domain
    domain = mt.terminal.ufl_domain()
    gdim = domain.geometric_dimension()
    coordinate_element = domain.ufl_coordinate_element()
    num_scalar_dofs = create_element(coordinate_element).sub_element.dim

    # Reference coordinates are known and there is no coordinate field, so we compute
    # this component as a linear combination of the coordinate dofs and the table values

    # Find table name
    ttype = tabledata.ttype
    assert ttype != "zeros"
    assert ttype != "ones"

    begin = tabledata.offset
    num_dofs = tabledata.values.shape[3]
    bs = tabledata.block_size

    # Inlined version (we know this is bounded by a small number)
    FE = self.symbols.element_table(tabledata, self.entitytype, mt.restriction)
    dof_access = self.symbols.domain_dofs_access(gdim, num_scalar_dofs, mt.restriction)
    value = L.Sum([dof_access[begin + i * bs] * FE[i] for i in range(num_dofs)])
    code = [L.VariableDecl("const double", access, value)]

    return code
def _compute_element_ir(ufl_element, element_numbers, finite_element_names):
    """Compute intermediate representation of element."""
    logger.info(f"Computing IR for element {ufl_element}")

    # Create basix elements
    basix_element = create_element(ufl_element)
    cell = ufl_element.cell()
    cellname = cell.cellname()

    # Store id
    ir = {"id": element_numbers[ufl_element]}
    ir["name"] = finite_element_names[ufl_element]

    # Compute data for each function
    ir["signature"] = repr(ufl_element)
    ir["cell_shape"] = cellname
    ir["topological_dimension"] = cell.topological_dimension()
    ir["geometric_dimension"] = cell.geometric_dimension()
    ir["space_dimension"] = basix_element.dim
    ir["element_type"] = basix_element.element_type
    ir["lagrange_variant"] = basix_element.lagrange_variant
    ir["basix_family"] = basix_element.element_family
    ir["basix_cell"] = basix_element.cell_type
    ir["discontinuous"] = basix_element.discontinuous
    ir["degree"] = ufl_element.degree()
    ir["family"] = ufl_element.family()
    ir["value_shape"] = ufl_element.value_shape()
    ir["reference_value_shape"] = ufl_element.reference_value_shape()

    ir["num_sub_elements"] = ufl_element.num_sub_elements()
    ir["sub_elements"] = [finite_element_names[e] for e in ufl_element.sub_elements()]

    if hasattr(basix_element, "block_size"):
        ir["block_size"] = basix_element.block_size
        ufl_element = ufl_element.sub_elements()[0]
        basix_element = create_element(ufl_element)
    else:
        ir["block_size"] = 1

    ir["entity_dofs"] = basix_element.entity_dofs

    return ir_element(**ir)
def test_values(self, family, cell, degree, reference):
    # Create element
    element = create_element(FiniteElement(family, cell, degree))

    # Get some points and check basis function values at points
    points = [random_point(element_coords(cell)) for i in range(5)]
    for x in points:
        table = element.tabulate(0, (x,))
        basis = table[0]
        if sum(element.value_shape) == 1:
            for i, value in enumerate(basis[0]):
                assert numpy.isclose(value, reference[i](x))
        else:
            for i, ref in enumerate(reference):
                assert numpy.allclose(basis[0][i::len(reference)], ref(x))
def facet_edge_vectors(self, e, mt, tabledata, num_points):
    L = self.language

    # Get properties of domain
    domain = mt.terminal.ufl_domain()
    cellname = domain.ufl_cell().cellname()
    gdim = domain.geometric_dimension()
    coordinate_element = domain.ufl_coordinate_element()

    if cellname in ("tetrahedron", "hexahedron"):
        pass
    elif cellname in ("interval", "triangle", "quadrilateral"):
        raise RuntimeError(
            f"Physical facet edge vectors are not defined for the {cellname} cell.")
    else:
        raise RuntimeError(f"Unhandled cell type {cellname}.")

    # Get dimension and dofmap of scalar element
    assert isinstance(coordinate_element, MixedElement)
    assert coordinate_element.value_shape() == (gdim, )
    ufl_scalar_element, = set(coordinate_element.sub_elements())
    assert ufl_scalar_element.family() in ("Lagrange", "Q", "S")
    basix_scalar_element = create_element(ufl_scalar_element)
    num_scalar_dofs = basix_scalar_element.dim

    # Get edge vertices
    facet = self.symbols.entity("facet", mt.restriction)
    facet_edge = mt.component[0]
    facet_edge_vertices = L.Symbol(f"{cellname}_facet_edge_vertices")
    vertex0 = facet_edge_vertices[facet][facet_edge][0]
    vertex1 = facet_edge_vertices[facet][facet_edge][1]

    # Get dofs and component
    component = mt.component[1]
    assert coordinate_element.degree() == 1, "Assuming degree 1 element"
    dof0 = vertex0
    dof1 = vertex1
    expr = (
        self.symbols.domain_dof_access(dof0, component, gdim, num_scalar_dofs, mt.restriction)
        - self.symbols.domain_dof_access(dof1, component, gdim, num_scalar_dofs, mt.restriction))

    return expr
def _compute_dofmap_ir(ufl_element, element_numbers, dofmap_names):
    """Compute intermediate representation of dofmap."""
    logger.info(f"Computing IR for dofmap of {ufl_element}")

    # Create basix elements
    basix_element = create_element(ufl_element)

    # Store id
    ir = {"id": element_numbers[ufl_element]}
    ir["name"] = dofmap_names[ufl_element]

    # Compute data for each function
    ir["signature"] = "FFCx dofmap for " + repr(ufl_element)
    ir["sub_dofmaps"] = [dofmap_names[e] for e in ufl_element.sub_elements()]
    ir["num_sub_dofmaps"] = ufl_element.num_sub_elements()

    if hasattr(basix_element, "block_size"):
        ir["block_size"] = basix_element.block_size
        basix_element = basix_element.sub_element
    else:
        ir["block_size"] = 1

    # Precompute repeatedly used items
    for i in basix_element.num_entity_dofs:
        # FIXME: this assumes the same number of DOFs on each entity of the same dim: this
        # assumption will not be true for prisms and pyramids
        if max(i) != min(i):
            raise RuntimeError("Elements with different numbers of DOFs on subentities of the same dimension"
                               " are not yet supported in FFCx.")

    num_dofs_per_entity = [i[0] for i in basix_element.num_entity_dofs]
    ir["num_entity_dofs"] = num_dofs_per_entity
    ir["tabulate_entity_dofs"] = (basix_element.entity_dofs, num_dofs_per_entity)

    num_dofs_per_entity_closure = [i[0] for i in basix_element.num_entity_closure_dofs]
    ir["num_entity_closure_dofs"] = num_dofs_per_entity_closure
    ir["tabulate_entity_closure_dofs"] = (basix_element.entity_closure_dofs, num_dofs_per_entity_closure)

    ir["num_global_support_dofs"] = basix_element.num_global_support_dofs
    ir["num_element_support_dofs"] = basix_element.dim - ir["num_global_support_dofs"]

    return ir_dofmap(**ir)
def cell_vertices(self, e, mt, tabledata, num_points):
    # Get properties of domain
    domain = mt.terminal.ufl_domain()
    gdim = domain.geometric_dimension()
    coordinate_element = domain.ufl_coordinate_element()

    # Get dimension and dofmap of scalar element
    assert isinstance(coordinate_element, MixedElement)
    assert coordinate_element.value_shape() == (gdim, )
    ufl_scalar_element, = set(coordinate_element.sub_elements())
    assert ufl_scalar_element.family() in ("Lagrange", "Q", "S")
    basix_scalar_element = create_element(ufl_scalar_element)
    vertex_scalar_dofs = basix_scalar_element.entity_dofs[0]
    num_scalar_dofs = basix_scalar_element.dim

    # Get dof and component
    dof, = vertex_scalar_dofs[mt.component[0]]
    component = mt.component[1]

    expr = self.symbols.domain_dof_access(dof, component, gdim, num_scalar_dofs, mt.restriction)
    return expr
def cell_edge_vectors(self, e, mt, tabledata, num_points):
    # Get properties of domain
    domain = mt.terminal.ufl_domain()
    cellname = domain.ufl_cell().cellname()
    gdim = domain.geometric_dimension()
    coordinate_element = domain.ufl_coordinate_element()

    if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"):
        pass
    elif cellname == "interval":
        raise RuntimeError("Physical cell edge vectors are not defined for the interval cell.")
    else:
        raise RuntimeError(f"Unhandled cell type {cellname}.")

    # Get dimension and dofmap of scalar element
    assert isinstance(coordinate_element, MixedElement)
    assert coordinate_element.value_shape() == (gdim, )
    ufl_scalar_element, = set(coordinate_element.sub_elements())
    assert ufl_scalar_element.family() in ("Lagrange", "Q", "S")
    basix_scalar_element = create_element(ufl_scalar_element)
    vertex_scalar_dofs = basix_scalar_element.entity_dofs[0]
    num_scalar_dofs = basix_scalar_element.dim

    # Get edge vertices
    edge = mt.component[0]
    vertex0, vertex1 = basix_scalar_element.reference_topology[1][edge]

    # Get dofs and component
    dof0, = vertex_scalar_dofs[vertex0]
    dof1, = vertex_scalar_dofs[vertex1]
    component = mt.component[1]

    return self.symbols.domain_dof_access(
        dof0, component, gdim, num_scalar_dofs, mt.restriction
    ) - self.symbols.domain_dof_access(
        dof1, component, gdim, num_scalar_dofs, mt.restriction
    )
def _compute_expression_ir(expression, index, prefix, analysis, parameters, visualise):
    """Compute intermediate representation of expression."""
    logger.info(f"Computing IR for expression {index}")

    # Compute representation
    ir = {}

    original_expression = (expression[2], expression[1])
    ir["name"] = naming.expression_name(original_expression, prefix)

    original_expression = expression[2]
    points = expression[1]
    expression = expression[0]

    try:
        cell = expression.ufl_domain().ufl_cell()
    except AttributeError:
        # This case corresponds to a spatially constant expression
        # without any dependencies
        cell = None

    # Prepare dimensions of all unique elements in the expression, including
    # elements for arguments, coefficients and coordinate mappings
    ir["element_dimensions"] = {
        ufl_element: create_element(ufl_element).dim
        for ufl_element in analysis.unique_elements
    }

    # Extract dimensions for elements of arguments only
    arguments = ufl.algorithms.extract_arguments(expression)
    argument_elements = tuple(f.ufl_element() for f in arguments)
    argument_dimensions = [
        ir["element_dimensions"][ufl_element] for ufl_element in argument_elements
    ]

    tensor_shape = argument_dimensions
    ir["tensor_shape"] = tensor_shape

    ir["expression_shape"] = list(expression.ufl_shape)

    coefficients = ufl.algorithms.extract_coefficients(expression)
    coefficient_numbering = {}
    for i, coeff in enumerate(coefficients):
        coefficient_numbering[coeff] = i

    # Add coefficient numbering to IR
    ir["coefficient_numbering"] = coefficient_numbering

    original_coefficient_positions = []
    original_coefficients = ufl.algorithms.extract_coefficients(original_expression)
    for coeff in coefficients:
        original_coefficient_positions.append(original_coefficients.index(coeff))

    ir["original_coefficient_positions"] = original_coefficient_positions

    coefficient_elements = tuple(f.ufl_element() for f in coefficients)

    offsets = {}
    _offset = 0
    for i, el in enumerate(coefficient_elements):
        offsets[coefficients[i]] = _offset
        _offset += ir["element_dimensions"][el]

    # Copy offsets also into IR
    ir["coefficient_offsets"] = offsets

    ir["integral_type"] = "expression"
    ir["entitytype"] = "cell"

    # Build offsets for Constants
    original_constant_offsets = {}
    _offset = 0
    for constant in ufl.algorithms.analysis.extract_constants(expression):
        original_constant_offsets[constant] = _offset
        _offset += numpy.product(constant.ufl_shape, dtype=int)

    ir["original_constant_offsets"] = original_constant_offsets

    ir["points"] = points

    weights = numpy.array([1.0] * points.shape[0])
    rule = QuadratureRule(points, weights)
    integrands = {rule: expression}

    if cell is None:
        assert len(ir["original_coefficient_positions"]) == 0 and len(ir["original_constant_offsets"]) == 0

    expression_ir = compute_integral_ir(cell, ir["integral_type"], ir["entitytype"], integrands,
                                        tensor_shape, parameters, visualise)

    ir.update(expression_ir)

    return ir_expression(**ir)
def _compute_integral_ir(form_data, form_index, element_numbers, integral_names, parameters, visualise):
    """Compute intermediate representation for form integrals."""
    _entity_types = {
        "cell": "cell",
        "exterior_facet": "facet",
        "interior_facet": "facet",
        "vertex": "vertex",
        "custom": "cell"
    }

    # Iterate over groups of integrals
    irs = []
    for itg_data_index, itg_data in enumerate(form_data.integral_data):
        logger.info(f"Computing IR for integral in integral group {itg_data_index}")

        # Compute representation
        entitytype = _entity_types[itg_data.integral_type]
        cell = itg_data.domain.ufl_cell()
        cellname = cell.cellname()
        tdim = cell.topological_dimension()
        assert all(tdim == itg.ufl_domain().topological_dimension() for itg in itg_data.integrals)

        ir = {
            "integral_type": itg_data.integral_type,
            "subdomain_id": itg_data.subdomain_id,
            "rank": form_data.rank,
            "geometric_dimension": form_data.geometric_dimension,
            "topological_dimension": tdim,
            "entitytype": entitytype,
            "num_facets": cell.num_facets(),
            "num_vertices": cell.num_vertices(),
            "enabled_coefficients": itg_data.enabled_coefficients,
            "cell_shape": cellname
        }

        # Get element space dimensions
        unique_elements = element_numbers.keys()
        ir["element_dimensions"] = {
            ufl_element: create_element(ufl_element).dim
            for ufl_element in unique_elements
        }

        ir["element_ids"] = {
            ufl_element: i
            for i, ufl_element in enumerate(unique_elements)
        }

        # Create dimensions of primary indices, needed to reset the argument
        # 'A' given to tabulate_tensor() by the assembler.
        argument_dimensions = [
            ir["element_dimensions"][ufl_element] for ufl_element in form_data.argument_elements
        ]

        # Compute shape of element tensor
        if ir["integral_type"] == "interior_facet":
            ir["tensor_shape"] = [2 * dim for dim in argument_dimensions]
        else:
            ir["tensor_shape"] = argument_dimensions

        integral_type = itg_data.integral_type
        cell = itg_data.domain.ufl_cell()

        # Group integrands with the same quadrature rule
        grouped_integrands = {}
        for integral in itg_data.integrals:
            md = integral.metadata() or {}
            scheme = md["quadrature_rule"]
            degree = md["quadrature_degree"]

            if scheme == "custom":
                points = md["quadrature_points"]
                weights = md["quadrature_weights"]
            elif scheme == "vertex":
                # FIXME: Could this come from basix?
                # The vertex scheme, i.e., averaging the function value in the
                # vertices and multiplying with the simplex volume, is only of
                # order 1 and inferior to other generic schemes in terms of
                # error reduction. Equation systems generated with the vertex
                # scheme have some properties that other schemes lack, e.g., the
                # mass matrix is a simple diagonal matrix. This may be
                # prescribed in certain cases.
                if degree > 1:
                    warnings.warn(
                        "Explicitly selected vertex quadrature (degree 1), "
                        "but requested degree is {}.".format(degree))
                if cellname == "tetrahedron":
                    points, weights = (numpy.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
                                                    [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
                                       numpy.array([1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0]))
                elif cellname == "triangle":
                    points, weights = (numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]),
                                       numpy.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0]))
                elif cellname == "interval":
                    # Trapezoidal rule
                    points, weights = (numpy.array([[0.0], [1.0]]),
                                       numpy.array([1.0 / 2.0, 1.0 / 2.0]))
            else:
                points, weights = create_quadrature_points_and_weights(
                    integral_type, cell, degree, scheme)

            points = numpy.asarray(points)
            weights = numpy.asarray(weights)
            rule = QuadratureRule(points, weights)

            if rule not in grouped_integrands:
                grouped_integrands[rule] = []
            grouped_integrands[rule].append(integral.integrand())

        sorted_integrals = {}
        for rule, integrands in grouped_integrands.items():
            integrands_summed = sorted_expr_sum(integrands)
            integral_new = Integral(integrands_summed, itg_data.integral_type, itg_data.domain,
                                    itg_data.subdomain_id, {}, None)
            sorted_integrals[rule] = integral_new

        # TODO: See if coefficient_numbering can be removed
        # Build coefficient numbering for UFC interface here, to avoid
        # renumbering in UFL and application of replace mapping
        coefficient_numbering = {}
        for i, f in enumerate(form_data.reduced_coefficients):
            coefficient_numbering[f] = i

        # Add coefficient numbering to IR
        ir["coefficient_numbering"] = coefficient_numbering

        index_to_coeff = sorted([(v, k) for k, v in coefficient_numbering.items()])
        offsets = {}
        width = 2 if integral_type == "interior_facet" else 1
        _offset = 0
        for k, el in zip(index_to_coeff, form_data.coefficient_elements):
            offsets[k[1]] = _offset
            _offset += width * ir["element_dimensions"][el]

        # Copy offsets also into IR
        ir["coefficient_offsets"] = offsets

        # Build offsets for Constants
        original_constant_offsets = {}
        _offset = 0
        for constant in form_data.original_form.constants():
            original_constant_offsets[constant] = _offset
            _offset += numpy.product(constant.ufl_shape, dtype=int)

        ir["original_constant_offsets"] = original_constant_offsets

        ir["precision"] = itg_data.metadata["precision"]

        # Create map from number of quadrature points -> integrand
        integrands = {rule: integral.integrand() for rule, integral in sorted_integrals.items()}

        # Build more specific intermediate representation
        integral_ir = compute_integral_ir(itg_data.domain.ufl_cell(), itg_data.integral_type,
                                          ir["entitytype"], integrands, ir["tensor_shape"],
                                          parameters, visualise)

        ir.update(integral_ir)

        # Fetch name
        ir["name"] = integral_names[(form_index, itg_data_index)]

        irs.append(ir_integral(**ir))

    return irs
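# Illustrative sanity check (not part of FFCx): the vertex scheme used above for the
# triangle places a weight of 1/6 at each vertex of the reference triangle, which
# reproduces integrals of affine functions exactly (the reference triangle has area 1/2).
def _check_vertex_scheme_triangle():
    import numpy
    points = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    weights = numpy.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0])
    # Affine integrand f(x, y) = 1 + 2x - y; its exact integral over the unit
    # triangle is 1/2 + 2*(1/6) - 1/6 = 2/3.
    approx = sum(w * (1.0 + 2.0 * x - y) for w, (x, y) in zip(weights, points))
    assert numpy.isclose(approx, 2.0 / 3.0)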
def get_ffcx_table_values(points, cell, integral_type, ufl_element, avg, entitytype,
                          derivative_counts, flat_component):
    """Extract values from FFCx element table.

    Returns a dict holding the tabulated numpy array (with axes
    (permutation, entity number, quadrature point number, dof number))
    together with the component offset and stride.
    """
    deriv_order = sum(derivative_counts)

    if integral_type in ufl.custom_integral_types:
        # Use quadrature points on cell for analysis in custom integral types
        integral_type = "cell"
        assert not avg

    if integral_type == "expression":
        # FFCx tables for expression are generated as interior cell points
        integral_type = "cell"

    if avg in ("cell", "facet"):
        # Redefine points to compute average tables

        # Make sure this is not called with points, that doesn't make sense
        # assert points is None

        # Not expecting derivatives of averages
        assert not any(derivative_counts)
        assert deriv_order == 0

        # Doesn't matter if it's exterior or interior facet integral,
        # just need a valid integral type to create quadrature rule
        if avg == "cell":
            integral_type = "cell"
        elif avg == "facet":
            integral_type = "exterior_facet"

        # Make quadrature rule and get points and weights
        points, weights = create_quadrature_points_and_weights(integral_type, cell,
                                                               ufl_element.degree(), "default")

    # Tabulate table of basis functions and derivatives in points for each entity
    tdim = cell.topological_dimension()
    entity_dim = integral_type_to_entity_dim(integral_type, tdim)
    num_entities = ufl.cell.num_cell_entities[cell.cellname()][entity_dim]

    numpy.set_printoptions(suppress=True, precision=2)
    basix_element = create_element(ufl_element)

    # Extract arrays for the right scalar component
    component_tables = []
    sh = tuple(basix_element.value_shape)
    assert len(sh) > 0

    component_element, offset, stride = basix_element.get_component_element(flat_component)

    for entity in range(num_entities):
        entity_points = map_integral_points(points, integral_type, cell, entity)
        tbl = component_element.tabulate(deriv_order, entity_points)
        tbl = tbl[basix_index(*derivative_counts)]
        component_tables.append(tbl)

    if avg in ("cell", "facet"):
        # Compute numeric integral of each component table
        wsum = sum(weights)
        for entity, tbl in enumerate(component_tables):
            num_dofs = tbl.shape[1]
            tbl = numpy.dot(tbl, weights) / wsum
            tbl = numpy.reshape(tbl, (1, num_dofs))
            component_tables[entity] = tbl

    # Loop over entities and fill table blockwise (each block = points x dofs)
    # Reorder axes as (points, dofs) instead of (dofs, points)
    assert len(component_tables) == num_entities
    num_points, num_dofs = component_tables[0].shape
    shape = (1, num_entities, num_points, num_dofs)
    res = numpy.zeros(shape)
    for entity in range(num_entities):
        res[:, entity, :, :] = component_tables[entity]

    return {'array': res, 'offset': offset, 'stride': stride}
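# Usage note (an assumption, based on how build_optimized_tables below consumes the result):
#     t = get_ffcx_table_values(...)
#     value = t['array'][0, entity, point, dof]   # leading axis is the permutation axis
# t['offset'] and t['stride'] map the component dof index back into the full element's
# dof numbering (roughly offset + i * stride).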
def build_optimized_tables(quadrature_rule, cell, integral_type, entitytype,
                           modified_terminals, existing_tables,
                           rtol=default_rtol, atol=default_atol):
    """Build the element tables needed for a list of modified terminals.

    Input:
        entitytype - str
        modified_terminals - ordered sequence of unique modified terminals
        FIXME: Document

    Output:
        mt_tables - dict(ModifiedTerminal: table data)
    """
    # Add to element tables
    analysis = {}
    for mt in modified_terminals:
        # FIXME: Use a namedtuple for res
        res = get_modified_terminal_element(mt)
        if res:
            analysis[mt] = res

    # Build element numbering using topological ordering so subelements
    # get priority
    all_elements = [res[0] for res in analysis.values()]
    unique_elements = ufl.algorithms.sort_elements(
        ufl.algorithms.analysis.extract_sub_elements(all_elements))
    element_numbers = {element: i for i, element in enumerate(unique_elements)}

    tables = existing_tables
    mt_tables = {}

    for mt in modified_terminals:
        res = analysis.get(mt)
        if not res:
            continue
        element, avg, local_derivatives, flat_component = res

        # Generate table and store table name with modified terminal

        # Build name for this particular table
        element_number = element_numbers[element]
        name = generate_psi_table_name(quadrature_rule, element_number, avg, entitytype,
                                       local_derivatives, flat_component)

        # FIXME - currently just recalculate the tables every time,
        # only reusing them if they match numerically.
        # It should be possible to reuse the cached tables by name, but
        # the dofmap offset may differ due to restriction.

        tdim = cell.topological_dimension()
        if entitytype == "facet":
            if tdim == 1:
                t = get_ffcx_table_values(quadrature_rule.points, cell, integral_type, element,
                                          avg, entitytype, local_derivatives, flat_component)
            elif tdim == 2:
                new_table = []
                for ref in range(2):
                    new_table.append(get_ffcx_table_values(
                        permute_quadrature_interval(quadrature_rule.points, ref),
                        cell, integral_type, element, avg, entitytype,
                        local_derivatives, flat_component))
                t = new_table[0]
                t['array'] = numpy.vstack([td['array'] for td in new_table])
            elif tdim == 3:
                cell_type = cell.cellname()
                if cell_type == "tetrahedron":
                    new_table = []
                    for rot in range(3):
                        for ref in range(2):
                            new_table.append(get_ffcx_table_values(
                                permute_quadrature_triangle(quadrature_rule.points, ref, rot),
                                cell, integral_type, element, avg, entitytype,
                                local_derivatives, flat_component))
                    t = new_table[0]
                    t['array'] = numpy.vstack([td['array'] for td in new_table])
                elif cell_type == "hexahedron":
                    new_table = []
                    for rot in range(4):
                        for ref in range(2):
                            new_table.append(get_ffcx_table_values(
                                permute_quadrature_quadrilateral(quadrature_rule.points, ref, rot),
                                cell, integral_type, element, avg, entitytype,
                                local_derivatives, flat_component))
                    t = new_table[0]
                    t['array'] = numpy.vstack([td['array'] for td in new_table])
        else:
            t = get_ffcx_table_values(quadrature_rule.points, cell, integral_type, element,
                                      avg, entitytype, local_derivatives, flat_component)

        # Clean up table
        tbl = clamp_table_small_numbers(t['array'], rtol=rtol, atol=atol)
        tabletype = analyse_table_type(tbl)

        if tabletype in piecewise_ttypes:
            # Reduce table to dimension 1 along num_points axis in generated code
            tbl = tbl[:, :, :1, :]
        if tabletype in uniform_ttypes:
            # Reduce table to dimension 1 along num_entities axis in generated code
            tbl = tbl[:, :1, :, :]
        is_permuted = is_permuted_table(tbl)
        if not is_permuted:
            # Reduce table along num_perms axis
            tbl = tbl[:1, :, :, :]

        # Check for existing identical table
        xname_found = False
        for xname in tables:
            if equal_tables(tbl, tables[xname]):
                xname_found = True
                break

        if xname_found:
            name = xname
            # Retrieve existing table
            tbl = tables[name]
        else:
            # Store new table
            tables[name] = tbl

        cell_offset = 0
        basix_element = create_element(element)

        if mt.restriction == "-" and isinstance(mt.terminal, ufl.classes.FormArgument):
            # offset = 0 or number of element dofs, if restricted to "-"
            cell_offset = basix_element.dim

        offset = cell_offset + t['offset']
        block_size = t['stride']

        # tables is just np.arrays, mt_tables hold metadata too
        mt_tables[mt] = unique_table_reference_t(
            name, tbl, offset, block_size, tabletype,
            tabletype in piecewise_ttypes, tabletype in uniform_ttypes, is_permuted)

    return mt_tables
def xtest_hhj(degree, expected_dim):
    "Test space dimensions of Hellan-Herrmann-Johnson element."
    P = create_element(FiniteElement("HHJ", "triangle", degree))
    assert P.dim == expected_dim
def test_regge(degree, expected_dim):
    "Test space dimensions of generalized Regge element."
    P = create_element(FiniteElement("Regge", "triangle", degree))
    assert P.dim == expected_dim
def test_discontinuous_lagrange(degree, expected_dim):
    "Test space dimensions of discontinuous Lagrange elements."
    P = create_element(FiniteElement("DG", "triangle", degree))
    assert P.dim == expected_dim
def xtest_continuous_lagrange_quadrilateral_spectral(degree, expected_dim):
    "Test space dimensions of continuous TensorProduct elements (quadrilateral)."
    P = create_element(FiniteElement("Lagrange", "quadrilateral", degree, variant="spectral"))
    assert P.dim == expected_dim
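# A minimal usage sketch mirroring the tests above. It assumes the same create_element
# wrapper and ufl.FiniteElement imports as the surrounding test module; the dimension
# check itself is standard (quadratic Lagrange on a triangle has 6 basis functions).
def example_create_element_usage():
    P2 = create_element(FiniteElement("Lagrange", "triangle", 2))
    assert P2.dim == 6  # 3 vertex dofs + 3 edge dofs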