Example #1
def _compute_element_ir(ufl_element, element_numbers, finite_element_names,
                        epsilon):
    """Compute intermediate representation of element."""

    logger.info(f"Computing IR for element {ufl_element}")

    # Create basix elements
    basix_element = create_basix_element(ufl_element)
    cell = ufl_element.cell()
    cellname = cell.cellname()

    # Store id
    ir = {"id": element_numbers[ufl_element]}
    ir["name"] = finite_element_names[ufl_element]

    # Compute data for each function
    ir["signature"] = repr(ufl_element)
    ir["cell_shape"] = cellname
    ir["topological_dimension"] = cell.topological_dimension()
    ir["geometric_dimension"] = cell.geometric_dimension()
    ir["space_dimension"] = basix_element.dim
    ir["degree"] = ufl_element.degree()
    ir["family"] = basix_element.family_name
    ir["value_shape"] = ufl_element.value_shape()
    ir["reference_value_shape"] = ufl_element.reference_value_shape()

    ir["num_sub_elements"] = ufl_element.num_sub_elements()
    ir["create_sub_element"] = [
        finite_element_names[e] for e in ufl_element.sub_elements()
    ]

    if hasattr(basix_element, "block_size"):
        ir["block_size"] = basix_element.block_size
        ufl_element = ufl_element.sub_elements()[0]
        basix_element = create_basix_element(ufl_element)
    else:
        ir["block_size"] = 1

    im = basix_element.interpolation_matrix
    if im.shape[0] == im.shape[1] and numpy.allclose(
            im, numpy.identity(im.shape[0])):
        ir["interpolation_is_identity"] = 1
    else:
        ir["interpolation_is_identity"] = 0

    ir["base_transformations"] = basix_element.base_transformations
    ir["needs_transformation_data"] = 0
    for p in basix_element.base_transformations:
        if not numpy.allclose(p, numpy.identity(len(p))):
            ir["needs_transformation_data"] = 1

    ir["entity_dofs"] = basix_element.entity_dof_numbers

    return ir_element(**ir)
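The two numpy checks in this example (is the interpolation matrix the identity, and does any base transformation differ from the identity) can be tried in isolation. A minimal standalone sketch, assuming made-up matrices in place of a real basix element:

import numpy

# Hypothetical stand-ins for basix_element.interpolation_matrix and
# basix_element.base_transformations (not taken from a real element).
im = numpy.identity(3)
base_transformations = [numpy.identity(3),
                        numpy.array([[0.0, 1.0, 0.0],
                                     [1.0, 0.0, 0.0],
                                     [0.0, 0.0, 1.0]])]

# Same test as in _compute_element_ir: square and numerically equal to I.
interpolation_is_identity = int(im.shape[0] == im.shape[1]
                                and numpy.allclose(im, numpy.identity(im.shape[0])))

# Transformation data is needed as soon as one transformation is not the identity.
needs_transformation_data = int(any(
    not numpy.allclose(p, numpy.identity(len(p))) for p in base_transformations))

print(interpolation_is_identity, needs_transformation_data)  # prints: 1 1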
Example #2
def _tabulate_coordinate_mapping_basis(ufl_element):
    # TODO: Move this function to a table generation module?

    # Get scalar element, assuming coordinates are represented
    # with a VectorElement of scalar subelements
    selement = ufl_element.sub_elements()[0]

    basix_element = create_basix_element(selement)
    cell = selement.cell()
    tdim = cell.topological_dimension()

    tables = {}

    # Get points
    origin = (0.0, ) * tdim
    midpoint = cell_midpoint(cell)

    # Tabulate basis
    t0 = basix_element.tabulate(1, [origin])
    tm = basix_element.tabulate(1, [midpoint])

    # Get basis values at cell origin
    tables["x0"] = t0[0][:, 0]

    # Get basis values at cell midpoint
    tables["xm"] = tm[0][:, 0]

    # Get basis derivative values at cell origin
    tables["J0"] = numpy.asarray([t0[d][:, 0] for d in range(1, 1 + tdim)])

    # Get basis derivative values at cell midpoint
    tables["Jm"] = numpy.asarray([tm[d][:, 0] for d in range(1, 1 + tdim)])

    return tables
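The midpoint used here comes from cell_midpoint(cell); the sketch below illustrates the assumption that this is the vertex average of the reference cell (the vertices are written out by hand for the unit triangle, not queried from ufl):

import numpy

# Assumed reference vertices of the unit triangle; the midpoint is then the
# vertex average, i.e. (1/3, 1/3).
reference_vertices = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
midpoint = tuple(reference_vertices.mean(axis=0))
print(midpoint)  # (0.333..., 0.333...)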
Example #3
    def test_values(self, family, cell, degree, reference):
        # Create element
        element = create_basix_element(FiniteElement(family, cell, degree))

        # Get some points and check basis function values at points
        points = [random_point(element_coords(cell)) for i in range(5)]
        for x in points:
            table = element.tabulate(0, (x, ))
            basis = table[0]
            if sum(element.value_shape) == 1:
                for i, value in enumerate(basis[0]):
                    assert numpy.isclose(value, reference[i](x))
            else:
                for i, ref in enumerate(reference):
                    assert numpy.allclose(basis[0][i::len(reference)], ref(x))
Example #4
    def facet_edge_vectors(self, e, mt, tabledata, num_points):
        L = self.language

        # Get properties of domain
        domain = mt.terminal.ufl_domain()
        cellname = domain.ufl_cell().cellname()
        gdim = domain.geometric_dimension()
        coordinate_element = domain.ufl_coordinate_element()

        if cellname in ("tetrahedron", "hexahedron"):
            pass
        elif cellname in ("interval", "triangle", "quadrilateral"):
            raise RuntimeError(
                f"Physical facet edge vectors don't make sense for a {cellname} cell."
            )
        else:
            raise RuntimeError(f"Unhandled cell type: {cellname}.")

        # Get dimension and dofmap of scalar element
        assert isinstance(coordinate_element, MixedElement)
        assert coordinate_element.value_shape() == (gdim, )
        ufl_scalar_element, = set(coordinate_element.sub_elements())
        assert ufl_scalar_element.family() in ("Lagrange", "Q", "S")

        basix_scalar_element = create_basix_element(ufl_scalar_element)
        num_scalar_dofs = basix_scalar_element.dim

        # Get edge vertices
        facet = self.symbols.entity("facet", mt.restriction)
        facet_edge = mt.component[0]
        facet_edge_vertices = L.Symbol(f"{cellname}_facet_edge_vertices")
        vertex0 = facet_edge_vertices[facet][facet_edge][0]
        vertex1 = facet_edge_vertices[facet][facet_edge][1]

        # Get dofs and component
        component = mt.component[1]
        assert coordinate_element.degree() == 1, "Assuming degree 1 element"
        dof0 = vertex0
        dof1 = vertex1
        expr = (self.symbols.domain_dof_access(
            dof0, component, gdim, num_scalar_dofs, mt.restriction) -
                self.symbols.domain_dof_access(
                    dof1, component, gdim, num_scalar_dofs, mt.restriction))

        return expr
Example #5
def _compute_dofmap_ir(ufl_element, element_numbers, dofmap_names):
    """Compute intermediate representation of dofmap."""

    logger.info(f"Computing IR for dofmap of {ufl_element}")

    # Create basix elements
    basix_element = create_basix_element(ufl_element)

    # Store id
    ir = {"id": element_numbers[ufl_element]}
    ir["name"] = dofmap_names[ufl_element]

    # Compute data for each function
    ir["signature"] = "FFCX dofmap for " + repr(ufl_element)
    ir["create_sub_dofmap"] = [
        dofmap_names[e] for e in ufl_element.sub_elements()
    ]
    ir["num_sub_dofmaps"] = ufl_element.num_sub_elements()

    if hasattr(basix_element, "block_size"):
        ir["block_size"] = basix_element.block_size
        basix_element = basix_element.sub_element
    else:
        ir["block_size"] = 1

    ir["base_transformations"] = basix_element.base_transformations

    # Precompute repeatedly used items
    for i in basix_element.entity_dofs:
        if max(i) != min(i):
            raise RuntimeError(
                "Elements with different numbers of DOFs on subentities of the same dimension"
                " are not yet supported in FFCx.")
    num_dofs_per_entity = [i[0] for i in basix_element.entity_dofs]

    ir["num_entity_dofs"] = num_dofs_per_entity
    ir["tabulate_entity_dofs"] = (basix_element.entity_dof_numbers,
                                  num_dofs_per_entity)

    ir["num_global_support_dofs"] = basix_element.num_global_support_dofs
    ir["num_element_support_dofs"] = basix_element.dim - ir[
        "num_global_support_dofs"]

    return ir_dofmap(**ir)
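The loop over entity_dofs above rejects elements whose sub-entities of one dimension carry different numbers of DOFs. A standalone sketch of that check, with hypothetical entity_dofs data (per-dimension lists of DOF counts per entity) instead of a real basix element:

# Hypothetical entity_dofs for a P2 triangle: one DOF per vertex, one per edge,
# none on the cell interior.
entity_dofs = [[1, 1, 1], [1, 1, 1], [0]]

for counts in entity_dofs:
    if max(counts) != min(counts):
        raise RuntimeError(
            "Elements with different numbers of DOFs on subentities of the same"
            " dimension are not yet supported in FFCx.")

num_dofs_per_entity = [counts[0] for counts in entity_dofs]
print(num_dofs_per_entity)  # [1, 1, 0]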
Example #6
    def cell_edge_vectors(self, e, mt, tabledata, num_points):
        # Get properties of domain
        domain = mt.terminal.ufl_domain()
        cellname = domain.ufl_cell().cellname()
        gdim = domain.geometric_dimension()
        coordinate_element = domain.ufl_coordinate_element()

        if cellname in ("triangle", "tetrahedron", "quadrilateral",
                        "hexahedron"):
            pass
        elif cellname == "interval":
            raise RuntimeError(
                "Physical cell edge vectors don't make sense for an interval cell."
            )
        else:
            raise RuntimeError(f"Unhandled cell type: {cellname}.")

        # Get dimension and dofmap of scalar element
        assert isinstance(coordinate_element, MixedElement)
        assert coordinate_element.value_shape() == (gdim, )
        ufl_scalar_element, = set(coordinate_element.sub_elements())
        assert ufl_scalar_element.family() in ("Lagrange", "Q", "S")

        basix_scalar_element = create_basix_element(ufl_scalar_element)
        vertex_scalar_dofs = basix_scalar_element.entity_dof_numbers[0]
        num_scalar_dofs = basix_scalar_element.dim

        # Get edge vertices
        edge = mt.component[0]
        vertex0, vertex1 = basix_scalar_element.reference_topology[1][edge]

        # Get dofs and component
        dof0, = vertex_scalar_dofs[vertex0]
        dof1, = vertex_scalar_dofs[vertex1]
        component = mt.component[1]

        return self.symbols.domain_dof_access(
            dof0, component, gdim, num_scalar_dofs,
            mt.restriction) - self.symbols.domain_dof_access(
                dof1, component, gdim, num_scalar_dofs, mt.restriction)
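The geometric idea here can be shown standalone: with an affine (degree 1) coordinate element the coordinate DOFs are the cell vertices, so each edge vector is the difference of two rows of the coordinate array. The edge-to-vertex table below is an assumption written out by hand for a reference triangle, not read from basix_scalar_element.reference_topology:

import numpy

# Assumed edge -> (vertex0, vertex1) numbering for a triangle.
triangle_edges = [(1, 2), (0, 2), (0, 1)]

# Physical vertex coordinates of one cell (rows = coordinate DOFs for degree 1).
coordinate_dofs = numpy.array([[0.0, 0.0], [2.0, 0.0], [0.0, 1.0]])

for edge, (v0, v1) in enumerate(triangle_edges):
    # Mirrors domain_dof_access(dof0, ...) - domain_dof_access(dof1, ...) above
    print(edge, coordinate_dofs[v0] - coordinate_dofs[v1])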
Example #7
    def cell_vertices(self, e, mt, tabledata, num_points):
        # Get properties of domain
        domain = mt.terminal.ufl_domain()
        gdim = domain.geometric_dimension()
        coordinate_element = domain.ufl_coordinate_element()

        # Get dimension and dofmap of scalar element
        assert isinstance(coordinate_element, MixedElement)
        assert coordinate_element.value_shape() == (gdim, )
        ufl_scalar_element, = set(coordinate_element.sub_elements())
        assert ufl_scalar_element.family() in ("Lagrange", "Q", "S")

        basix_scalar_element = create_basix_element(ufl_scalar_element)
        vertex_scalar_dofs = basix_scalar_element.entity_dof_numbers[0]
        num_scalar_dofs = basix_scalar_element.dim

        # Get dof and component
        dof, = vertex_scalar_dofs[mt.component[0]]
        component = mt.component[1]

        expr = self.symbols.domain_dof_access(dof, component, gdim,
                                              num_scalar_dofs, mt.restriction)
        return expr
Example #8
def num_coordinate_component_dofs(coordinate_element):
    """Get the number of dofs for a coordinate component for this degree."""
    return create_basix_element(coordinate_element).sub_element.dim
Example #9
def xtest_hhj(degree, expected_dim):
    "Test space dimensions of Hellan-Herrmann-Johnson element."
    P = create_basix_element(FiniteElement("HHJ", "triangle", degree))
    assert P.dim == expected_dim
Example #10
def test_regge(degree, expected_dim):
    "Test space dimensions of generalized Regge element."
    P = create_basix_element(FiniteElement("Regge", "triangle", degree))
    assert P.dim == expected_dim
Example #11
def xtest_continuous_lagrange_quadrilateral_spectral(degree, expected_dim):
    "Test space dimensions of continuous TensorProduct elements (quadrilateral)."
    P = create_basix_element(
        FiniteElement("Lagrange", "quadrilateral", degree, variant="spectral"))
    assert P.dim == expected_dim
Example #12
def _compute_expression_ir(expression, index, prefix, analysis, parameters,
                           visualise):

    logger.info(f"Computing IR for expression {index}")

    # Compute representation
    ir = {}

    original_expression = (expression[2], expression[1])
    sig = naming.compute_signature([original_expression], "", parameters)
    ir["name"] = "expression_{!s}".format(sig)

    original_expression = expression[2]
    points = expression[1]
    expression = expression[0]

    try:
        cell = expression.ufl_domain().ufl_cell()
    except AttributeError:
        # This case corresponds to a spatially constant expression without any dependencies
        cell = None

    # Prepare dimensions of all unique elements in the expression, including
    # elements for arguments, coefficients and coordinate mappings
    ir["element_dimensions"] = {
        ufl_element: create_basix_element(ufl_element).dim
        for ufl_element in analysis.unique_elements
    }

    # Extract dimensions for elements of arguments only
    arguments = ufl.algorithms.extract_arguments(expression)
    argument_elements = tuple(f.ufl_element() for f in arguments)
    argument_dimensions = [
        ir["element_dimensions"][ufl_element]
        for ufl_element in argument_elements
    ]

    tensor_shape = argument_dimensions
    ir["tensor_shape"] = tensor_shape

    ir["expression_shape"] = list(expression.ufl_shape)

    coefficients = ufl.algorithms.extract_coefficients(expression)
    coefficient_numbering = {}
    for i, coeff in enumerate(coefficients):
        coefficient_numbering[coeff] = i

    # Add coefficient numbering to IR
    ir["coefficient_numbering"] = coefficient_numbering

    original_coefficient_positions = []
    original_coefficients = ufl.algorithms.extract_coefficients(
        original_expression)
    for coeff in coefficients:
        original_coefficient_positions.append(
            original_coefficients.index(coeff))

    ir["original_coefficient_positions"] = original_coefficient_positions

    coefficient_elements = tuple(f.ufl_element() for f in coefficients)

    offsets = {}
    _offset = 0
    for i, el in enumerate(coefficient_elements):
        offsets[coefficients[i]] = _offset
        _offset += ir["element_dimensions"][el]

    # Copy offsets also into IR
    ir["coefficient_offsets"] = offsets

    ir["integral_type"] = "expression"
    ir["entitytype"] = "cell"

    # Build offsets for Constants
    original_constant_offsets = {}
    _offset = 0
    for constant in ufl.algorithms.analysis.extract_constants(expression):
        original_constant_offsets[constant] = _offset
        _offset += numpy.product(constant.ufl_shape, dtype=int)

    ir["original_constant_offsets"] = original_constant_offsets

    ir["points"] = points

    weights = numpy.array([1.0] * points.shape[0])
    rule = QuadratureRule(points, weights)
    integrands = {rule: expression}

    if cell is None:
        assert len(ir["original_coefficient_positions"]) == 0 and len(
            ir["original_constant_offsets"]) == 0

    expression_ir = compute_integral_ir(cell, ir["integral_type"],
                                        ir["entitytype"], integrands,
                                        tensor_shape, parameters, visualise)

    ir.update(expression_ir)

    return ir_expression(**ir)
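The constant-offset loop above packs flattened constants back to back: each constant's offset is the total number of scalar entries of the constants before it. A small sketch with made-up shapes instead of real ufl Constants:

import numpy

# Hypothetical constant shapes: a scalar, a 3-vector and a 2x2 tensor.
constant_shapes = {"c0": (), "c1": (3, ), "c2": (2, 2)}

offsets = {}
_offset = 0
for name, shape in constant_shapes.items():
    offsets[name] = _offset
    _offset += numpy.prod(shape, dtype=int)  # numpy.product in the code above

print(offsets)  # {'c0': 0, 'c1': 1, 'c2': 4}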
Example #13
def _compute_integral_ir(form_data, form_index, prefix, element_numbers,
                         integral_names, parameters, visualise):
    """Compute intermediate representation for form integrals."""

    _entity_types = {
        "cell": "cell",
        "exterior_facet": "facet",
        "interior_facet": "facet",
        "vertex": "vertex",
        "custom": "cell"
    }

    # Iterate over groups of integrals
    irs = []
    for itg_data_index, itg_data in enumerate(form_data.integral_data):

        logger.info(
            f"Computing IR for integral in integral group {itg_data_index}")

        # Compute representation
        entitytype = _entity_types[itg_data.integral_type]
        cell = itg_data.domain.ufl_cell()
        cellname = cell.cellname()
        tdim = cell.topological_dimension()
        assert all(tdim == itg.ufl_domain().topological_dimension()
                   for itg in itg_data.integrals)

        ir = {
            "integral_type": itg_data.integral_type,
            "subdomain_id": itg_data.subdomain_id,
            "rank": form_data.rank,
            "geometric_dimension": form_data.geometric_dimension,
            "topological_dimension": tdim,
            "entitytype": entitytype,
            "num_facets": cell.num_facets(),
            "num_vertices": cell.num_vertices(),
            "enabled_coefficients": itg_data.enabled_coefficients,
            "cell_shape": cellname
        }

        # Get element space dimensions
        unique_elements = element_numbers.keys()
        ir["element_dimensions"] = {
            ufl_element: create_basix_element(ufl_element).dim
            for ufl_element in unique_elements
        }

        ir["element_ids"] = {
            ufl_element: i
            for i, ufl_element in enumerate(unique_elements)
        }

        # Create dimensions of primary indices, needed to reset the argument
        # 'A' given to tabulate_tensor() by the assembler.
        argument_dimensions = [
            ir["element_dimensions"][ufl_element]
            for ufl_element in form_data.argument_elements
        ]

        # Compute shape of element tensor
        if ir["integral_type"] == "interior_facet":
            ir["tensor_shape"] = [2 * dim for dim in argument_dimensions]
        else:
            ir["tensor_shape"] = argument_dimensions

        integral_type = itg_data.integral_type
        cell = itg_data.domain.ufl_cell()

        # Group integrands with the same quadrature rule
        grouped_integrands = {}
        for integral in itg_data.integrals:
            md = integral.metadata() or {}
            scheme = md["quadrature_rule"]
            degree = md["quadrature_degree"]

            if scheme == "custom":
                points = md["quadrature_points"]
                weights = md["quadrature_weights"]
            elif scheme == "vertex":
                # FIXME: Could this come from basix?

                # The vertex scheme, i.e., averaging the function value in the
                # vertices and multiplying with the simplex volume, is only of
                # order 1 and inferior to other generic schemes in terms of
                # error reduction. Equation systems generated with the vertex
                # scheme have some properties that other schemes lack, e.g., the
                # mass matrix is a simple diagonal matrix. This may be
                # prescribed in certain cases.
                if degree > 1:
                    warnings.warn(
                        "Explicitly selected vertex quadrature (degree 1), but requested degree is {}."
                        .format(degree))
                if cellname == "tetrahedron":
                    points, weights = (numpy.array([[0.0, 0.0, 0.0],
                                                    [1.0, 0.0, 0.0],
                                                    [0.0, 1.0, 0.0],
                                                    [0.0, 0.0, 1.0]]),
                                       numpy.array([
                                           1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0,
                                           1.0 / 24.0
                                       ]))
                elif cellname == "triangle":
                    points, weights = (numpy.array([[0.0, 0.0], [1.0, 0.0],
                                                    [0.0, 1.0]]),
                                       numpy.array(
                                           [1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0]))
                elif cellname == "interval":
                    # Trapezoidal rule
                    points, weights = (numpy.array([[0.0], [1.0]]),
                                       numpy.array([1.0 / 2.0, 1.0 / 2.0]))
            else:
                points, weights = create_quadrature_points_and_weights(
                    integral_type, cell, degree, scheme)

            points = numpy.asarray(points)
            weights = numpy.asarray(weights)

            rule = QuadratureRule(points, weights)

            if rule not in grouped_integrands:
                grouped_integrands[rule] = []

            grouped_integrands[rule].append(integral.integrand())

        sorted_integrals = {}
        for rule, integrands in grouped_integrands.items():
            integrands_summed = sorted_expr_sum(integrands)

            integral_new = Integral(integrands_summed, itg_data.integral_type,
                                    itg_data.domain, itg_data.subdomain_id, {},
                                    None)
            sorted_integrals[rule] = integral_new

        # TODO: See if coefficient_numbering can be removed
        # Build coefficient numbering for UFC interface here, to avoid
        # renumbering in UFL and application of replace mapping
        coefficient_numbering = {}
        for i, f in enumerate(form_data.reduced_coefficients):
            coefficient_numbering[f] = i

        # Add coefficient numbering to IR
        ir["coefficient_numbering"] = coefficient_numbering

        index_to_coeff = sorted([(v, k)
                                 for k, v in coefficient_numbering.items()])
        offsets = {}
        width = 2 if integral_type == "interior_facet" else 1
        _offset = 0
        for k, el in zip(index_to_coeff, form_data.coefficient_elements):
            offsets[k[1]] = _offset
            _offset += width * ir["element_dimensions"][el]

        # Copy offsets also into IR
        ir["coefficient_offsets"] = offsets

        # Build offsets for Constants
        original_constant_offsets = {}
        _offset = 0
        for constant in form_data.original_form.constants():
            original_constant_offsets[constant] = _offset
            _offset += numpy.product(constant.ufl_shape, dtype=int)

        ir["original_constant_offsets"] = original_constant_offsets

        ir["precision"] = itg_data.metadata["precision"]

        # Create map from number of quadrature points -> integrand
        integrands = {
            rule: integral.integrand()
            for rule, integral in sorted_integrals.items()
        }

        # Build more specific intermediate representation
        integral_ir = compute_integral_ir(itg_data.domain.ufl_cell(),
                                          itg_data.integral_type,
                                          ir["entitytype"], integrands,
                                          ir["tensor_shape"], parameters,
                                          visualise)

        ir.update(integral_ir)

        # Fetch name
        ir["name"] = integral_names[(form_index, itg_data_index)]

        irs.append(ir_integral(**ir))

    return irs
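As a sanity check on the hard-coded vertex scheme above, the weights for each cell sum to the reference-cell volume (1 for the interval, 1/2 for the triangle, 1/6 for the tetrahedron), which an order-1 rule must satisfy to integrate constants exactly:

import numpy

vertex_weights = {
    "interval": numpy.array([1.0 / 2.0, 1.0 / 2.0]),
    "triangle": numpy.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0]),
    "tetrahedron": numpy.array([1.0 / 24.0] * 4),
}
reference_volume = {"interval": 1.0, "triangle": 1.0 / 2.0, "tetrahedron": 1.0 / 6.0}

for cellname, weights in vertex_weights.items():
    assert numpy.isclose(weights.sum(), reference_volume[cellname])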
Example #14
def _compute_coordinate_mapping_ir(ufl_coordinate_element, prefix,
                                   element_numbers, coordinate_mapping_names,
                                   dofmap_names, finite_element_names):
    """Compute intermediate representation of coordinate mapping."""

    logger.info(
        f"Computing IR for coordinate mapping {ufl_coordinate_element}")

    cell = ufl_coordinate_element.cell()
    cellname = cell.cellname()

    assert ufl_coordinate_element.value_shape() == (
        cell.geometric_dimension(), )

    # Compute element values
    tables = _tabulate_coordinate_mapping_basis(ufl_coordinate_element)

    # Store id
    ir = {"id": element_numbers[ufl_coordinate_element]}
    ir["prefix"] = prefix
    ir["name"] = coordinate_mapping_names[ufl_coordinate_element]

    # Compute data for each function
    ir["signature"] = "FFCX coordinate_mapping from " + repr(
        ufl_coordinate_element)
    ir["cell_shape"] = cellname
    ir["topological_dimension"] = cell.topological_dimension()
    ir["geometric_dimension"] = ufl_coordinate_element.value_size()

    ir["compute_physical_coordinates"] = None  # currently unused, corresponds to function name
    ir["compute_reference_coordinates"] = None  # currently unused, corresponds to function name
    ir["compute_jacobians"] = None  # currently unused, corresponds to function name
    ir["compute_jacobian_determinants"] = None  # currently unused, corresponds to function name
    ir["compute_jacobian_inverses"] = None  # currently unused, corresponds to function name
    ir["compute_geometry"] = None  # currently unused, corresponds to function name

    # NB! The entries below break the pattern of using ir keywords == code keywords,
    # which I personally don't find very useful anyway (martinal).

    basix_element = create_basix_element(ufl_coordinate_element)

    ir["needs_transformation_data"] = 0
    for p in basix_element.base_transformations:
        if not numpy.allclose(p, numpy.identity(len(p))):
            ir["needs_transformation_data"] = 1
    ir["base_transformations"] = basix_element.sub_element.base_transformations

    # Store tables and other coordinate element data
    ir["tables"] = tables
    ir["coordinate_element_degree"] = ufl_coordinate_element.degree()
    ir["coordinate_element_family"] = basix_element.family_name
    ir["num_scalar_coordinate_element_dofs"] = tables["x0"].shape[0]
    ir["is_affine"] = ir["coordinate_element_degree"] == 1 and cellname in (
        "interval", "triangle", "tetrahedron")

    # Get classnames for coordinate element
    ir["coordinate_finite_element_classname"] = finite_element_names[
        ufl_coordinate_element]

    # Get classnames for finite element and dofmap of scalar subelement
    scalar_element = ufl_coordinate_element.sub_elements()[0]
    ir["scalar_coordinate_finite_element_classname"] = finite_element_names[
        scalar_element]
    ir["scalar_dofmap_name"] = dofmap_names[scalar_element]

    return ir_coordinate_map(**ir)
Example #15
def get_ffcx_table_values(points, cell, integral_type, ufl_element, avg,
                          entitytype, derivative_counts, flat_component):
    """Extract values from ffcx element table.

    Returns a 3D numpy array with axes
    (entity number, quadrature point number, dof number)
    """
    deriv_order = sum(derivative_counts)

    if integral_type in ufl.custom_integral_types:
        # Use quadrature points on cell for analysis in custom integral types
        integral_type = "cell"
        assert not avg

    if integral_type == "expression":
        # FFCX tables for expression are generated as interior cell points
        integral_type = "cell"

    if avg in ("cell", "facet"):
        # Redefine points to compute average tables

        # Make sure this is not called with points; that wouldn't make sense
        # assert points is None

        # Not expecting derivatives of averages
        assert not any(derivative_counts)
        assert deriv_order == 0

        # Doesn't matter if it's exterior or interior facet integral,
        # just need a valid integral type to create quadrature rule
        if avg == "cell":
            integral_type = "cell"
        elif avg == "facet":
            integral_type = "exterior_facet"

        # Make quadrature rule and get points and weights
        points, weights = create_quadrature_points_and_weights(
            integral_type, cell, ufl_element.degree(), "default")

    # Tabulate table of basis functions and derivatives in points for each entity
    tdim = cell.topological_dimension()
    entity_dim = integral_type_to_entity_dim(integral_type, tdim)
    num_entities = ufl.cell.num_cell_entities[cell.cellname()][entity_dim]

    numpy.set_printoptions(suppress=True, precision=2)
    basix_element = create_basix_element(ufl_element)

    # Extract arrays for the right scalar component
    component_tables = []
    sh = ufl_element.value_shape()
    if sh == ():
        # Scalar valued element
        for entity in range(num_entities):
            entity_points = map_integral_points(points, integral_type, cell,
                                                entity)
            # basix
            tbl = basix_element.tabulate(deriv_order, entity_points)
            index = basix_index(*derivative_counts)
            tbl = tbl[index].transpose()

            component_tables.append(tbl)
    elif len(sh) > 0 and ufl_element.num_sub_elements() == 0:
        # Vector- or tensor-valued elements that are not built from sub-elements;
        # map flat_component back to a tensor component
        (_, f2t) = ufl.permutation.build_component_numbering(
            sh, ufl_element.symmetry())
        t_comp = f2t[flat_component]

        for entity in range(num_entities):
            entity_points = map_integral_points(points, integral_type, cell,
                                                entity)
            tbl = basix_element.tabulate(deriv_order, entity_points)
            tbl = tbl[basix_index(*derivative_counts)]
            sum_sh = sum(sh)
            bshape = (tbl.shape[0], ) + sh + (tbl.shape[1] // sum_sh, )
            tbl = tbl.reshape(bshape).transpose()

            if len(sh) == 1:
                component_tables.append(tbl[:, t_comp[0], :])
            elif len(sh) == 2:
                component_tables.append(tbl[:, t_comp[0], t_comp[1], :])
            else:
                raise RuntimeError(
                    "Cannot tabulate tensor valued element with rank > 2")
    else:
        # Vector-valued or mixed element
        sub_dims = [0] + [e.dim for e in basix_element.sub_elements]
        sub_cmps = [0] + [e.value_size for e in basix_element.sub_elements]

        irange = numpy.cumsum(sub_dims)
        crange = numpy.cumsum(sub_cmps)

        # Find index of sub element which corresponds to the current flat component
        component_element_index = numpy.where(
            crange <= flat_component)[0].shape[0] - 1

        ir = irange[component_element_index:component_element_index + 2]
        cr = crange[component_element_index:component_element_index + 2]

        component_element = basix_element.sub_elements[component_element_index]

        # Get the block size to switch XXYYZZ ordering to XYZXYZ
        if isinstance(ufl_element, (ufl.VectorElement, ufl.TensorElement)):
            block_size = basix_element.block_size
            ir = [ir[0] * block_size // irange[-1], irange[-1], block_size]

        def slice_size(r):
            if len(r) == 1:
                return r[0]
            if len(r) == 2:
                return r[1] - r[0]
            if len(r) == 3:
                return 1 + (r[1] - r[0] - 1) // r[2]

        for entity in range(num_entities):
            entity_points = map_integral_points(points, integral_type, cell,
                                                entity)

            # basix
            tbl = component_element.tabulate(deriv_order, entity_points)
            index = basix_index(*derivative_counts)
            tbl = tbl[index].transpose()

            # Prepare a padded table with zeros
            padded_shape = (
                basix_element.dim, ) + basix_element.value_shape + (
                    len(entity_points), )
            padded_tbl = numpy.zeros(padded_shape, dtype=tbl.dtype)

            tab = tbl.reshape(slice_size(ir), slice_size(cr), -1)

            padded_tbl[slice(*ir), slice(*cr)] = tab

            component_tables.append(padded_tbl[:, flat_component, :])

    if avg in ("cell", "facet"):
        # Compute the numeric integral of each component table
        wsum = sum(weights)
        for entity, tbl in enumerate(component_tables):
            num_dofs = tbl.shape[0]
            tbl = numpy.dot(tbl, weights) / wsum
            tbl = numpy.reshape(tbl, (num_dofs, 1))
            component_tables[entity] = tbl

    # Loop over entities and fill table blockwise (each block = points x dofs)
    # Reorder axes as (points, dofs) instead of (dofs, points)
    assert len(component_tables) == num_entities
    num_dofs, num_points = component_tables[0].shape
    shape = (num_entities, num_points, num_dofs)
    res = numpy.zeros(shape)
    for entity in range(num_entities):
        res[entity, :, :] = numpy.transpose(component_tables[entity])
    return res
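The nested slice_size helper mirrors Python slice semantics for the 1-, 2- and 3-element range forms used for ir and cr above. A quick standalone check:

def slice_size(r):
    if len(r) == 1:
        return r[0]
    if len(r) == 2:
        return r[1] - r[0]
    if len(r) == 3:
        return 1 + (r[1] - r[0] - 1) // r[2]

# (size,) is taken as a plain length, (start, stop) as stop - start, and
# (start, stop, step) as the number of indices hit by range(start, stop, step).
assert slice_size((4, )) == 4
assert slice_size((2, 7)) == 5
assert slice_size((0, 9, 3)) == len(range(0, 9, 3))  # == 3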
Example #16
def build_optimized_tables(quadrature_rule,
                           cell,
                           integral_type,
                           entitytype,
                           modified_terminals,
                           existing_tables,
                           rtol=default_rtol,
                           atol=default_atol):

    # Build tables needed by all modified terminals
    tables, mt_table_names, table_origins = build_element_tables(
        quadrature_rule,
        cell,
        integral_type,
        entitytype,
        modified_terminals,
        rtol=rtol,
        atol=atol)

    # Optimize tables and get table name and dofrange for each modified terminal
    unique_tables, unique_table_origins, table_unames, table_ranges, table_dofmaps, table_permuted, \
        table_original_num_dofs = optimize_element_tables(
            tables, table_origins, rtol=rtol, atol=atol)

    # Get num_dofs for all tables before they can be deleted later
    unique_table_num_dofs = {
        uname: tbl.shape[-1]
        for uname, tbl in unique_tables.items()
    }

    # Analyze tables for properties useful for optimization
    unique_table_ttypes = analyse_table_types(unique_tables,
                                              rtol=rtol,
                                              atol=atol)

    # Compress tables that are constant along num_entities or num_points
    for uname, tabletype in unique_table_ttypes.items():
        if tabletype in piecewise_ttypes:
            # Reduce table to dimension 1 along num_points axis in generated code
            unique_tables[uname] = unique_tables[uname][:, :, :1, :]
        if tabletype in uniform_ttypes:
            # Reduce table to dimension 1 along num_entities axis in generated code
            unique_tables[uname] = unique_tables[uname][:, :1, :, :]
        if not table_permuted[uname]:
            # Reduce table to dimension 1 along num_perms axis in generated code
            unique_tables[uname] = unique_tables[uname][:1, :, :, :]

    # Delete tables not referenced by modified terminals
    used_unames = set(table_unames[name] for name in mt_table_names.values())
    unused_unames = set(unique_tables.keys()) - used_unames
    for uname in unused_unames:
        del unique_table_ttypes[uname]
        del unique_tables[uname]

    # Change tables to point to existing optimized tables
    # (i.e. tables from other contexts that have been compressed to look the same)
    name_map = {}
    existing_names = sorted(existing_tables)
    for uname in sorted(unique_tables):
        utbl = unique_tables[uname]
        for i, ename in enumerate(existing_names):
            etbl = existing_tables[ename]
            if equal_tables(utbl, etbl, rtol=rtol, atol=atol):
                # Setup table name mapping
                name_map[uname] = ename
                # Don't visit this table again (just to avoid the processing)
                existing_names.pop(i)
                break

    # Replace unique table names
    for uname, ename in name_map.items():
        unique_tables[ename] = existing_tables[ename]
        del unique_tables[uname]
        unique_table_ttypes[ename] = unique_table_ttypes[uname]
        del unique_table_ttypes[uname]

    needs_transformation_data = False
    # Build mapping from modified terminal to unique table with metadata
    # { mt: (unique name,
    #        (table dof range begin, table dof range end),
    #        [top parent element dof index for each local index],
    #        ttype, original_element_dim) }
    mt_unique_table_reference = {}
    for mt, name in list(mt_table_names.items()):
        # Get metadata for the original table (name is not the unique name!)
        dofrange = table_ranges[name]
        dofmap = table_dofmaps[name]
        original_dim = table_original_num_dofs[name]
        is_permuted = table_permuted[name]
        if is_permuted:
            needs_transformation_data = True

        # Map name -> uname
        uname = table_unames[name]

        # Map uname -> ename
        ename = name_map.get(uname, uname)

        # Some more metadata stored under the ename
        ttype = unique_table_ttypes[ename]

        offset = 0
        # Add offset to dofmap and dofrange for restricted terminals
        if mt.restriction and isinstance(mt.terminal,
                                         ufl.classes.FormArgument):
            # offset = 0 or number of dofs before table optimization
            offset = ufc_restriction_offset(mt.restriction, original_dim)
            (b, e) = dofrange
            dofrange = (b + offset, e + offset)
            dofmap = tuple(i + offset for i in dofmap)

        base_transformations = [[[
            p[i - offset][j - offset] for j in dofmap
        ] for i in dofmap] for p in create_basix_element(
            table_origins[name][0]).base_transformations]

        needs_transformation_data = False
        for p in base_transformations:
            if not numpy.allclose(p, numpy.identity(len(p))):
                needs_transformation_data = True

        # Store reference to unique table for this mt
        mt_unique_table_reference[mt] = unique_table_reference_t(
            ename, unique_tables[ename], dofrange, dofmap, original_dim, ttype,
            ttype in piecewise_ttypes, ttype in uniform_ttypes, is_permuted,
            base_transformations, needs_transformation_data)

    return (unique_tables, unique_table_ttypes, unique_table_num_dofs,
            mt_unique_table_reference)
Example #17
def test_discontinuous_lagrange(degree, expected_dim):
    "Test space dimensions of discontinuous Lagrange elements."
    P = create_basix_element(FiniteElement("DG", "triangle", degree))
    assert P.dim == expected_dim
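Tests like this one are typically driven by pytest parametrization over (degree, expected_dim) pairs. A hedged sketch of what such a decorator might look like for discontinuous Lagrange on a triangle, where the space dimension is (n + 1)(n + 2) / 2; the pairs and decorator below are an assumption, not copied from the FFCx test suite:

import pytest

# (degree, expected_dim) for DG_n on a triangle: dim = (n + 1)(n + 2) / 2.
@pytest.mark.parametrize("degree, expected_dim", [(0, 1), (1, 3), (2, 6), (3, 10)])
def test_discontinuous_lagrange(degree, expected_dim):
    "Test space dimensions of discontinuous Lagrange elements."
    P = create_basix_element(FiniteElement("DG", "triangle", degree))
    assert P.dim == expected_dim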