Example #1
0
def _interpolate_vertex_values(ufl_element, fiat_element):
    "Compute intermediate representation of interpolate_vertex_values."

    # Reject element types this function cannot handle; callers receive
    # an explanatory string instead of an ir dict.
    for sub in all_elements(fiat_element):
        if isinstance(sub, QuadratureElement):
            return "Function is not supported/implemented for QuadratureElement."
        if isinstance(sub, HDivTrace):
            return "Function is not implemented for HDivTrace."

    cell = ufl_element.cell()
    tdim = cell.topological_dimension()

    ir = {
        "geometric_dimension": cell.geometric_dimension(),
        "topological_dimension": tdim,
    }

    # A Piola mapping anywhere in the element requires the Jacobian
    ir["needs_jacobian"] = any("piola" in m for m in fiat_element.mapping())
    ir["needs_oriented"] = needs_oriented_jacobian(fiat_element)

    # See note in _evaluate_dofs
    ir["reference_value_size"] = ufl_element.reference_value_size()
    ir["physical_value_size"] = ufl_element.value_size()

    # Vertices of the reference cell are the tabulation points
    vertices = reference_cell(cell.cellname()).get_vertices()

    # Compute data for each constituent element
    zero_deriv = (0,) * tdim
    element_data = []
    for sub in all_elements(fiat_element):
        element_data.append({
            # NB! value_shape of a FIAT element means reference_value_shape
            "reference_value_size": product(sub.value_shape()),
            # FIXME: THIS IS A BUG: should come from the corresponding
            # UFL element instead
            "physical_value_size": product(sub.value_shape()),
            "basis_values": sub.tabulate(0, vertices)[zero_deriv].transpose(),
            "mapping": sub.mapping()[0],
            "space_dim": sub.space_dimension(),
        })
    ir["element_data"] = element_data

    # FIXME: Temporary hack!
    if len(element_data) == 1:
        element_data[0]["physical_value_size"] = ir["physical_value_size"]

    # Consistency check, related to note in _evaluate_dofs
    # This will fail for e.g. (RT1 x DG0) on a manifold because of the above bug
    if sum(d["physical_value_size"] for d in element_data) != ir["physical_value_size"]:
        ir = "Failed to set physical value size correctly for subelements."
    elif sum(d["reference_value_size"] for d in element_data) != ir["reference_value_size"]:
        ir = "Failed to set reference value size correctly for subelements."

    return ir
def _extract_element_data(element_map, element_numbers):
    "Extract element data for psi_tables"

    data_by_counter = {}
    for counter_map in six.itervalues(element_map):
        for ufl_element, counter in six.iteritems(counter_map):
            # Build the FIAT counterpart of the UFL element
            fiat_element = create_element(ufl_element)

            number = element_numbers.get(ufl_element)
            if number is None:
                # FIXME: Should not be necessary, we should always know
                # the element number
                pass

            data_by_counter[counter] = {
                "value_size": product(ufl_element.value_shape()),
                "num_element_dofs": fiat_element.space_dimension(),
                "element_number": number,
            }

    return data_by_counter
Example #3
0
def _tabulate_empty_psi_table(tdim, deriv_order, element):
    "Tabulate psi table when there are no points (custom integrals)."

    # All combinations of partial derivatives up to given order
    gdim = tdim  # hack, consider passing gdim variable here
    derivs = [
        d
        for d in itertools.product(*(gdim * [list(range(0, deriv_order + 1))]))
    ]
    derivs = [d for d in derivs if sum(d) <= deriv_order]

    # Return empty table
    table = {}
    for d in derivs:
        value_shape = element.value_shape()
        if value_shape == ():
            table[d] = [[]]
        else:
            value_size = product(value_shape)
            table[d] = [[[] for c in range(value_size)]]

    # Let entity be 0 even for non-cells, this is for
    # custom integrals where we don't need tables to
    # contain multiple entitites
    entity = 0
    return {entity: table}
def _extract_element_data(element_map, element_numbers):
    "Extract element data for psi_tables"

    extracted = {}
    for elements in six.itervalues(element_map):
        for ufl_element, counter in six.iteritems(elements):
            # FIAT element corresponding to the UFL element
            fiat_element = create_element(ufl_element)

            # Number of value components of the element
            size = product(ufl_element.value_shape())

            element_number = element_numbers.get(ufl_element)
            if element_number is None:
                # FIXME: Should not be necessary, we should always know
                # the element number
                pass

            extracted[counter] = {
                "value_size": size,
                "num_element_dofs": fiat_element.space_dimension(),
                "element_number": element_number,
            }

    return extracted
def _value_size(element):
    """Compute value size of element, aka the number of components.

    The value size of a scalar field is 1, the value size of a vector
    field (is the number of components), the value size of a higher
    dimensional tensor field is the product of the value_shape of the
    field. Recall that all mixed elements are flattened.
    """
    shape = element.value_shape()
    if shape == ():
        return 1
    return product(shape)
def _value_size(element):
    """Compute value size of element, aka the number of components.

    The value size of a scalar field is 1, the value size of a vector
    field (is the number of components), the value size of a higher
    dimensional tensor field is the product of the value_shape of the
    field. Recall that all mixed elements are flattened.
    """
    shape = element.value_shape()
    if shape == ():
        return 1
    return product(shape)
def _optimize_tensor_contraction(A0, rank):
    "Compute optimized tensor contraction for given reference tensor."

    # Pick the FErari algorithm matching the tensor rank
    if rank == 1:
        optimize = binary.optimize_action
    elif rank == 2:
        optimize = binary.optimize
    else:
        warning("Tensor optimization only available for rank 1 and 2 tensors, skipping optimizations")
        return None

    # Report the tensor size before handing off to FErari
    dims = shape(A0)
    info("Calling FErari to optimize tensor of size %s (%d entries)",
         " x ".join(str(d) for d in dims), product(dims))

    return optimize(A0)
Example #8
0
def _tabulate_empty_psi_table(tdim, deriv_order, element):
    "Tabulate psi table when there are no points"

    # All combinations of partial derivatives up to given order
    gdim = tdim # hack, consider passing gdim variable here
    derivs = [d for d in itertools.product(*(gdim*[list(range(0, deriv_order + 1))]))]
    derivs = [d for d in derivs if sum(d) <= deriv_order]

    # Return empty table
    table = {}
    for d in derivs:
        value_shape = element.value_shape()
        if value_shape == ():
            table[d] = [[]]
        else:
            value_size = product(value_shape)
            table[d] = [[[] for c in range(value_size)]]

    return {None: table}
Example #9
0
def _optimize_tensor_contraction(A0, rank):
    "Compute optimized tensor contraction for given reference tensor."

    # FErari only provides algorithms for rank 1 and rank 2 tensors
    algorithms = {1: binary.optimize_action, 2: binary.optimize}
    optimize = algorithms.get(rank)
    if optimize is None:
        warning(
            "Tensor optimization only available for rank 1 and 2 tensors, skipping optimizations"
        )
        return None

    # Write a message
    info("Calling FErari to optimize tensor of size %s (%d entries)",
         " x ".join(str(d) for d in shape(A0)), product(shape(A0)))

    # Compute optimized tensor contraction
    return optimize(A0)
Example #10
0
def _generate_reference_offsets(fiat_element, offset=0):
    """Generate offsets: i.e value offset for each basis function
    relative to a reference element representation."""

    if isinstance(fiat_element, MixedElement):
        result = []
        for sub in fiat_element.elements():
            result.extend(_generate_reference_offsets(sub, offset))
            # NB! value_shape of a FIAT element means reference_value_shape
            offset += product(sub.value_shape())
        return result

    if isinstance(fiat_element, EnrichedElement):
        # Enriched subelements all share the same starting offset
        result = []
        for sub in fiat_element.elements():
            result.extend(_generate_reference_offsets(sub, offset))
        return result

    # Simple element: every basis function gets the same offset
    return [offset] * fiat_element.space_dimension()
Example #11
0
def _tabulate_empty_psi_table(tdim, deriv_order, element):
    "Tabulate psi table when there are no points"

    # All combinations of partial derivatives up to given order
    gdim = tdim  # hack, consider passing gdim variable here
    derivs = [
        d
        for d in itertools.product(*(gdim * [list(range(0, deriv_order + 1))]))
    ]
    derivs = [d for d in derivs if sum(d) <= deriv_order]

    # Return empty table
    table = {}
    for d in derivs:
        value_shape = element.value_shape()
        if value_shape == ():
            table[d] = [[]]
        else:
            value_size = product(value_shape)
            table[d] = [[[] for c in range(value_size)]]

    return {None: table}
Example #12
0
def _tabulate_empty_psi_table(tdim, deriv_order, element):
    "Tabulate psi table when there are no points (custom integrals)."

    # All combinations of partial derivatives up to given order
    gdim = tdim  # hack, consider passing gdim variable here
    derivs = [d for d in itertools.product(*(gdim*[list(range(0, deriv_order + 1))]))]
    derivs = [d for d in derivs if sum(d) <= deriv_order]

    # Return empty table
    table = {}
    for d in derivs:
        value_shape = element.value_shape()
        if value_shape == ():
            table[d] = [[]]
        else:
            value_size = product(value_shape)
            table[d] = [[[] for c in range(value_size)]]

    # Let entity be 0 even for non-cells, this is for
    # custom integrals where we don't need tables to
    # contain multiple entitites
    entity = 0
    return {entity: table}
def _extract_element_data(element_map, classnames):
    "Extract element data for psi_tables"

    element_data = {}
    for counter_map in element_map.values():
        for ufl_element, counter in counter_map.items():
            # FIAT element corresponding to the UFL element
            fiat_element = create_element(ufl_element)

            element_data[counter] = {
                "physical_value_size": product(ufl_element.value_shape()),
                "num_element_dofs": fiat_element.space_dimension(),
                "classname": classnames["finite_element"][ufl_element],
            }

    return element_data
Example #14
0
def _interpolate_vertex_values(ufl_element, fiat_element):
    "Compute intermediate representation of interpolate_vertex_values."

    # Unsupported element types: return an error string instead of an ir
    for e in all_elements(fiat_element):
        if isinstance(e, QuadratureElement):
            return "Function is not supported/implemented for QuadratureElement."
        if isinstance(e, HDivTrace):
            return "Function is not implemented for HDivTrace."

    cell = ufl_element.cell()
    cellname = cell.cellname()
    tdim = cell.topological_dimension()

    ir = {}
    ir["geometric_dimension"] = cell.geometric_dimension()
    ir["topological_dimension"] = tdim

    # Jacobian is needed whenever any subelement uses a Piola mapping
    ir["needs_jacobian"] = any("piola" in m for m in fiat_element.mapping())
    ir["needs_oriented"] = needs_oriented_jacobian(fiat_element)

    # See note in _evaluate_dofs
    ir["reference_value_size"] = ufl_element.reference_value_size()
    ir["physical_value_size"] = ufl_element.value_size()

    # Tabulate at the vertices of the reference cell
    vertices = reference_cell(cellname).get_vertices()
    key = (0,) * tdim

    ir["element_data"] = [{
        # NB! value_shape of fiat element e means reference_value_shape
        "reference_value_size": product(e.value_shape()),
        # FIXME: THIS IS A BUG: should be taken from the corresponding
        # UFL element instead
        "physical_value_size": product(e.value_shape()),
        "basis_values": e.tabulate(0, vertices)[key].transpose(),
        "mapping": e.mapping()[0],
        "space_dim": e.space_dimension(),
    } for e in all_elements(fiat_element)]

    # FIXME: Temporary hack!
    if len(ir["element_data"]) == 1:
        ir["element_data"][0]["physical_value_size"] = ir["physical_value_size"]

    # Consistency check, related to note in _evaluate_dofs
    # This will fail for e.g. (RT1 x DG0) on a manifold because of the above bug
    physical = sum(data["physical_value_size"] for data in ir["element_data"])
    if physical != ir["physical_value_size"]:
        return "Failed to set physical value size correctly for subelements."
    reference = sum(data["reference_value_size"] for data in ir["element_data"])
    if reference != ir["reference_value_size"]:
        return "Failed to set reference value size correctly for subelements."

    return ir
Example #15
0
def _evaluate_basis(ufl_element, fiat_element, epsilon):
    """Compute intermediate representation for evaluate_basis.

    Returns a dict of data needed by the evaluate_basis code generation,
    or an error string when the element is unsupported (tensor-valued,
    quadrature, trace, or elements without expansion coefficients).
    Coefficient and dmats values within epsilon of zero are clamped to
    exactly 0.0.
    """
    cell = ufl_element.cell()
    cellname = cell.cellname()

    # Handle Mixed and EnrichedElements by extracting 'sub' elements.
    elements = _extract_elements(fiat_element)
    physical_offsets = _generate_physical_offsets(ufl_element)
    reference_offsets = _generate_reference_offsets(fiat_element)
    mappings = fiat_element.mapping()

    # This function is evidently not implemented for TensorElements
    for e in elements:
        if (len(e.value_shape()) > 1) and (e.num_sub_elements() != 1):
            return "Function not supported/implemented for TensorElements."

    # Handle QuadratureElement, not supported because the basis is
    # only defined at the dof coordinates where the value is 1, so not
    # very interesting.
    for e in elements:
        if isinstance(e, QuadratureElement):
            return "Function not supported/implemented for QuadratureElement."
        if isinstance(e, HDivTrace):
            return "Function not supported for Trace elements"

    # Skip this function for TensorProductElement if get_coeffs is not implemented
    for e in elements:
        try:
            e.get_coeffs()
        except NotImplementedError:
            return "Function is not supported/implemented."

    # Initialise data with 'global' values.
    data = {
        "reference_value_size": ufl_element.reference_value_size(),
        "physical_value_size": ufl_element.value_size(),
        "cellname": cellname,
        "topological_dimension": cell.topological_dimension(),
        "geometric_dimension": cell.geometric_dimension(),
        "space_dimension": fiat_element.space_dimension(),
        "needs_oriented": needs_oriented_jacobian(fiat_element),
        "max_degree": max([e.degree() for e in elements])
    }

    # Loop element and space dimensions to generate dof data.
    # 'dof' is a running index across all subelements, used to pick the
    # matching mapping and offsets.
    dof = 0
    dofs_data = []
    for e in elements:
        num_components = product(e.value_shape())
        coeffs = e.get_coeffs()
        num_expansion_members = e.get_num_members(e.degree())
        dmats = e.dmats()

        # Clamp dmats zeros
        dmats = numpy.asarray(dmats)
        dmats[numpy.where(numpy.isclose(dmats, 0.0, rtol=epsilon,
                                        atol=epsilon))] = 0.0

        # Extracted parts of dd below that are common for the element
        # here.  These dict entries are added to each dof_data dict
        # for each dof, because that's what the code generation
        # implementation expects.  If the code generation needs this
        # structure to be optimized in the future, we can store this
        # data for each subelement instead of for each dof.
        subelement_data = {
            "embedded_degree": e.degree(),
            "num_components": num_components,
            "dmats": dmats,
            "num_expansion_members": num_expansion_members,
        }
        value_rank = len(e.value_shape())

        for i in range(e.space_dimension()):
            # Collect the expansion coefficients of basis function i,
            # flattened to one entry per value component.
            if num_components == 1:
                coefficients = [coeffs[i]]
            elif value_rank == 1:
                # Handle coefficients for vector valued basis elements
                # [Raviart-Thomas, Brezzi-Douglas-Marini (BDM)].
                coefficients = [coeffs[i][c] for c in range(num_components)]
            elif value_rank == 2:
                # Handle coefficients for tensor valued basis elements.
                # [Regge]
                coefficients = [
                    coeffs[i][p][q] for p in range(e.value_shape()[0])
                    for q in range(e.value_shape()[1])
                ]
            else:
                # NOTE(review): error() presumably raises; otherwise
                # 'coefficients' would be unbound below — confirm.
                error("Unknown situation with num_components > 1")

            # Clamp coefficient zeros
            coefficients = numpy.asarray(coefficients)
            coefficients[numpy.where(
                numpy.isclose(coefficients, 0.0, rtol=epsilon,
                              atol=epsilon))] = 0.0

            dof_data = {
                "coeffs": coefficients,
                "mapping": mappings[dof],
                "physical_offset": physical_offsets[dof],
                "reference_offset": reference_offsets[dof],
            }
            # Still storing element data in dd to avoid rewriting dependent code
            dof_data.update(subelement_data)

            # This list will hold one dd dict for each dof
            dofs_data.append(dof_data)
            dof += 1

    data["dofs_data"] = dofs_data

    return data
Example #16
0
def _evaluate_basis(ufl_element, fiat_element, epsilon):
    """Compute intermediate representation for evaluate_basis.

    Returns a dict of data needed by the evaluate_basis code generation,
    or an error string when the element is unsupported (tensor-valued,
    quadrature, trace, or elements without expansion coefficients).
    Coefficient and dmats values within epsilon of zero are clamped to
    exactly 0.0.
    """
    cell = ufl_element.cell()
    cellname = cell.cellname()

    # Handle Mixed and EnrichedElements by extracting 'sub' elements.
    elements = _extract_elements(fiat_element)
    physical_offsets = _generate_physical_offsets(ufl_element)
    reference_offsets = _generate_reference_offsets(fiat_element)
    mappings = fiat_element.mapping()

    # This function is evidently not implemented for TensorElements
    for e in elements:
        if (len(e.value_shape()) > 1) and (e.num_sub_elements() != 1):
            return "Function not supported/implemented for TensorElements."

    # Handle QuadratureElement, not supported because the basis is
    # only defined at the dof coordinates where the value is 1, so not
    # very interesting.
    for e in elements:
        if isinstance(e, QuadratureElement):
            return "Function not supported/implemented for QuadratureElement."
        if isinstance(e, HDivTrace):
            return "Function not supported for Trace elements"

    # Skip this function for TensorProductElement if get_coeffs is not implemented
    for e in elements:
        try:
            e.get_coeffs()
        except NotImplementedError:
            return "Function is not supported/implemented."

    # Initialise data with 'global' values.
    data = {"reference_value_size": ufl_element.reference_value_size(),
            "physical_value_size": ufl_element.value_size(),
            "cellname": cellname,
            "topological_dimension": cell.topological_dimension(),
            "geometric_dimension": cell.geometric_dimension(),
            "space_dimension": fiat_element.space_dimension(),
            "needs_oriented": needs_oriented_jacobian(fiat_element),
            "max_degree": max([e.degree() for e in elements])
            }

    # Loop element and space dimensions to generate dof data.
    # 'dof' is a running index across all subelements, used to pick the
    # matching mapping and offsets.
    dof = 0
    dofs_data = []
    for e in elements:
        num_components = product(e.value_shape())
        coeffs = e.get_coeffs()
        num_expansion_members = e.get_num_members(e.degree())
        dmats = e.dmats()

        # Clamp dmats zeros
        dmats = numpy.asarray(dmats)
        dmats[numpy.where(numpy.isclose(dmats, 0.0, rtol=epsilon, atol=epsilon))] = 0.0

        # Extracted parts of dd below that are common for the element
        # here.  These dict entries are added to each dof_data dict
        # for each dof, because that's what the code generation
        # implementation expects.  If the code generation needs this
        # structure to be optimized in the future, we can store this
        # data for each subelement instead of for each dof.
        subelement_data = {
            "embedded_degree": e.degree(),
            "num_components": num_components,
            "dmats": dmats,
            "num_expansion_members": num_expansion_members,
        }
        value_rank = len(e.value_shape())

        for i in range(e.space_dimension()):
            # Collect the expansion coefficients of basis function i,
            # flattened to one entry per value component.
            if num_components == 1:
                coefficients = [coeffs[i]]
            elif value_rank == 1:
                # Handle coefficients for vector valued basis elements
                # [Raviart-Thomas, Brezzi-Douglas-Marini (BDM)].
                coefficients = [coeffs[i][c]
                                for c in range(num_components)]
            elif value_rank == 2:
                # Handle coefficients for tensor valued basis elements.
                # [Regge]
                coefficients = [coeffs[i][p][q]
                                for p in range(e.value_shape()[0])
                                for q in range(e.value_shape()[1])]
            else:
                # NOTE(review): error() presumably raises; otherwise
                # 'coefficients' would be unbound below — confirm.
                error("Unknown situation with num_components > 1")

            # Clamp coefficient zeros
            coefficients = numpy.asarray(coefficients)
            coefficients[numpy.where(numpy.isclose(coefficients, 0.0, rtol=epsilon, atol=epsilon))] = 0.0

            dof_data = {
                "coeffs": coefficients,
                "mapping": mappings[dof],
                "physical_offset": physical_offsets[dof],
                "reference_offset": reference_offsets[dof],
            }
            # Still storing element data in dd to avoid rewriting dependent code
            dof_data.update(subelement_data)

            # This list will hold one dd dict for each dof
            dofs_data.append(dof_data)
            dof += 1

    data["dofs_data"] = dofs_data

    return data