def power(self, o):
        #print("\n\nVisiting Power: " + repr(o))

        # Get base and exponent.
        base, expo = o.operands()

        # Visit base to get base code.
        base_code = self.visit(base)

        # TODO: Are these safety checks needed? Need to check for None?
        ffc_assert(() in base_code and len(base_code) == 1, "Only support function type base: " + repr(base_code))

        # Get the base code.
        val = base_code[()]

        # Handle different exponents
        if isinstance(expo, IntValue):
            return {(): format["power"](val, expo.value())}
        elif isinstance(expo, FloatValue):
            return {(): format["std power"](val, format["floating point"](expo.value()))}
        elif isinstance(expo, (Coefficient, Operator)):
            exp = self.visit(expo)
            return {(): format["std power"](val, exp[()])}
        else:
            error("power does not support this exponent: " + repr(expo))
Example #2
def arnold_winther_dofs(element):
    "Special fix for Arnold-Winther elements until Rob fixes in FIAT."

    if not element.cell().cellname() == "triangle":
        error("Unable to plot element, only know how to plot Arnold-Winther on triangles.")

    return [("PointEval",        {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointEval",        {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointEval",        {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointEval",        {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointEval",        {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointEval",        {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointEval",        {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointEval",        {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointEval",        {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointScaledNormalEval", {(1.0/5, 0.0):     [  (0.0, (0,)), (-1.0, (1,))]}),
            ("PointScaledNormalEval", {(2.0/5, 0.0):     [  (0.0, (0,)), (-1.0, (1,))]}),
            ("PointScaledNormalEval", {(3.0/5, 0.0):     [  (0.0, (0,)), (-1.0, (1,))]}),
            ("PointScaledNormalEval", {(4.0/5, 0.0):     [  (0.0, (0,)), (-1.0, (1,))]}),
            ("PointScaledNormalEval", {(4.0/5, 1.0/5.0): [  (1.0, (0,)),  (1.0, (1,))]}),
            ("PointScaledNormalEval", {(3.0/5, 2.0/5.0): [  (1.0, (0,)),  (1.0, (1,))]}),
            ("PointScaledNormalEval", {(2.0/5, 3.0/5.0): [  (1.0, (0,)),  (1.0, (1,))]}),
            ("PointScaledNormalEval", {(1.0/5, 4.0/5.0): [  (1.0, (0,)),  (1.0, (1,))]}),
            ("PointScaledNormalEval", {(0.0,   1.0/5.0): [ (-1.0, (0,)),  (0.0, (1,))]}),
            ("PointScaledNormalEval", {(0.0,   2.0/5.0): [ (-1.0, (0,)),  (0.0, (1,))]}),
            ("PointScaledNormalEval", {(0.0,   3.0/5.0): [ (-1.0, (0,)),  (0.0, (1,))]}),
            ("PointScaledNormalEval", {(0.0,   4.0/5.0): [ (-1.0, (0,)),  (0.0, (1,))]}),
            ("IntegralMoment", None),
            ("IntegralMoment", None),
            ("IntegralMoment", None)]
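
The direction vectors in the PointScaledNormalEval rows are the scaled outward normals of the three edges of the UFC reference triangle. A minimal standalone check, assuming the usual edge-opposite-vertex numbering (not part of FFC), reproduces them:

import numpy as np

# Reference triangle vertices; edge i is the edge opposite vertex i.
vertices = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
edges = [(1, 2), (0, 2), (0, 1)]

for i, (a, b) in enumerate(edges):
    t = vertices[b] - vertices[a]      # tangent, scaled by the edge length
    n = np.array([t[1], -t[0]])        # a normal of the same length
    # Orient outward: flip if it points towards the opposite vertex.
    if np.dot(n, vertices[i] - 0.5 * (vertices[a] + vertices[b])) > 0:
        n = -n
    print("edge", i, "scaled outward normal:", n)
# The three directions (1, 1), (-1, 0) and (0, -1) match the table above.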
Example #3
def argyris_dofs(element):
    "Special fix for Argyris elements until Rob fixes in FIAT."

    if not element.degree() == 5:
        error("Unable to plot element, only know how to plot quintic Argyris elements.")

    if not element.cell().cellname() == "triangle":
        error("Unable to plot element, only know how to plot Argyris on triangles.")

    return [("PointEval",        {(0.0, 0.0): [ (1.0, ()) ]}),
            ("PointEval",        {(1.0, 0.0): [ (1.0, ()) ]}),
            ("PointEval",        {(0.0, 1.0): [ (1.0, ()) ]}),
            ("PointDeriv",       {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
            ("PointDeriv",       {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
            ("PointDeriv",       {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
            ("PointDeriv",       {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
            ("PointDeriv",       {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof twice
            ("PointDeriv",       {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof twice
            ("PointSecondDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointSecondDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointSecondDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointSecondDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointSecondDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointSecondDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointSecondDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointSecondDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointSecondDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
            ("PointNormalDeriv", {(0.5, 0.0): [ (0.0, (0,)), (-1.0,  (1,))]}),
            ("PointNormalDeriv", {(0.5, 0.5): [ (1.0, (0,)), ( 1.0,  (1,))]}),
            ("PointNormalDeriv", {(0.0, 0.5): [(-1.0, (0,)), ( 0.0,  (1,))]})]
Example #4
def _tabulate_dmats(L, dof_data):
    "Tabulate the derivatives of the polynomial base"

    # Get derivative matrices (coefficients) of basis functions,
    # computed by FIAT at compile time.

    code = [L.Comment("Tables of derivatives of the polynomial base (transpose).")]

    # Generate tables for each spatial direction.
    for i, dmat in enumerate(dof_data["dmats"]):

        # Extract derivatives for current direction (take transpose,
        # FIAT_NEW PolynomialSet.tabulate()).
        matrix = numpy.transpose(dmat)

        # Get shape and check dimension (This is probably not needed).
        #        shape = numpy.shape(matrix)
        if not (matrix.shape[0] == matrix.shape[1] == dof_data["num_expansion_members"]):
            error("Something is wrong with the shape of dmats.")

        # Declare variable name for coefficients.
        table = L.Symbol("dmats%d" % i)
        code += [L.ArrayDecl("static const double", table, matrix.shape, matrix)]

    return code
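
A minimal standalone sketch of the same tabulation with plain string formatting in place of the L.ArrayDecl wrapper; the derivative matrices below are fabricated just for illustration:

import numpy

def tabulate_dmats_sketch(dmats):
    "Emit one static C array declaration per spatial direction (transposed)."
    lines = ["// Tables of derivatives of the polynomial base (transpose)."]
    for i, dmat in enumerate(dmats):
        matrix = numpy.transpose(dmat)
        n, m = matrix.shape
        rows = ", ".join("{%s}" % ", ".join("%g" % v for v in row) for row in matrix)
        lines.append("static const double dmats%d[%d][%d] = {%s};" % (i, n, m, rows))
    return "\n".join(lines)

# Example with made-up 2x2 derivative matrices for two spatial directions.
print(tabulate_dmats_sketch([numpy.array([[0.0, 0.0], [2.0, 0.0]]),
                             numpy.array([[0.0, 0.0], [0.0, 2.0]])]))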
Example #5
def create_element(ufl_element):

    # Create element signature for caching (just use UFL element)
    element_signature = ufl_element

    # Check cache
    element = _cache.get(element_signature, None)
    if element:
        debug("Reusing element from cache")
        return element

    if isinstance(ufl_element, ufl.MixedElement):
        # Create mixed element (implemented by FFC)
        elements = _extract_elements(ufl_element)
        element = MixedElement(elements)
    elif isinstance(ufl_element, ufl.RestrictedElement):
        # Create restricted element (implemented by FFC)
        element = _create_restricted_element(ufl_element)
    elif isinstance(ufl_element, (ufl.FiniteElement, ufl.OuterProductElement, ufl.EnrichedElement, ufl.BrokenElement, ufl.TraceElement, ufl.FacetElement, ufl.InteriorElement)):
        # Create regular FIAT finite element
        element = _create_fiat_element(ufl_element)
    else:
        error("Cannot handle this element type: %s" % str(ufl_element))

    # Store in cache
    _cache[element_signature] = element

    return element
Example #6
 def division(self, o, ops):
     if len(ops) != 2:
         error("Expecting two operands.")
     if len(ops[1]) != 1:
         error("Expecting scalar divisor.")
     b, = ops[1]
     return [o._ufl_expr_reconstruct_(a, b) for a in ops[0]]
Example #7
def _tabulate_tensor(vals):
    "Tabulate a multidimensional tensor. (Replace tabulate_matrix and tabulate_vector)."

    # Prefetch formats to speed up code generation
    f_block     = format["block"]
    f_list_sep  = format["list separator"]
    f_block_sep = format["block separator"]
    # FIXME: KBO: Change this to "float" once issue in set_float_formatting is fixed.
    f_float     = format["floating point"]
    f_epsilon   = format["epsilon"]

    # Create numpy array and get shape.
    tensor = numpy.array(vals)
    shape = numpy.shape(tensor)
    if len(shape) == 1:
        # Create zeros if value is smaller than tolerance.
        values = []
        for v in tensor:
            if abs(v) < f_epsilon:
                values.append(f_float(0.0))
            else:
                values.append(f_float(v))
        # Format values.
        return f_block(f_list_sep.join(values))
    elif len(shape) > 1:
        return f_block(f_block_sep.join([_tabulate_tensor(tensor[i]) for i in range(shape[0])]))
    else:
        error("Not an N-dimensional array:\n%s" % tensor)
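
A standalone sketch of the same recursion with plain string formatting in place of the format dict; the brace and separator choices are assumptions, not FFC's actual format entries:

import numpy

def tabulate_tensor_sketch(vals, epsilon=1e-14):
    "Recursively format a nested array as a brace-enclosed initializer."
    tensor = numpy.array(vals)
    shape = numpy.shape(tensor)
    if len(shape) == 1:
        # Snap values below the tolerance to zero, then join with commas.
        values = ["%g" % (0.0 if abs(v) < epsilon else v) for v in tensor]
        return "{%s}" % ", ".join(values)
    elif len(shape) > 1:
        return "{%s}" % ",\n ".join(tabulate_tensor_sketch(tensor[i], epsilon)
                                    for i in range(shape[0]))
    else:
        raise ValueError("Not an N-dimensional array:\n%s" % tensor)

print(tabulate_tensor_sketch([[1.0, 1e-16], [0.5, 2.0]]))
# {{1, 0},
#  {0.5, 2}}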
def create_element(ufl_element):

    # Create element signature for caching (just use UFL element)
    element_signature = ufl_element

    # Check cache
    if element_signature in _cache:
        debug("Reusing element from cache")
        return _cache[element_signature]

    # Create regular FIAT finite element
    if isinstance(ufl_element, ufl.FiniteElement):
        element = _create_fiat_element(ufl_element)

    # Create mixed element (implemented by FFC)
    elif isinstance(ufl_element, ufl.MixedElement):
        elements = _extract_elements(ufl_element)
        element = MixedElement(elements)

    # Create element union (implemented by FFC)
    elif isinstance(ufl_element, ufl.EnrichedElement):
        elements = [create_element(e) for e in ufl_element._elements]
        element = EnrichedElement(elements)

    # Create restricted element (implemented by FFC)
    elif isinstance(ufl_element, ufl.RestrictedElement):
        element = _create_restricted_element(ufl_element)

    else:
        error("Cannot handle this element type: %s" % str(ufl_element))

    # Store in cache
    _cache[element_signature] = element

    return element
Example #9
def create_quadrature_points_and_weights(integral_type, cell, degree, rule):
    "Create quadrature rule and return points and weights."
    if integral_type == "cell":
        (points, weights) = create_quadrature(cell, degree, rule)
    elif integral_type == "exterior_facet" or integral_type == "interior_facet":
        # Since quadrilaterals use OuterProductElements, the degree is usually
        # a tuple, though not always. For example, in the constant times dx case
        # the degree is always a single number.
        if cell.cellname() == "quadrilateral" and isinstance(degree, tuple):
            assert len(degree) == 2 and degree[0] == degree[1]
            degree = degree[0]
        (points, weights) = create_quadrature(cellname2facetname[cell.cellname()], degree, rule)
    elif integral_type in ("exterior_facet_top", "exterior_facet_bottom", "interior_facet_horiz"):
        (points, weights) = create_quadrature(cell.facet_horiz, degree[0], rule)
    elif integral_type in ("exterior_facet_vert", "interior_facet_vert"):
        if cell.topological_dimension() == 2:
            # extruded interval, so the vertical facet is a line, not an OP cell
            (points, weights) = create_quadrature(cell.facet_vert, degree[1], rule)
        else:
            (points, weights) = create_quadrature(cell.facet_vert, degree, rule)
    elif integral_type == "vertex":
        (points, weights) = ([()], numpy.array([1.0,])) # TODO: Will be fixed
    elif integral_type == "custom":
        (points, weights) = (None, None)
    else:
        error("Unknown integral type: " + str(integral_type))
    return (points, weights)
Example #10
    def sum(self, o, *operands):
        code = {}
        # Loop over operands that have to be summed.
        for op in operands:
            # If an entry already exists, add the code to it,
            # otherwise just dump it in the element tensor.
            for key, val in sorted(op.items()):
                if key in code:
                    code[key].append(val)
                else:
                    code[key] = [val]

        # Add sums and group if necessary.
        for key, val in sorted_by_key(code):
            if len(val) > 1:
                code[key] = create_sum(val)
            elif val:
                code[key] = val[0]
            else:
                error("Where did the values go?")
            # If value is zero just ignore it.
            if abs(code[key].val) < format["epsilon"]:
                del code[key]

        return code
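
The same grouping logic in a standalone form, with plain floats standing in for the symbolic values and a hypothetical tolerance in place of format["epsilon"]:

EPSILON = 1e-14  # stand-in for format["epsilon"]

def sum_sketch(*operands):
    "Group operand entries by key, sum them and drop (near-)zero results."
    code = {}
    for op in operands:
        for key, val in sorted(op.items()):
            code.setdefault(key, []).append(val)
    for key in list(code):
        total = sum(code[key])
        if abs(total) < EPSILON:
            del code[key]        # zero entries are simply ignored
        else:
            code[key] = total
    return code

# Two operands contributing to the same () entry plus one cancelling pair.
print(sum_sketch({(): 1.0, (0,): 2.0}, {(): 0.5, (0,): -2.0}))   # {(): 1.5}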
Example #11
    def power(self, o):
        # Get base and exponent.
        base, expo = o.ufl_operands

        # Visit base to get base code.
        base_code = self.visit(base)

        # TODO: Are these safety checks needed?
        ffc_assert(() in base_code and len(base_code) == 1,
                   "Only support function type base: " + repr(base_code))

        # Get the base code and create power.
        val = base_code[()]

        # Handle different exponents
        if isinstance(expo, IntValue):
            return {(): create_product([val] * expo.value())}
        elif isinstance(expo, FloatValue):
            exp = format["floating point"](expo.value())
            sym = create_symbol(format["std power"](str(val), exp), val.t,
                                val, 1)
            return {(): sym}
        elif isinstance(expo, (Coefficient, Operator)):
            exp = self.visit(expo)[()]
            sym = create_symbol(format["std power"](str(val), exp), val.t,
                                val, 1)
            return {(): sym}
        else:
            error("power does not support this exponent: " + repr(expo))
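
A standalone sketch of this exponent dispatch, emitting plain C-style strings instead of FFC symbols; the std::pow spelling is an assumption about what format["std power"] expands to:

def power_sketch(base_code, exponent):
    "Positive integer exponents become repeated products, others a pow call."
    if isinstance(exponent, int) and exponent > 0:
        return "*".join([base_code] * exponent)
    return "std::pow(%s, %s)" % (base_code, exponent)

print(power_sketch("x[0]", 3))      # x[0]*x[0]*x[0]
print(power_sketch("x[0]", 0.5))    # std::pow(x[0], 0.5)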
Example #12
 def reference_facet_volume(self, e, mt, tabledata, access):
     L = self.language
     cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
     if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"):
         return L.Symbol("{0}_reference_facet_volume".format(cellname))
     else:
         error("Unhandled cell types {0}.".format(cellname))
Example #13
 def jacobian(self, e, mt, tabledata, num_points):
     L = self.language
     if mt.global_derivatives:
         error("Not expecting global derivatives of Jacobian.")
     if mt.averaged:
         error("Not expecting average of Jacobian.")
     return self.symbols.J_component(mt)
Example #14
def _indices(element, restriction_domain, tdim):
    "Extract basis functions indices that correspond to restriction_domain."

    # FIXME: The restriction_domain argument in FFC/UFL needs to be re-thought and
    # cleaned-up.

    # If restriction_domain is "interior", pick basis functions associated with
    # cell.
    if restriction_domain == "interior":
        return element.entity_dofs()[tdim][0]

    # Pick basis functions associated with
    # the topological degree of the restriction_domain and of all lower
    # dimensions.
    if restriction_domain == "facet":
        rdim = tdim-1
    elif restriction_domain == "face":
        rdim = 2
    elif restriction_domain == "edge":
        rdim = 1
    elif restriction_domain == "vertex":
        rdim = 0
    else:
        error("Restriction to domain: %s, is not supported." % repr(restriction_domain))

    entity_dofs = element.entity_dofs()
    indices = []
    for dim in range(rdim + 1):
        entities = entity_dofs[dim]
        for (entity, index) in sorted_by_key(entities):
            indices += index
    return indices
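
For illustration, the entity_dofs layout this relies on, shown with a hand-written table for a P2 element on a triangle (three vertex dofs, three edge dofs) and the "facet" restriction collecting dimensions 0 and 1:

# Hypothetical entity_dofs table: dim -> entity number -> list of dof indices.
entity_dofs = {0: {0: [0], 1: [1], 2: [2]},    # vertices
               1: {0: [3], 1: [4], 2: [5]},    # edges (the facets in 2D)
               2: {0: []}}                     # cell interior

tdim = 2
rdim = tdim - 1      # "facet" restriction on a triangle
indices = []
for dim in range(rdim + 1):
    for entity in sorted(entity_dofs[dim]):
        indices += entity_dofs[dim][entity]
print(indices)       # [0, 1, 2, 3, 4, 5] -> all dofs living on the boundary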
Example #15
    def __init__(self, basename):
        ufc_templates = ffc.backends.ufc.templates
        self._header_template = ufc_templates[basename + "_header"]
        self._implementation_template = ufc_templates[basename + "_implementation"]
        self._combined_template = ufc_templates[basename + "_combined"]
        self._jit_header_template = ufc_templates[basename + "_jit_header"]
        self._jit_implementation_template = ufc_templates[basename + "_jit_implementation"]

        r = re.compile(r"%\(([a-zA-Z0-9_]*)\)")
        self._header_keywords = set(r.findall(self._header_template))
        self._implementation_keywords = set(r.findall(self._implementation_template))
        self._combined_keywords = set(r.findall(self._combined_template))

        self._keywords = sorted(self._header_keywords | self._implementation_keywords)

        # Do some ufc interface template checking, to catch bugs
        # early when we change the ufc interface templates
        if set(self._keywords) != set(self._combined_keywords):
            a = set(self._header_keywords) - set(self._combined_keywords)
            b = set(self._implementation_keywords) - set(self._combined_keywords)
            c = set(self._combined_keywords) - set(self._keywords)
            msg = "Templates do not have matching sets of keywords:"
            if a:
                msg += "\n  Header template keywords '%s' are not in the combined template." % (sorted(a),)
            if b:
                msg += "\n  Implementation template keywords '%s' are not in the combined template." % (sorted(b),)
            if c:
                msg += "\n  Combined template keywords '%s' are not in the header or implementation templates." % (sorted(c),)
            error(msg)
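
The keyword bookkeeping boils down to the regular expression above. A standalone sketch with made-up template strings shows the extraction and the consistency check:

import re

keyword_pattern = re.compile(r"%\(([a-zA-Z0-9_]*)\)")

# Fabricated templates standing in for the ufc header/implementation pair.
header = "class %(classname)s { %(members)s };"
implementation = "%(classname)s::%(classname)s() { %(constructor)s }"
combined = "class %(classname)s { %(members)s %(constructor)s };"

header_keywords = set(keyword_pattern.findall(header))
implementation_keywords = set(keyword_pattern.findall(implementation))
combined_keywords = set(keyword_pattern.findall(combined))

# Same check as above: every keyword must appear in the combined template.
missing = (header_keywords | implementation_keywords) - combined_keywords
print(sorted(header_keywords | implementation_keywords))  # ['classname', 'constructor', 'members']
print(missing)                                            # set() -> templates are consistent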
Example #16
    def index_sum(self, o, ops):
        summand, mi = o.ufl_operands
        ic = mi[0].count()
        fi = summand.ufl_free_indices
        fid = summand.ufl_index_dimensions
        ipos = fi.index(ic)
        d = fid[ipos]

        # Compute "macro-dimensions" before and after i in the total shape of a
        predim = product(summand.ufl_shape) * product(fid[:ipos])
        postdim = product(fid[ipos+1:])

        # Map each flattened total component of summand to
        # flattened total component of indexsum o by removing
        # axis corresponding to summation index ii.
        ss = ops[0]  # Scalar subexpressions of summand
        if len(ss) != predim * postdim * d:
            error("Mismatching number of subexpressions.")
        sops = []
        for i in range(predim):
            iind = i * (postdim * d)
            for k in range(postdim):
                ind = iind + k
                sops.append([ss[ind + j * postdim] for j in range(d)])

        # For each scalar output component, sum over collected subcomponents
        # TODO: Need to split this into binary additions to work with future CRSArray format,
        #       i.e. emitting more expressions than there are symbols for this node.
        results = [sum(sop) for sop in sops]
        return results
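
The flat-index arithmetic can be checked against numpy's C-ordered flattening; a small standalone verification (nothing UFL-specific) for an array whose summation axis sits in the middle:

import numpy

# Shape (predim, d, postdim): sum over the middle axis of length d.
predim, d, postdim = 2, 3, 4
A = numpy.arange(predim * d * postdim, dtype=float).reshape(predim, d, postdim)
ss = list(A.flatten())   # flat scalar subexpressions, C ordering

results = []
for i in range(predim):
    iind = i * (postdim * d)
    for k in range(postdim):
        ind = iind + k
        # The stride of the summation index j in the flat ordering is postdim.
        results.append(sum(ss[ind + j * postdim] for j in range(d)))

assert numpy.allclose(results, A.sum(axis=1).flatten())
print("flat index_sum matches numpy.sum over the middle axis")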
Example #17
def expand_operations(expression, format):
    """This function expands an expression and returns the value. E.g.,
    ((x + y))             --> x + y
    2*(x + y)             --> 2*x + 2*y
    (x + y)*(x + y)       --> x*x + y*y + 2*x*y
    z*(x*(y + 3) + 2) + 1 --> 1 + 2*z + x*y*z + x*z*3
    z*((y + 3)*x + 2) + 1 --> 1 + 2*z + x*y*z + x*z*3"""

    # Get formats
    add   = format["add"](["", ""])
    mult  = format["multiply"](["", ""])
    group = format["grouping"]("")
    l = group[0]
    r = group[1]

    # Check that we have the same number of left and right parentheses in the expression
    if expression.count(l) != expression.count(r):
        error("Number of left and right parentheses does not match")

    # If we don't have any parenthesis, group variables and return
    if expression.count(l) == 0:
        return group_vars(expression, format)

    # Get list of additions
    adds = split_expression(expression, format, add)
    new_adds = []

    # Loop additions and get products
    for a in adds:
        prods = split_expression(a, format, mult)
        prods.sort()
        new_prods = []

        # FIXME: Should we use deque here?
        expanded = []
        for i, p in enumerate(prods):
            # If we have a group, expand inner expression
            if p[0] == l and p[-1] == r:
                # Add remaining products to new products and multiply with all
                # terms from expanded variable
                expanded_var = expand_operations(p[1:-1], format)
                expanded.append( split_expression(expanded_var, format, add) )

            # Else, just add variable to list of new products
            else:
                new_prods.append(p)

        if expanded:
            # Combine all expanded variables and multiply by factor
            while len(expanded) > 1:
                first = expanded.pop(0)
                second = expanded.pop(0)
                expanded = [[mult.join([i] + [j]) for i in first for j in second]] + expanded
            new_adds += [mult.join(new_prods + [e]) for e in expanded[0]]
        else:
            # Else, just multiply products and add to list of products
            new_adds.append( mult.join(new_prods) )

    # Group variables and return
    return group_vars(add.join(new_adds), format)
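
The docstring examples can be reproduced symbolically with sympy; this only illustrates the algebraic expansion itself, not the string machinery above:

import sympy as sp

x, y, z = sp.symbols("x y z")

print(sp.expand((x + y) * (x + y)))          # == x**2 + 2*x*y + y**2
print(sp.expand(2 * (x + y)))                # == 2*x + 2*y
print(sp.expand(z * (x * (y + 3) + 2) + 1))  # == 1 + 2*z + x*y*z + 3*x*z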
    def sum(self, o, *operands):
        #print("Visiting Sum: " + repr(o) + "\noperands: " + "\n".join(map(repr, operands)))

        code = {}
        # Loop over operands that have to be summed.
        for op in operands:
            # If an entry already exists, add the code to it, otherwise just
            # dump it in the element tensor.
            for key, val in op.items():
                if key in code:
                    code[key].append(val)
                else:
                    code[key] = [val]

        # Add sums and group if necessary.
        for key, val in code.items():
            if len(val) > 1:
                code[key] = create_sum(val)
            elif val:
                code[key] = val[0]
            else:
                error("Where did the values go?")
            # If value is zero just ignore it.
            if abs(code[key].val) < format["epsilon"]:
                del code[key]

        return code
Example #19
def _create_fiat_element(ufl_element):
    "Create FIAT element corresponding to given finite element."

    # Get element data
    family = ufl_element.family()
    cell = ufl_element.cell()
    degree = ufl_element.degree()

    # Check that FFC supports this element
    ffc_assert(family in supported_families,
               "This element family (%s) is not supported by FFC." % family)

    # Handle the space of the constant
    if family == "Real":
        dg0_element = ufl.FiniteElement("DG", cell, 0)
        constant = _create_fiat_element(dg0_element)
        element = SpaceOfReals(constant)

    # Handle the specialized time elements
    elif family == "Lobatto" :
        element = FFCLobattoElement(ufl_element.degree())

    elif family == "Radau" :
        element = FFCRadauElement(ufl_element.degree())

    # FIXME: AL: Should this really be here?
    # Handle QuadratureElement
    elif family == "Quadrature":
        element = FFCQuadratureElement(ufl_element)

    else:
        # Create FIAT cell
        fiat_cell = reference_cell(cell.cellname())

        # Handle Bubble element as RestrictedElement of P_{k} to interior
        if family == "Bubble":
            V = FIAT.supported_elements["Lagrange"](fiat_cell, degree)
            dim = cell.topological_dimension()
            return RestrictedElement(V, _indices(V, "interior", dim), None)

        # Check if finite element family is supported by FIAT
        if family not in FIAT.supported_elements:
            error("Sorry, finite elements of type \"%s\" are not supported by FIAT.", family)

        # Create FIAT finite element
        ElementClass = FIAT.supported_elements[family]
        if degree is None:
            element = ElementClass(fiat_cell)
        else:
            element = ElementClass(fiat_cell, degree)

    # Consistency check between UFL and FIAT elements. This will not hold for elements
    # where the reference value shape is different from the global value shape, i.e.
    # RT elements on a triangle in 3D.
    #ffc_assert(element.value_shape() == ufl_element.value_shape(),
    #           "Something went wrong in the construction of FIAT element from UFL element." + \
    #           "Shapes are %s and %s." % (element.value_shape(), ufl_element.value_shape()))

    return element
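
For reference, the non-mixed branch essentially reduces to direct FIAT calls; a usage sketch assuming FIAT is importable (ufc_simplex(2) is the reference triangle):

import FIAT

fiat_cell = FIAT.ufc_simplex(2)                  # reference triangle
ElementClass = FIAT.supported_elements["Lagrange"]
element = ElementClass(fiat_cell, 2)             # quadratic Lagrange
print(element.space_dimension())                 # 6 dofs for P2 on a triangle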
    def __init__(self, element, indices, domain):
        if len(indices) == 0:
            error("No point in creating empty RestrictedElement.")

        self._element = element
        self._indices = indices
        self._entity_dofs = _extract_entity_dofs(element, indices)
        self._domain = domain
Example #21
def _transform_integrals_by_type(ir, transformer, integrals_dict, integral_type):
    num_vertices = ir["num_vertices"]
    num_facets = ir["num_facets"]

    if integral_type == "cell":
        # Compute transformed integrals.
        info("Transforming cell integral")
        transformer.update_cell()
        terms = _transform_integrals(transformer, integrals_dict, integral_type)

    elif integral_type in ("exterior_facet", "exterior_facet_vert"):
        # Compute transformed integrals.
        info("Transforming exterior facet integral")
        transformer.update_facets(format["facet"](None), None)
        terms = _transform_integrals(transformer, integrals_dict, integral_type)

    elif integral_type == "exterior_facet_bottom":
        # Compute transformed integrals.
        info("Transforming exterior bottom facet integral")
        transformer.update_facets(0, None)  # 0 == bottom
        terms = _transform_integrals(transformer, integrals_dict, integral_type)

    elif integral_type == "exterior_facet_top":
        # Compute transformed integrals.
        info("Transforming exterior top facet integral")
        transformer.update_facets(1, None)  # 1 == top
        terms = _transform_integrals(transformer, integrals_dict, integral_type)

    elif integral_type in ("interior_facet", "interior_facet_vert"):
        # Compute transformed integrals.
        info("Transforming interior facet integral")
        transformer.update_facets(format["facet"]("+"), format["facet"]("-"))
        terms = _transform_integrals(transformer, integrals_dict, integral_type)

    elif integral_type == "interior_facet_horiz":
        # Compute transformed integrals.
        info("Transforming interior horizontal facet integral")
        # Generate the code we need, corresponding to facet 1 [top] of
        # the lower element, and facet 0 [bottom] of the top element
        transformer.update_facets(1, 0)
        terms = _transform_integrals(transformer, integrals_dict, integral_type)

    elif integral_type == "vertex":
        # Compute transformed integrals.
        info("Transforming vertex integral")
        transformer.update_vertex(format["vertex"])
        terms = _transform_integrals(transformer, integrals_dict, integral_type)

    elif integral_type == "custom":

        # Compute transformed integrals: same as for cell integrals
        info("Transforming custom integral")
        transformer.update_cell()
        terms = _transform_integrals(transformer, integrals_dict, integral_type)

    else:
        error("Unhandled domain type: " + str(integral_type))
    return terms
Example #22
def compute_integral_ir(itg_data,
                        form_data,
                        form_id,
                        element_numbers,
                        parameters):
    "Compute intermediate representation of integral."

    info("Computing tensor representation")

    # Extract monomial representation
    integrands = [itg.integrand() for itg in itg_data.integrals]
    monomial_form = extract_monomial_form(integrands, form_data.function_replace_map)

    # Transform monomial form to reference element
    transform_monomial_form(monomial_form)

    # Get some integral properties
    integral_type = itg_data.integral_type
    quadrature_degree = itg_data.metadata["quadrature_degree"]
    quadrature_rule = itg_data.metadata["quadrature_rule"]

    # Get some cell properties
    cell = itg_data.domain.ufl_cell()
    num_facets = cell.num_facets()

    # Helper to simplify code below
    compute_terms = lambda i, j: _compute_terms(monomial_form,
                                           i, j,
                                           integral_type,
                                           quadrature_degree,
                                           quadrature_rule,
                                           cell)

    # Compute representation of cell tensor
    if integral_type == "cell":
        # Compute sum of tensor representations
        terms = compute_terms(None, None)

    elif integral_type == "exterior_facet":
        # Compute sum of tensor representations for each facet
        terms = [compute_terms(i, None) for i in range(num_facets)]

    elif integral_type == "interior_facet":
        # Compute sum of tensor representations for each facet-facet pair
        terms = [[compute_terms(i, j) for j in range(num_facets)] for i in range(num_facets)]
        for i in range(num_facets):
            for j in range(num_facets):
                reorder_entries(terms[i][j])

    else:
        error("Unhandled domain type: " + str(integral_type))

    # Initialize representation and store terms
    ir = initialize_integral_ir("tensor", itg_data, form_data, form_id)
    ir["AK"] = terms

    return ir
Example #23
    def facet_orientation(self, e, mt, tabledata, num_points):
        L = self.language
        cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
        if cellname not in ("interval", "triangle", "tetrahedron"):
            error("Unhandled cell types {0}.".format(cellname))

        table = L.Symbol("{0}_facet_orientations".format(cellname))
        facet = self.symbols.entity("facet", mt.restriction)
        return table[facet]
Example #24
 def reference_normal(self, e, mt, tabledata, access):
     L = self.language
     cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
     if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"):
         table = L.Symbol("{0}_reference_facet_normals".format(cellname))
         facet = self.symbols.entity("facet", mt.restriction)
         return table[facet][mt.component[0]]
     else:
         error("Unhandled cell types {0}.".format(cellname))
Example #25
def reference_cell(cell):
    # really want to be using cells only, but sometimes only cellname is passed
    # in. FIAT handles the cases.
    
    # I hope nothing is still passing in just dimension...
    if isinstance(cell, int):
        error("%s was passed into reference_cell(). Need cell or cellname." % str(cell))

    return FIAT.ufc_cell(cell)
Example #26
    def calculate_basisvalues(ufl_cell, fiat_element):
        f_component     = format["component"]
        f_decl          = format["declaration"]
        f_float_decl    = format["float declaration"]
        f_tensor        = format["tabulate tensor"]
        f_new_line      = format["new line"]

        tdim = ufl_cell.topological_dimension()
        gdim = ufl_cell.geometric_dimension()

        code = []

        # Symbolic tabulation
        tabs = fiat_element.tabulate(0, np.array([[sp.Symbol("reference_coords.X[%d]" % i)
                                                   for i in range(tdim)]]))
        tabs = tabs[(0,) * tdim]
        tabs = tabs.reshape(tabs.shape[:-1])

        # Generate code for intermediate values
        s_code, (theta,) = ssa_arrays([tabs])
        for name, value in s_code:
            code += [f_decl(f_float_decl, name, c_print(value))]

        # Prepare Jacobian, Jacobian inverse and determinant
        s_detJ = sp.Symbol('detJ')
        s_J = np.array([[sp.Symbol("J[{i}*{tdim} + {j}]".format(i=i, j=j, tdim=tdim))
                         for j in range(tdim)]
                        for i in range(gdim)])
        s_Jinv = np.array([[sp.Symbol("K[{i}*{gdim} + {j}]".format(i=i, j=j, gdim=gdim))
                            for j in range(gdim)]
                           for i in range(tdim)])

        # Apply transformations
        phi = []
        for i, val in enumerate(theta):
            mapping = fiat_element.mapping()[i]
            if mapping == "affine":
                phi.append(val)
            elif mapping == "contravariant piola":
                phi.append(s_J.dot(val) / s_detJ)
            elif mapping == "covariant piola":
                phi.append(s_Jinv.transpose().dot(val))
            else:
                error("Unknown mapping: %s" % mapping)
        phi = np.asarray(phi, dtype=object)

        # Dump tables of basis values
        code += ["", "\t// Values of basis functions"]
        code += [f_decl("double", f_component("phi", phi.shape),
                        f_new_line + f_tensor(phi))]

        shape = phi.shape
        if len(shape) <= 1:
            vdim = 1
        elif len(shape) == 2:
            vdim = shape[1]
        return "\n".join(code), vdim
Example #27
def create_quadrature(cell, degree, scheme="default"):
    """
    Generate quadrature rule (points, weights) for given shape
    that will integrate a polynomial of degree 'degree' exactly.
    """

    if isinstance(cell, str):
        cellname = cell
    else:
        cellname = cell.cellname()

    if cellname == "vertex":
        return ([()], array([1.0,]))

    if scheme == "default":
        if cellname == "tetrahedron":
            return _tetrahedron_scheme(degree)
        elif cellname == "triangle":
            return _triangle_scheme(degree)
        else:
            return _fiat_scheme(cell, degree)
    elif scheme == "vertex":
        # The vertex scheme, i.e., averaging the function value in the vertices
        # and multiplying with the simplex volume, is only of order 1 and
        # inferior to other generic schemes in terms of error reduction.
        # Equation systems generated with the vertex scheme have some
        # properties that other schemes lack, e.g., the mass matrix is
        # a simple diagonal matrix. This may be prescribed in certain cases.
        #
        if degree > 1:
            from warnings import warn
            warn(("Explicitly selected vertex quadrature (degree 1), "
                 +"but requested degree is %d.") % degree)
        if cellname == "tetrahedron":
            return (array([ [0.0, 0.0, 0.0],
                            [1.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0],
                            [0.0, 0.0, 1.0] ]),
                    array([1.0/24.0, 1.0/24.0, 1.0/24.0, 1.0/24.0])
                    )
        elif cellname == "triangle":
            return (array([ [0.0, 0.0],
                            [1.0, 0.0],
                            [0.0, 1.0] ]),
                    array([1.0/6.0, 1.0/6.0, 1.0/6.0])
                    )
        else:
            # Trapezoidal rule.
            return (array([ [0.0, 0.0],
                            [0.0, 1.0] ]),
                    array([1.0/2.0, 1.0/2.0])
                    )
    elif scheme == "canonical":
        return _fiat_scheme(cell, degree)
    else:
        error("Unknown quadrature scheme: %s." % scheme)
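
A quick standalone sanity check of the vertex scheme above: the weights sum to the reference cell volume and the rule integrates affine functions exactly (the triangle area 1/2 is split into three equal weights):

import numpy

# Vertex rule on the reference triangle, as returned above.
points = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
weights = numpy.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0])

print(weights.sum())                          # 0.5 == area of the reference triangle
f = lambda x: 1.0 + 2.0 * x[0] - 3.0 * x[1]   # an affine function
approx = sum(w * f(p) for p, w in zip(points, weights))
exact = 0.5 * 1.0 + 2.0 / 6.0 - 3.0 / 6.0     # exact integral of f over the triangle
print(abs(approx - exact) < 1e-14)            # True: degree 1 is integrated exactly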
Example #28
def compile_with_error_control(forms, object_names, reserved_objects,
                               prefix, parameters):
    """
    Compile forms and additionally generate and compile forms required
    for performing goal-oriented error control

    For linear problems, the input forms should be a bilinear form (a)
    and a linear form (L) specifying the variational problem and
    additionally a linear form (M) specifying the goal functional.

    For nonlinear problems, the input should be linear form (F) and a
    functional (M) specifying the goal functional.

    *Arguments*

        forms (tuple)
            Three (linear case) or two (nonlinear case) forms
            specifying the primal problem and the goal

        object_names (dict)
            Map from object ids to object names

        reserved_objects (dict)
            Map from reserved object names to object ids

        prefix (string)
            Basename of header file

        parameters (dict)
            Parameters for form compilation
    """

    # Check input arguments
    F, M, u = prepare_input_arguments(forms, object_names, reserved_objects)

    # Generate forms to be used for the error control
    from ffc.errorcontrol.errorcontrolgenerators import UFLErrorControlGenerator
    generator = UFLErrorControlGenerator(F, M, u)
    ec_forms = generator.generate_all_error_control_forms()

    # Check that there are no conflicts between user defined and
    # generated names
    ec_names = generator.ec_names
    if set(object_names.values()) & set(ec_names.values()):
        comment = "%s are reserved error control names." % str(sorted(ec_names.values()))
        error("Conflict between user defined and generated names: %s" % comment)

    # Add names generated for error control to object_names
    for (objid, name) in sorted_by_key(ec_names):
        object_names[objid] = name

    # Compile error control and input (pde + goal) forms as normal
    forms = generator.primal_forms()
    code_h, code_c = compile_form(ec_forms + forms, object_names, prefix, parameters)

    return code_h, code_c
def _create_quadrature_points_and_weights(domain_type, cell, degree, rule):
    if domain_type == "cell":
        (points, weights) = create_quadrature(cell.cellname(), degree, rule)
    elif domain_type == "exterior_facet" or domain_type == "interior_facet":
        (points, weights) = create_quadrature(cell.facet_cellname(), degree, rule)
    elif domain_type == "point":
        (points, weights) = ([()], numpy.array([1.0,])) # TODO: Will be fixed
    else:
        error("Unknown integral type: " + str(domain_type))
    return (points, weights)
Example #30
 def reference_cell_edge_vectors(self, e, mt, tabledata, num_points):
     L = self.language
     cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
     if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"):
         table = L.Symbol("{0}_reference_edge_vectors".format(cellname))
         return table[mt.component[0]][mt.component[1]]
     elif cellname == "interval":
          error("Reference cell edge vectors do not make sense for an interval cell.")
     else:
         error("Unhandled cell types {0}.".format(cellname))
 def math_function(self, o):
     print "\n\nVisiting MathFunction:", repr(o)
     error("This MathFunction is not supported (yet).")
 def geometric_quantity(self, o):
     print "\n\nVisiting GeometricQuantity:", repr(o)
     error("This type of GeometricQuantity is not supported (yet).")
 def _create_entry_data(self, val):
     error("This function should be implemented by the child class.")
 def _atan_2_function(self, operands, format_function):
     error("This function should be implemented by the child class.")
 def compound_tensor_operator(self, o):
     print "\n\nVisiting CompoundTensorOperator: ", repr(o)
     error("CompoundTensorOperator should have been expanded.")
 def _format_scalar_value(self, value):
     error("This function should be implemented by the child class.")
 def abs(self, o, *operands):
     print "\n\nVisiting Abs: ", repr(o)
     error("This object should be implemented by the child class.")
 def condition(self, o):
     print "\n\nVisiting Condition:", repr(o)
     error("This type of Condition is not supported (yet).")
 def cell_volume(self, o):
     print "\n\nVisiting CellVolume: ", repr(o)
     error("This object should be implemented by the child class.")
 def facet_normal(self, o):
     print "\n\nVisiting FacetNormal: ", repr(o)
     error("This object should be implemented by the child class.")
 def circumradius(self, o):
      print "\n\nVisiting Circumradius: ", repr(o)
     error("This object should be implemented by the child class.")
 def _create_symbol(self, symbol, domain):
     error("This function should be implemented by the child class.")
 def _create_product(self, symbols):
     error("This function should be implemented by the child class.")
 def constant_base(self, o):
     print "\n\nVisiting ConstantBase:", repr(o)
     error("This type of ConstantBase is not supported (yet).")
 def power(self, o):
     print "\n\nVisiting Power: ", repr(o)
     error("This object should be implemented by the child class.")
 def index_annotated(self, o):
     print "\n\nVisiting IndexAnnotated:", repr(o)
      error("Only child classes of IndexAnnotated are supported.")
 def derivative(self, o, *operands):
     print "\n\nVisiting Derivative: ", repr(o)
     error("All derivatives apart from Grad should have been expanded!!")
 def label(self, o):
     print "\n\nVisiting Label: ", repr(o)
      error("What is a Label doing in the integrand?")
 def terminal(self, o):
     print "\n\nVisiting basic Terminal:", repr(o), "with operands:"
     error("This terminal is not handled: " + repr(o))
 def restricted(self, o):
     print "\n\nVisiting Restricted:", repr(o)
     error("This type of Restricted is not supported (only positive and negative are currently supported).")
 def _count_operations(self, expression):
     error("This function should be implemented by the child class.")
 def algebra_operator(self, o, *operands):
     print "\n\nVisiting AlgebraOperator: ", repr(o)
     error("This type of AlgebraOperator should have been expanded!!" + repr(o))
 def bessel_function(self, o):
     print "\n\nVisiting BesselFunction:", repr(o)
     error("BesselFunction is not implemented (yet).")
 def expr(self, o):
     print "\n\nVisiting basic Expr:", repr(o), "with operands:"
     error("This expression is not handled: " + repr(o))
 def atan_2_function(self, o):
     print "\n\nVisiting Atan2Function:", repr(o)
     error("Atan2Function is not implemented (yet).")
Example #56
    def _modified_terminal(self, v, i):
        """Modifiers:
        terminal           - the underlying Terminal object
        global_derivatives - tuple of ints, each meaning derivative in that global direction
        local_derivatives  - tuple of ints, each meaning derivative in that local direction
        reference_value    - bool, whether this is represented in reference frame
        averaged           - None, 'facet' or 'cell'
        restriction        - None, '+' or '-'
        component          - tuple of ints, the global component of the Terminal
        flat_component     - single int, flattened local component of the Terminal, considering symmetry
        """
        # (1) mt.terminal.ufl_shape defines a core indexing space UNLESS mt.reference_value,
        #     in which case the reference value shape of the element must be used.
        # (2) mt.terminal.ufl_element().symmetry() defines core symmetries
        # (3) averaging and restrictions define distinct symbols, no additional symmetries
        # (4) two or more grad/reference_grad defines distinct symbols with additional symmetries

        # v is not necessarily scalar here; indexing with (0,...,0) picks the first scalar component
        # to analyse, which should be sufficient to get the base shape and derivatives
        if v.ufl_shape:
            mt = analyse_modified_terminal(v[(0,) * len(v.ufl_shape)])
        else:
            mt = analyse_modified_terminal(v)

        # Get derivatives
        num_ld = len(mt.local_derivatives)
        num_gd = len(mt.global_derivatives)
        assert not (num_ld and num_gd)
        if num_ld:
            domain = mt.terminal.ufl_domain()
            tdim = domain.topological_dimension()
            d_components = compute_indices((tdim,) * num_ld)
        elif num_gd:
            domain = mt.terminal.ufl_domain()
            gdim = domain.geometric_dimension()
            d_components = compute_indices((gdim,) * num_gd)
        else:
            d_components = [()]

        # Get base shape without the derivative axes
        base_components = compute_indices(mt.base_shape)

        # Build symbols with symmetric components and derivatives skipped
        symbols = []
        mapped_symbols = {}
        for bc in base_components:
            for dc in d_components:
                # Build mapped component mc with symmetries from element and derivatives combined
                mbc = mt.base_symmetry.get(bc, bc)
                mdc = tuple(sorted(dc))
                mc = mbc + mdc

                # Get existing symbol or create new and store with mapped component mc as key
                s = mapped_symbols.get(mc)
                if s is None:
                    s = self.new_symbol()
                    mapped_symbols[mc] = s
                symbols.append(s)

        # Consistency check before returning symbols
        assert not v.ufl_free_indices
        if product(v.ufl_shape) != len(symbols):
            error("Internal error in value numbering.")
        return symbols
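
The derivative symmetry handled by tuple(sorted(dc)) can be seen in isolation: for second derivatives in 2D the mixed partials (0, 1) and (1, 0) map to the same key, so three symbols are created instead of four. A standalone sketch, with itertools.product standing in for compute_indices:

from itertools import product as iproduct

tdim, num_derivatives = 2, 2
d_components = list(iproduct(range(tdim), repeat=num_derivatives))

mapped_symbols = {}
symbols = []
for dc in d_components:
    mc = tuple(sorted(dc))            # (1, 0) collapses onto (0, 1)
    if mc not in mapped_symbols:
        mapped_symbols[mc] = "s%d" % len(mapped_symbols)
    symbols.append(mapped_symbols[mc])

print(symbols)   # ['s0', 's1', 's1', 's2'] -> 4 components, 3 distinct symbols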
Example #57
def optimise_code(expr, ip_consts, geo_consts, trans_set):
    """Optimise a given expression with respect to basis functions,
    integration point variables and geometric constants.
    The function will update the dictionaries ip_consts and geo_consts with new
    declarations and update trans_set (the set of used transformations)."""

    #    print "expr: ", repr(expr)

    format_G = format["geometry constant"]
    #    format_ip = format["integration points"]
    format_I = format["ip constant"]
    trans_set_update = trans_set.update

    # Return constant symbol if expanded value is zero.
    exp_expr = expr.expand()
    if exp_expr.val == 0.0:
        return create_float(0)

    # Reduce expression with respect to basis function variable.
    basis_expressions = exp_expr.reduce_vartype(BASIS)

    # If we had a product instance we'll get a tuple back so embed in list.
    if not isinstance(basis_expressions, list):
        basis_expressions = [basis_expressions]

    basis_vals = []
    # Process each instance of basis functions.
    for basis, ip_expr in basis_expressions:
        # Get the basis and the ip expression.
        #        debug("\nbasis\n" + str(basis))
        #        debug("ip_epxr\n" + str(ip_expr))
        #        print "\nbasis\n" + str(basis)
        #        print "ip_epxr\n" + str(ip_expr)
        #        print "ip_epxr\n" + repr(ip_expr)
        #        print "ip_epxr\n" + repr(ip_expr.expand())

        # If we have no basis (like functionals) create a const.
        if not basis:
            basis = create_float(1)
        # NOTE: Useful for debugging to check that terms were properly reduced.
#        if Product([basis, ip_expr]).expand() != expr.expand():
#            prod = Product([basis, ip_expr]).expand()
#            print "prod == sum: ", isinstance(prod, Sum)
#            print "expr == sum: ", isinstance(expr, Sum)

#            print "prod.vrs: ", prod.vrs
#            print "expr.vrs: ", expr.vrs
#            print "expr.vrs = prod.vrs: ", expr.vrs == prod.vrs

#            print "equal: ", prod == expr

#            print "\nprod:    ", prod
#            print "\nexpr:    ", expr
#            print "\nbasis:   ", basis
#            print "\nip_expr: ", ip_expr
#            error("Not equal")

# If the ip expression doesn't contain any operations skip remainder.
#        if not ip_expr:
        if not ip_expr or ip_expr.val == 0.0:
            basis_vals.append(basis)
            continue
        if not ip_expr.ops() > 0:
            basis_vals.append(create_product([basis, ip_expr]))
            continue

        # Reduce the ip expressions with respect to IP variables.
        ip_expressions = ip_expr.expand().reduce_vartype(IP)

        # If we had a product instance we'll get a tuple back so embed in list.
        if not isinstance(ip_expressions, list):
            ip_expressions = [ip_expressions]

#        # Debug code to check that reduction didn't screw up anything
#        for ip in ip_expressions:
#            ip_dec, geo = ip
#            print "geo: ", geo
#            print "ip_dec: ", ip_dec
#        vals = []
#        for ip in ip_expressions:
#            ip_dec, geo = ip
#            if ip_dec and geo:
#                vals.append(Product([ip_dec, geo]))
#            elif geo:
#                vals.append(geo)
#            elif ip_dec:
#                vals.append(ip_dec)

#        if Sum(vals).expand() != ip_expr.expand():
##        if Sum([Product([ip, geo]) for ip, geo in ip_expressions]).expand() != ip_expr.expand():
#            print "\nip_expr: ", repr(ip_expr)
##            print "\nip_expr: ", str(ip_expr)
##            print "\nip_dec: ", repr(ip_dec)
##            print "\ngeo: ", repr(geo)
#            for ip in ip_expressions:
#                ip_dec, geo = ip
#                print "geo: ", geo
#                print "ip_dec: ", ip_dec
#            error("Not equal")

        ip_vals = []
        # Loop ip expressions.
        for ip in sorted(ip_expressions):
            ip_dec, geo = ip
            #            debug("\nip_dec: " + str(ip_dec))
            #            debug("\ngeo: " + str(geo))
            #            print "\nip_dec: " + repr(ip_dec)
            #            print "\ngeo: " + repr(geo)
            #            print "exp:  ", geo.expand()
            #            print "val:  ", geo.expand().val
            #            print "repx: ", repr(geo.expand())
            # NOTE: Useful for debugging to check that terms were properly reduced.
            #            if Product([ip_dec, geo]).expand() != ip_expr.expand():
            #                print "\nip_expr: ", repr(ip_expr)
            #                print "\nip_dec: ", repr(ip_dec)
            #                print "\ngeo: ", repr(geo)
            #                error("Not equal")

            # Update transformation set with those values that might be embedded in IP terms.
            #            if ip_dec:
            if ip_dec and ip_dec.val != 0.0:
                trans_set_update([str(x) for x in ip_dec.get_unique_vars(GEO)])

            # Append and continue if we did not have any geo values.
#            if not geo:
            if not geo or geo.val == 0.0:
                if ip_dec and ip_dec.val != 0.0:
                    ip_vals.append(ip_dec)
                continue

            # Update the transformation set with the variables in the geo term.
            trans_set_update([str(x) for x in geo.get_unique_vars(GEO)])

            # Only declare auxiliary geo terms if we can save operations.
            #            geo = geo.expand().reduce_ops()
            if geo.ops() > 0:
                #                debug("geo: " + str(geo))
                #                print "geo: " + str(geo)
                # If the geo term is not in the dictionary append it.
                #                if not geo in geo_consts:
                if not geo in geo_consts:
                    geo_consts[geo] = len(geo_consts)

                # Substitute geometry expression.
                geo = create_symbol(format_G(geo_consts[geo]), GEO)

            # If we did not have any ip_declarations use geo, else create a
            # product and append to the list of ip_values.
#            if not ip_dec:
            if not ip_dec or ip_dec.val == 0.0:
                ip_dec = geo
            else:
                ip_dec = create_product([ip_dec, geo])
            ip_vals.append(ip_dec)

        # Create sum of ip expressions to multiply by basis.
        if len(ip_vals) > 1:
            ip_expr = create_sum(ip_vals)
        elif ip_vals:
            ip_expr = ip_vals.pop()

        # If we can save operations by declaring it as a constant do so, if it
        # is not in IP dictionary, add it and use new name.
#        ip_expr = ip_expr.expand().reduce_ops()
#        if ip_expr.ops() > 0:
        if ip_expr.ops() > 0 and ip_expr.val != 0.0:
            #            if not ip_expr in ip_consts:
            if not ip_expr in ip_consts:
                ip_consts[ip_expr] = len(ip_consts)

            # Substitute ip expression.
#            ip_expr = create_symbol(format_G + format_ip + str(ip_consts[ip_expr]), IP)
            ip_expr = create_symbol(format_I(ip_consts[ip_expr]), IP)

        # Multiply by basis and append to basis vals.


#        prod = create_product([basis, ip_expr])
#        if prod.expand().val != 0.0:
#            basis_vals.append(prod)
        basis_vals.append(create_product([basis, ip_expr]))

    # Return (possible) sum of basis values.
    if len(basis_vals) > 1:
        return create_sum(basis_vals)
    elif basis_vals:
        return basis_vals[0]
    # Where did the values go?
    error("Values disappeared.")
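
The core idea, factoring each term into a basis-dependent part times a reusable geometry constant, can be illustrated with sympy instead of FFC's symbolic classes (the symbol names here are arbitrary):

import sympy as sp

phi0, phi1 = sp.symbols("phi0 phi1")          # basis-function values
J00, J11, detJ = sp.symbols("J00 J11 detJ")   # geometry quantities

expr = phi0 * J00 / detJ + phi0 * J11 / detJ + phi1 * J00 / detJ

# Collect on the basis symbols: each basis value now multiplies a single
# geometry factor that could be precomputed once (a "geo const") outside
# the loops over basis functions and integration points.
collected = sp.collect(sp.expand(expr), [phi0, phi1])
print(collected)   # phi0*(J00/detJ + J11/detJ) + phi1*J00/detJ, up to ordering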
Example #58
def _evaluate_basis(ufl_element, fiat_element):
    "Compute intermediate representation for evaluate_basis."
    cell = ufl_element.cell()
    cellname = cell.cellname()

    # Handle Mixed and EnrichedElements by extracting 'sub' elements.
    elements = _extract_elements(fiat_element)
    physical_offsets = _generate_physical_offsets(ufl_element)
    reference_offsets = _generate_reference_offsets(fiat_element)
    mappings = fiat_element.mapping()

    # This function is evidently not implemented for TensorElements
    for e in elements:
        if (len(e.value_shape()) > 1) and (e.num_sub_elements() != 1):
            return "Function not supported/implemented for TensorElements."

    # Handle QuadratureElement, not supported because the basis is only defined
    # at the dof coordinates where the value is 1, so not very interesting.
    for e in elements:
        if isinstance(e, QuadratureElement):
            return "Function not supported/implemented for QuadratureElement."
        if isinstance(e, DiscontinuousLagrangeTrace):
            return "Function not supported for Trace elements"

    # Initialise data with 'global' values.
    data = {"reference_value_size": ufl_element.reference_value_size(),
            "physical_value_size": ufl_element.value_size(),
            "cellname" : cellname,
            "topological_dimension" : cell.topological_dimension(),
            "geometric_dimension" : cell.geometric_dimension(),
            "space_dimension" : fiat_element.space_dimension(),
            "needs_oriented": needs_oriented_jacobian(fiat_element),
            "max_degree": max([e.degree() for e in elements])
            }

    # Loop element and space dimensions to generate dof data.
    dof = 0
    dofs_data = []
    for e in elements:
        num_components = product(e.value_shape())
        coeffs = e.get_coeffs()
        num_expansion_members = e.get_num_members(e.degree())
        dmats = e.dmats()

        # Extracted parts of dd below that are common for the element here.
        # These dict entries are added to each dof_data dict for each dof,
        # because that's what the code generation implementation expects.
        # If the code generation needs this structure to be optimized in the
        # future, we can store this data for each subelement instead of for each dof.
        subelement_data = {
            "embedded_degree" : e.degree(),
            "num_components" : num_components,
            "dmats" : dmats,
            "num_expansion_members": num_expansion_members,
            }
        value_rank = len(e.value_shape())

        for i in range(e.space_dimension()):
            if num_components == 1:
                coefficients = [coeffs[i]]
            elif value_rank == 1:
                # Handle coefficients for vector valued basis elements
                # [Raviart-Thomas, Brezzi-Douglas-Marini (BDM)].
                coefficients = [coeffs[i][c]
                                for c in range(num_components)]
            elif value_rank == 2:
                # Handle coefficients for tensor valued basis elements.
                # [Regge]
                coefficients = [coeffs[i][p][q]
                                for p in range(e.value_shape()[0])
                                for q in range(e.value_shape()[1])]
            else:
                error("Unknown situation with num_components > 1")

            dof_data = {
                "coeffs" : coefficients,
                "mapping" : mappings[dof],
                "physical_offset" : physical_offsets[dof],
                "reference_offset" : reference_offsets[dof],
                }
            # Still storing element data in dd to avoid rewriting dependent code
            dof_data.update(subelement_data)

            # This list will hold one dd dict for each dof
            dofs_data.append(dof_data)
            dof += 1

    data["dofs_data"] = dofs_data

    return data
    def _create_function_name(self, component, deriv, avg, is_quad_element, ufl_function, ffc_element):
        ffc_assert(ufl_function in self._function_replace_values, "Expecting ufl_function to have been mapped prior to this call.")

        # Get string for integration points.
        f_ip = "0" if (avg or self.points == 1) else format["integration points"]

        # Get the element counter.
        element_counter = self.element_map[1 if avg else self.points][ufl_function.element()]

        # Get current cell entity, with current restriction considered
        entity = self._get_current_entity()

        # Set to hold used nonzero columns
        used_nzcs = set()

        # Create basis name and map to correct basis and get info.
        generate_psi_name = format["psi name"]
        psi_name = generate_psi_name(element_counter, self.entitytype, entity, component, deriv, avg)
        psi_name, non_zeros, zeros, ones = self.name_map[psi_name]

        # If all basis are zero we just return None.
        if zeros and self.optimise_parameters["ignore zero tables"]:
            return self._format_scalar_value(None)[()]

        # Get the index range of the loop index.
        loop_index_range = shape(self.unique_tables[psi_name])[1]
        if loop_index_range > 1:
            # Pick first free index of secondary type
            # (could use primary indices, but it's better to avoid confusion).
            loop_index = format["free indices"][0]

        # If we have a quadrature element we can use the ip number to look
        # up the value directly. Need to add offset in case of components.
        if is_quad_element:
            quad_offset = 0
            if component:
                # FIXME: Should we add a member function elements() to FiniteElement?
                if isinstance(ffc_element, MixedElement):
                    for i in range(component):
                        quad_offset += ffc_element.elements()[i].space_dimension()
                elif component != 1:
                    error("Can't handle components different from 1 if we don't have a MixedElement.")
                else:
                    quad_offset += ffc_element.space_dimension()
            if quad_offset:
                coefficient_access = format["add"]([f_ip, str(quad_offset)])
            else:
                if non_zeros and f_ip == "0":
                    # If we have non zero column mapping but only one value just pick it.
                    # MSA: This should be an exact refactoring of the previous logic,
                    #      but I'm not sure if these lines were originally intended
                    #      here in the quad_element section, or what this even does:
                    coefficient_access = str(non_zeros[1][0])
                else:
                    coefficient_access = f_ip

        elif non_zeros:
            if loop_index_range == 1:
                # If we have non zero column mapping but only one value just pick it.
                coefficient_access = str(non_zeros[1][0])
            else:
                used_nzcs.add(non_zeros[0])
                coefficient_access = format["component"](format["nonzero columns"](non_zeros[0]), loop_index)

        elif loop_index_range == 1:
            # If the loop index range is one we can look up the first component
            # in the coefficient array.
            coefficient_access = "0"

        else:
            # Or just set default coefficient access.
            coefficient_access = loop_index

        # Offset by element space dimension in case of negative restriction.
        offset = {"+": "", "-": str(ffc_element.space_dimension()), None: ""}[self.restriction]
        if offset:
            coefficient_access = format["add"]([coefficient_access, offset])

        # Try to evaluate coefficient access ("3 + 2" --> "5").
        try:
            coefficient_access = str(eval(coefficient_access))
            C_ACCESS = GEO
        except:
            C_ACCESS = IP
        # Format coefficient access
        coefficient = format["coefficient"](str(ufl_function.count()), coefficient_access)

        # Build and cache some function data only if we need the basis
        # MSA: I don't understand the mix of loop index range check and ones check here, but that's how it was.
        if is_quad_element or (loop_index_range == 1 and ones and self.optimise_parameters["ignore ones"]):
            # If we only have ones or if we have a quadrature element we don't need the basis.
            function_symbol_name = coefficient
            F_ACCESS = C_ACCESS

        else:
            # Add basis name to set of used tables and add matrix access.
            # TODO: We should first add this table if the function is used later
            # in the expressions. If some term is multiplied by zero and it falls
            # away there is no need to compute the function value
            self.used_psi_tables.add(psi_name)

            # Create basis access, we never need to map the entry in the basis
            # table since we will either loop the entire space dimension or the
            # non-zeros.
            basis_index = "0" if loop_index_range == 1 else loop_index
            basis_access = format["component"]("", [f_ip, basis_index])
            basis_name = psi_name + basis_access
            # Try to set access to the outermost possible loop
            if f_ip == "0" and basis_access == "0":
                B_ACCESS = GEO
                F_ACCESS = C_ACCESS
            else:
                B_ACCESS = IP
                F_ACCESS = IP

            # Format expression for function
            function_expr = self._create_product([self._create_symbol(basis_name, B_ACCESS)[()],
                                                  self._create_symbol(coefficient, C_ACCESS)[()]])

            # Check if the expression to compute the function value is already in
            # the dictionary of used function. If not, generate a new name and add.
            data = self.function_data.get(function_expr)
            if data is None:
                function_count = len(self.function_data)
                data = (function_count, loop_index_range,
                        self._count_operations(function_expr),
                        psi_name, used_nzcs, ufl_function.element())
                self.function_data[function_expr] = data
            function_symbol_name = format["function value"](data[0])

        # TODO: This access stuff was changed subtly during my refactoring, the
        # X_ACCESS vars is an attempt at making it right, make sure it is correct now!
        return self._create_symbol(function_symbol_name, F_ACCESS)[()]