Example #1
    def index_sum(self, o, ops):
        summand, mi = o.ufl_operands
        ic = mi[0].count()
        fi = summand.ufl_free_indices
        fid = summand.ufl_index_dimensions
        ipos = fi.index(ic)
        d = fid[ipos]

        # Compute "macro-dimensions" before and after i in the total shape of a
        predim = product(summand.ufl_shape) * product(fid[:ipos])
        postdim = product(fid[ipos+1:])

        # Map each flattened total component of summand to
        # flattened total component of indexsum o by removing
        # axis corresponding to summation index ii.
        ss = ops[0]  # Scalar subexpressions of summand
        if len(ss) != predim * postdim * d:
            error("Mismatching number of subexpressions.")
        sops = []
        for i in range(predim):
            iind = i * (postdim * d)
            for k in range(postdim):
                ind = iind + k
                sops.append([ss[ind + j * postdim] for j in range(d)])

        # For each scalar output component, sum over collected subcomponents
        # TODO: Need to split this into binary additions to work with future CRSArray format,
        #       i.e. emitting more expressions than there are symbols for this node.
        results = [sum(sop) for sop in sops]
        return results
Example #2
def handle_index_sum(o, ops):
    summand, mi = o.ufl_operands
    ic = mi[0].count()
    fi = summand.ufl_free_indices
    fid = summand.ufl_index_dimensions
    ipos = fi.index(ic)
    d = fid[ipos]

    # Compute "macro-dimensions" before and after i in the total shape of a
    predim = ufl.product(summand.ufl_shape) * ufl.product(fid[:ipos])
    postdim = ufl.product(fid[ipos + 1:])

    # Map each flattened total component of summand to
    # flattened total component of indexsum o by removing
    # axis corresponding to summation index ii.
    ss = ops[0]  # Scalar subexpressions of summand
    if len(ss) != predim * postdim * d:
        raise RuntimeError("Mismatching number of subexpressions.")
    sops = []
    for i in range(predim):
        iind = i * (postdim * d)
        for k in range(postdim):
            ind = iind + k
            sops.append([ss[ind + j * postdim] for j in range(d)])

    # For each scalar output component, sum over collected subcomponents
    # TODO: Need to split this into binary additions to work with future CRSArray format,
    #       i.e. emitting more expressions than there are symbols for this node.
    results = [sum(sop) for sop in sops]
    return results
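The flattening above can be checked in isolation. Below is a minimal standalone sketch (plain Python with made-up numbers, not part of the UFL/FFC sources): assume a summand with value shape (2,) and free indices (i, j) of dimensions 3 and 2, summed over i, so predim = 2, postdim = 2 and d = 3.

def index_sum_flatten(ss, predim, postdim, d):
    # Group the flattened subexpressions of the summand by output component,
    # collecting the d entries that differ only in the summation index.
    sops = []
    for i in range(predim):
        iind = i * (postdim * d)
        for k in range(postdim):
            ind = iind + k
            sops.append([ss[ind + j * postdim] for j in range(d)])
    return [sum(sop) for sop in sops]

ss = list(range(2 * 2 * 3))                             # 12 scalar subexpressions, numbered 0..11
print(index_sum_flatten(ss, predim=2, postdim=2, d=3))  # [6, 9, 24, 27]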
Example #3
def generator(ir, parameters):
    """Generate UFC code for a finite element."""
    d = {}
    d["factory_name"] = ir.classname
    d["signature"] = "\"{}\"".format(ir.signature)
    d["geometric_dimension"] = ir.geometric_dimension
    d["topological_dimension"] = ir.topological_dimension
    d["cell_shape"] = ir.cell_shape
    d["space_dimension"] = ir.space_dimension
    d["value_rank"] = len(ir.value_shape)
    d["value_size"] = ufl.product(ir.value_shape)
    d["reference_value_rank"] = len(ir.reference_value_shape)
    d["reference_value_size"] = ufl.product(ir.reference_value_shape)
    d["degree"] = ir.degree
    d["family"] = "\"{}\"".format(ir.family)
    d["num_sub_elements"] = ir.num_sub_elements

    import ffc.codegeneration.C.cnodes as L

    d["value_dimension"] = value_dimension(L, ir.value_shape)
    d["reference_value_dimension"] = reference_value_dimension(L, ir.reference_value_shape)

    statements = evaluate_reference_basis(L, ir, parameters)
    d["evaluate_reference_basis"] = L.StatementList(statements)

    statements = evaluate_reference_basis_derivatives(L, ir, parameters)
    d["evaluate_reference_basis_derivatives"] = L.StatementList(statements)

    statements = transform_reference_basis_derivatives(L, ir, parameters)
    d["transform_reference_basis_derivatives"] = L.StatementList(statements)

    statements = transform_values(L, ir, parameters)
    d["transform_values"] = L.StatementList(statements)

    statements = tabulate_reference_dof_coordinates(L, ir, parameters)
    d["tabulate_reference_dof_coordinates"] = L.StatementList(statements)

    statements = create_sub_element(L, ir)
    d["sub_element_declaration"] = sub_element_declaration(L, ir)
    d["create_sub_element"] = statements

    # Check that no keys are redundant or have been missed
    from string import Formatter
    fieldnames = [
        fname for _, fname, _, _ in Formatter().parse(ufc_finite_element.factory) if fname
    ]
    assert set(fieldnames) == set(d.keys()), \
        "Mismatch between keys in template and in formatting dict"

    # Format implementation code
    implementation = ufc_finite_element.factory.format_map(d)

    # Format declaration
    declaration = ufc_finite_element.declaration.format(factory_name=ir.classname)

    return declaration, implementation
Example #4
def _read_from_xdmf_file(fun, directory, filename, suffix, components=None):
    if components is not None:
        filename = filename + "_component_" + "".join(components)
        function_name = "function_" + "".join(components)
    else:
        function_name = "function"
    fun_rank = fun.value_rank()
    fun_dim = product(fun.value_shape())
    assert fun_rank <= 2
    if ((fun_rank == 1 and fun_dim not in (2, 3))
            or (fun_rank == 2 and fun_dim not in (4, 9))):
        fun_V = fun.function_space()
        for i in range(fun_dim):
            if components is not None:
                filename_i = filename + "_subcomponent_" + str(i)
            else:
                filename_i = filename + "_component_" + str(i)
            fun_i_V = get_function_subspace(fun_V, i)
            fun_i = Function(fun_i_V)
            if not _read_from_xdmf_file(fun_i, directory, filename_i, suffix,
                                        None):
                return False
            else:
                assign(fun.sub(i), fun_i)
        return True
    else:
        full_filename_checkpoint = os.path.join(str(directory),
                                                filename + "_checkpoint.xdmf")
        file_exists = False
        if is_io_process() and os.path.exists(full_filename_checkpoint):
            file_exists = True
        file_exists = is_io_process.mpi_comm.bcast(file_exists,
                                                   root=is_io_process.root)
        if file_exists:
            if suffix is not None:
                assert SuffixIO.exists_file(directory, filename + "_suffix")
                last_suffix = SuffixIO.load_file(directory,
                                                 filename + "_suffix")
                if suffix <= last_suffix:
                    if full_filename_checkpoint in _all_xdmf_files:
                        assert _all_xdmf_latest_suffix[
                            full_filename_checkpoint] == suffix - 1
                        _all_xdmf_latest_suffix[
                            full_filename_checkpoint] = suffix
                    else:
                        assert suffix == 0
                        _all_xdmf_files[full_filename_checkpoint] = XDMFFile(
                            full_filename_checkpoint)
                        _all_xdmf_latest_suffix[full_filename_checkpoint] = 0
                    _all_xdmf_files[full_filename_checkpoint].read_checkpoint(
                        fun, function_name, suffix)
                    return True
                else:
                    return False
            else:
                with XDMFFile(full_filename_checkpoint) as file_checkpoint:
                    file_checkpoint.read_checkpoint(fun, function_name, 0)
                return True
        else:
            return False
Example #5
    def __call__(self, *args):

        # Assume all args are x argument
        x = np.array(args)

        dim = self.ufl_domain().geometric_dimension()
        if x.shape[-1] != dim:
            raise TypeError("expected the geometry argument to be of "
                            "length %d" % dim)

        value_size = ufl.product(self.ufl_element().value_shape())
        if cpp.common.has_petsc_complex():
            values = np.empty((1, value_size), dtype=np.complex128)
        else:
            values = np.empty((1, value_size))

        # The actual evaluation
        self._cpp_object.eval(values, x)

        # If the value is scalar, return it as a plain scalar.
        if value_size == 1:
            return values[0]

        print("Returning values: ", values)

        return values
Example #6
def _read_from_file(fun, directory, filename, suffix, components=None):
    if components is not None:
        filename = filename + "_component_" + "".join(components)
        function_name = "function_" + "".join(components)
    else:
        function_name = "function"
    fun_rank = fun.value_rank()
    fun_dim = product(fun.value_shape())
    assert fun_rank <= 2
    if ((fun_rank == 1 and fun_dim not in (2, 3))
            or (fun_rank == 2 and fun_dim not in (4, 9))):
        funs = fun.split(deepcopy=True)
        for (i, fun_i) in enumerate(funs):
            if components is not None:
                filename_i = filename + "_subcomponent_" + str(i)
            else:
                filename_i = filename + "_component_" + str(i)
            _read_from_file(fun_i, directory, filename_i, suffix, None)
            assign(fun.sub(i), fun_i)
    else:
        if suffix is not None:
            if suffix == 0:
                # Remove from storage and re-create
                try:
                    del _all_solution_files[(directory, filename)]
                except KeyError:
                    pass
                _all_solution_files[(directory, filename)] = SolutionFile(
                    directory, filename)
            file_ = _all_solution_files[(directory, filename)]
            file_.read(fun, function_name, suffix)
        else:
            file_ = SolutionFile(directory, filename)
            file_.read(fun, function_name, 0)
Example #7
    def eval(self, x: np.ndarray, cells: np.ndarray, u=None) -> np.ndarray:
        """Evaluate Function at points x, where x has shape (num_points, 3),
        and cells has shape (num_points,) and cell[i] is the index of the
        cell containing point x[i]. If the cell index is negative the
        point is ignored."""

        # Make sure input coordinates are a NumPy array
        x = np.asarray(x, dtype=np.float64)
        assert x.ndim < 3
        num_points = x.shape[0] if x.ndim == 2 else 1
        x = np.reshape(x, (num_points, -1))
        if x.shape[1] != 3:
            raise ValueError(
                "Coordinate(s) for Function evaluation must have length 3.")

        # Make sure cells are a NumPy array
        cells = np.asarray(cells, dtype=np.int32)
        assert cells.ndim < 2
        num_points_c = cells.shape[0] if cells.ndim == 1 else 1
        cells = np.reshape(cells, num_points_c)

        # Allocate memory for return value if not provided
        if u is None:
            value_size = ufl.product(self.ufl_element().value_shape())
            if common.has_petsc_complex:
                u = np.empty((num_points, value_size), dtype=np.complex128)
            else:
                u = np.empty((num_points, value_size))

        self._cpp_object.eval(x, cells, u)
        if num_points == 1:
            u = np.reshape(u, (-1, ))
        return u
Example #8
def build_scalar_graph(expressions):
    """Build list representation of expression graph covering the given expressions.

    TODO: Renaming, refactoring and cleanup of the graph building algorithms used in here
    """

    # Build the initial coarse computational graph of the expression
    G = build_graph(expressions)

    assert len(expressions) == 1, "FIXME: Multiple expressions in graph building needs more work from this point on."

    # Build more fine grained computational graph of scalar subexpressions
    # TODO: Make it so that
    #   expressions[k] <-> NV[nvs[k][:]],
    #   len(nvs[k]) == value_size(expressions[k])
    scalar_expressions = rebuild_with_scalar_subexpressions(G)

    # Sanity check on number of scalar symbols/components
    assert len(scalar_expressions) == sum(product(expr.ufl_shape) for expr in expressions)

    # Build new list representation of graph where all
    # vertices of V represent single scalar operations
    e2i, V, V_targets = build_scalar_graph_vertices(scalar_expressions)

    # Compute sparse dependency matrix
    V_deps = compute_dependencies(e2i, V)

    return V, V_deps, V_targets
Example #9
def _write_to_pvd_file(fun, directory, filename, suffix, components=None):
    if components is not None:
        filename = filename + "_component_" + "".join(components)
    fun_rank = fun.value_rank()
    fun_dim = product(fun.value_shape())
    assert fun_rank <= 2
    if ((fun_rank == 1 and fun_dim not in (2, 3))
            or (fun_rank == 2 and fun_dim not in (4, 9))):
        funs = fun.split(deepcopy=True)
        for (i, fun_i) in enumerate(funs):
            if components is not None:
                filename_i = filename + "_subcomponent_" + str(i)
            else:
                filename_i = filename + "_component_" + str(i)
            _write_to_pvd_file(fun_i, directory, filename_i, suffix)
    else:
        full_filename = os.path.join(str(directory), filename + ".pvd")
        if suffix is not None:
            if full_filename in _all_pvd_files:
                assert _all_pvd_latest_suffix[full_filename] == suffix - 1
                _all_pvd_latest_suffix[full_filename] = suffix
            else:
                assert suffix == 0
                _all_pvd_files[full_filename] = File(full_filename,
                                                     "compressed")
                _all_pvd_latest_suffix[full_filename] = 0
                _all_pvd_functions[full_filename] = fun.copy(deepcopy=True)
            # Make sure to always use the same function, otherwise dolfin
            # changes the numbering and visualization is difficult in ParaView
            assign(_all_pvd_functions[full_filename], fun)
            _all_pvd_files[full_filename] << _all_pvd_functions[full_filename]
        else:
            file_ = File(full_filename, "compressed")
            file_ << fun
Example #10
    def __init__(self,
                 ufl_expression: ufl.core.expr.Expr,
                 x: np.ndarray,
                 form_compiler_parameters: dict = {}, jit_parameters: dict = {}):
        """Create dolfinx Expression.

        Represents a mathematical expression evaluated at a pre-defined set of
        points on the reference cell. This class closely follows the concept of a
        UFC Expression.

        This functionality can be used to evaluate a gradient of a Function at
        the quadrature points in all cells. This evaluated gradient can then be
        used as input to a non-FEniCS function that calculates a material
        constitutive model.

        Parameters
        ----------
        ufl_expression
            Pure UFL expression
        x
            Array of points of shape (num_points, tdim) on the reference
            element.
        form_compiler_parameters
            Parameters used in FFCX compilation of this Expression. Run `ffcx
            --help` in the commandline to see all available options.
        jit_parameters
            Parameters controlling JIT compilation of C code.

        Note
        ----
        This wrapper is responsible for the FFCX compilation of the UFL Expr
        and attaching the correct data to the underlying C++ Expression.
        """
        assert x.ndim < 3
        num_points = x.shape[0] if x.ndim == 2 else 1
        x = np.reshape(x, (num_points, -1))

        mesh = ufl_expression.ufl_domain().ufl_cargo()

        # Compile UFL expression with JIT
        ufc_expression = jit.ffcx_jit(mesh.mpi_comm(), (ufl_expression, x),
                                      form_compiler_parameters=form_compiler_parameters,
                                      jit_parameters=jit_parameters)
        self._ufl_expression = ufl_expression
        self._ufc_expression = ufc_expression

        # Setup data (evaluation points, coefficients, constants, mesh, value_size).
        # Tabulation function.
        ffi = cffi.FFI()
        fn = ffi.cast("uintptr_t", ufc_expression.tabulate_expression)

        value_size = ufl.product(self.ufl_expression.ufl_shape)

        ufl_coefficients = ufl.algorithms.extract_coefficients(ufl_expression)
        coefficients = [ufl_coefficient._cpp_object for ufl_coefficient in ufl_coefficients]

        ufl_constants = ufl.algorithms.analysis.extract_constants(ufl_expression)
        constants = [ufl_constant._cpp_object for ufl_constant in ufl_constants]

        self._cpp_object = cpp.function.Expression(coefficients, constants, mesh, x, fn, value_size)
Example #11
def build_node_sizes(V_shapes):
    "Compute all the products of a sequence of shapes."
    nv = len(V_shapes)
    V_sizes = numpy.zeros(nv, dtype=int)
    for i, sh in enumerate(V_shapes):
        V_sizes[i] = product(sh)
    return V_sizes
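Here product is UFL's product over shape tuples, so a scalar node (empty shape) gets size 1. A small usage sketch with hypothetical shapes, assuming product resolves to ufl.product:

import numpy
import ufl

V_shapes = [(), (2,), (2, 3)]        # a scalar, a vector and a matrix node
V_sizes = numpy.zeros(len(V_shapes), dtype=int)
for i, sh in enumerate(V_shapes):
    V_sizes[i] = ufl.product(sh)     # ufl.product(()) == 1
print(V_sizes)                       # [1 2 6]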
Example #12
    def derivative(self, y):
        if len(self.roots) == 0:
            deta = Function(y.function_space()).vector()
            return deta

        p = self.power
        factors = []
        dfactors = []
        dnormsqs = []
        normsqs = []

        for root in self.roots:
            form = self.normsq(y, root)
            normsqs.append(assemble(form))
            dnormsqs.append(assemble(derivative(form, y)))

        for normsq in normsqs:
            factor = normsq**(-p / 2.0) + self.shift
            dfactor = (-p / 2.0) * normsq**((-p / 2.0) - 1.0)

            factors.append(factor)
            dfactors.append(dfactor)

        eta = product(factors)

        deta = Function(y.function_space()).vector()

        for (solution, factor, dfactor,
             dnormsq) in zip(self.roots, factors, dfactors, dnormsqs):
            deta.axpy(float((eta / factor) * dfactor), dnormsq)

        return deta
Example #13
def build_scalar_graph(expressions):
    """Build list representation of expression graph covering the given expressions.

    TODO: Renaming, refactoring and cleanup of the graph building algorithms used in here
    """

    # Build the initial coarse computational graph of the expression
    G = build_graph(expressions)

    assert len(
        expressions
    ) == 1, "FIXME: Multiple expressions in graph building needs more work from this point on."

    # Build more fine grained computational graph of scalar subexpressions
    # TODO: Make it so that
    #   expressions[k] <-> NV[nvs[k][:]],
    #   len(nvs[k]) == value_size(expressions[k])
    scalar_expressions = rebuild_with_scalar_subexpressions(G)

    # Sanity check on number of scalar symbols/components
    assert len(scalar_expressions) == sum(
        product(expr.ufl_shape) for expr in expressions)

    # Build new list representation of graph where all
    # vertices of V represent single scalar operations
    e2i, V, V_targets = build_scalar_graph_vertices(scalar_expressions)

    # Compute sparse dependency matrix
    V_deps = compute_dependencies(e2i, V)

    return V, V_deps, V_targets
Example #14
def build_node_sizes(V_shapes):
    "Compute all the products of a sequence of shapes."
    nv = len(V_shapes)
    V_sizes = numpy.zeros(nv, dtype=int)
    for i, sh in enumerate(V_shapes):
        V_sizes[i] = product(sh)
    return V_sizes
Example #15
    def generate_quadrature_tables(self):
        """Generate static tables of quadrature points and weights."""
        L = self.backend.language

        parts = []

        # No quadrature tables for custom (given argument)
        # or point (evaluation in single vertex)
        skip = ufl.measure.custom_integral_types + ufl.measure.point_integral_types
        if self.ir.integral_type in skip:
            return parts

        alignas = self.ir.params["alignas"]

        # Loop over quadrature rules
        for num_points in self.ir.all_num_points:
            varying_ir = self.ir.varying_irs[num_points]

            points, weights = self.ir.quadrature_rules[num_points]
            assert num_points == len(weights)
            assert num_points == points.shape[0]

            # Generate quadrature weights array
            if varying_ir["need_weights"]:
                wsym = self.backend.symbols.weights_table(num_points)
                parts += [
                    L.ArrayDecl("static const ufc_scalar_t",
                                wsym,
                                num_points,
                                weights,
                                alignas=alignas)
                ]

            # Generate quadrature points array
            N = ufl.product(points.shape)
            if varying_ir["need_points"] and N:
                # Flatten array: (TODO: avoid flattening here, it makes padding harder)
                flattened_points = points.reshape(N)
                psym = self.backend.symbols.points_table(num_points)
                parts += [
                    L.ArrayDecl("static const ufc_scalar_t",
                                psym,
                                N,
                                flattened_points,
                                alignas=alignas)
                ]

        # Add leading comment if there are any tables
        parts = L.commented_code_list(parts, "Quadrature rules")
        return parts
Example #16
    def ufl_evaluate(self, x, component, derivatives):
        """Function used by ufl to evaluate the Expression"""
        assert derivatives == ()  # TODO: Handle derivatives

        if component:
            shape = self.ufl_shape
            assert len(shape) == len(component)
            value_size = product(shape)
            index = flatten_multiindex(component, shape_to_strides(shape))
            values = numpy.zeros(value_size)
            # FIXME: use a function with a return value
            self(*x, values=values)
            return values[index]
        else:
            # Scalar evaluation
            return self(*x)
Example #17
    def ufl_evaluate(self, x, component, derivatives):
        """Function used by ufl to evaluate the Expression"""
        assert derivatives == ()  # TODO: Handle derivatives

        if component:
            shape = self.ufl_shape
            assert len(shape) == len(component)
            value_size = product(shape)
            index = flatten_multiindex(component, shape_to_strides(shape))
            values = numpy.zeros(value_size)
            # FIXME: use a function with a return value
            self(*x, values=values)
            return values[index]
        else:
            # Scalar evaluation
            return self(*x)
Example #18
    def ufl_evaluate(self, x, component, derivatives):
        """Function used by ufl to evaluate the Expression"""
        # FIXME: same as dolfinx.expression.Expression version. Find way
        # to re-use.
        assert derivatives == ()  # TODO: Handle derivatives

        if component:
            shape = self.ufl_shape
            assert len(shape) == len(component)
            value_size = ufl.product(shape)
            index = ufl.utils.indexflattening.flatten_multiindex(
                component, ufl.utils.indexflattening.shape_to_strides(shape))
            values = np.zeros(value_size)
            # FIXME: use a function with a return value
            self(*x, values=values)
            return values[index]
        else:
            # Scalar evaluation
            return self(*x)
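The flattening helpers referenced above map a component tuple to a single flat index using row-major strides. A minimal sketch, assuming the UFL helpers behave as used in the snippet:

from ufl.utils.indexflattening import flatten_multiindex, shape_to_strides

shape = (2, 3)
strides = shape_to_strides(shape)            # (3, 1) for row-major ordering
print(flatten_multiindex((1, 2), strides))   # 1*3 + 2*1 = 5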
Example #19
    def __call__(self, x: np.ndarray) -> np.ndarray:
        """Evaluate Function at points x, where x has shape (num_points, gdim)"""
        _x = np.asarray(x, dtype=np.float64)
        num_points = _x.shape[0] if len(_x.shape) > 1 else 1
        _x = np.reshape(_x, (num_points, -1))
        if _x.shape[1] != self.geometric_dimension():
            raise ValueError("Wrong geometric dimension for coordinate(s).")

        value_size = ufl.product(self.ufl_element().value_shape())
        if common.has_petsc_complex:
            values = np.empty((num_points, value_size), dtype=np.complex128)
        else:
            values = np.empty((num_points, value_size))

        # Call the evaluation
        self._cpp_object.eval(values, _x)
        if num_points == 1:
            values = np.reshape(values, (-1, ))

        return values
Example #20
    def generate(self):
        L = self.backend.language

        parts = []

        parts += self.generate_element_tables()
        parts += self.generate_unstructured_piecewise_partition()

        all_preparts = []
        all_quadparts = []
        all_postparts = []

        preparts, quadparts, postparts = self.generate_quadrature_loop()
        all_preparts += preparts
        all_quadparts += quadparts
        all_postparts += postparts

        preparts, quadparts, postparts = self.generate_dofblock_partition(
            quadrature_independent=True)
        all_preparts += preparts
        all_quadparts += quadparts
        all_postparts += postparts

        all_finalizeparts = []

        # Initialize a tensor to zeros
        A_values = [0.0] * ufl.product(
            self.ir.expression_shape + [self.num_points] + self.ir.tensor_shape)
        all_finalizeparts = self.generate_tensor_value_initialization(A_values)

        # Generate code to add reusable blocks B* to element tensor A
        all_finalizeparts += self.generate_copyout_statements()

        # Collect parts before, during, and after quadrature loops
        parts += all_preparts
        parts += all_quadparts
        parts += all_postparts
        parts += all_finalizeparts

        return L.StatementList(parts)
Example #21
def _write_to_file(fun, directory, filename, suffix, components=None):
    if components is not None:
        filename = filename + "_component_" + "".join(components)
        function_name = "function_" + "".join(components)
    else:
        function_name = "function"
    fun_rank = fun.value_rank()
    fun_dim = product(fun.value_shape())
    assert fun_rank <= 2
    if ((fun_rank == 1 and fun_dim not in (2, 3))
            or (fun_rank == 2 and fun_dim not in (4, 9))):
        funs = fun.split(deepcopy=True)
        for (i, fun_i) in enumerate(funs):
            if components is not None:
                filename_i = filename + "_subcomponent_" + str(i)
            else:
                filename_i = filename + "_component_" + str(i)
            _write_to_file(fun_i, directory, filename_i, suffix, None)
    else:
        if suffix is not None:
            if suffix == 0:
                # Remove existing files if any, as new functions should not be appended, but rather overwrite existing functions
                SolutionFile.remove_files(directory, filename)
                # Remove from storage and re-create
                try:
                    del _all_solution_files[(directory, filename)]
                except KeyError:
                    pass
                _all_solution_files[(directory, filename)] = SolutionFile(
                    directory, filename)
            file_ = _all_solution_files[(directory, filename)]
            file_.write(fun, function_name, suffix)
        else:
            # Remove existing files if any, as new functions should not be appended, but rather overwrite existing functions
            SolutionFile.remove_files(directory, filename)
            # Write function to file
            file_ = SolutionFile(directory, filename)
            file_.write(fun, function_name, 0)
Example #22
    def form_argument(self, v):
        """Create new symbols for expressions that represent new values."""
        symmetry = v.ufl_element().symmetry()

        if symmetry:
            # Build symbols with symmetric components skipped
            symbols = []
            mapped_symbols = {}
            for c in ufl.permutation.compute_indices(v.ufl_shape):
                # Build mapped component mc with symmetries from element considered
                mc = symmetry.get(c, c)

                # Get existing symbol or create new and store with mapped component mc as key
                s = mapped_symbols.get(mc)
                if s is None:
                    s = self.new_symbol()
                    mapped_symbols[mc] = s
                symbols.append(s)
        else:
            n = ufl.product(v.ufl_shape + v.ufl_index_dimensions)
            symbols = self.new_symbols(n)

        return symbols
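The effect of the symmetry mapping can be illustrated standalone. A sketch with a hypothetical symmetric 2x2 element whose symmetry map identifies (1, 0) with (0, 1), using an integer counter as a stand-in for self.new_symbol():

import ufl.permutation

symmetry = {(1, 0): (0, 1)}          # hypothetical symmetry of a 2x2 element
symbols, mapped_symbols, counter = [], {}, 0
for c in ufl.permutation.compute_indices((2, 2)):
    mc = symmetry.get(c, c)
    s = mapped_symbols.get(mc)
    if s is None:
        s = counter                  # stand-in for self.new_symbol()
        counter += 1
        mapped_symbols[mc] = s
    symbols.append(s)
print(symbols)                       # [0, 1, 1, 2]: 3 distinct symbols for 4 components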
Example #23
def map_indexed_arg_components(indexed):
    """Build integer list mapping between flattended components
    of indexed expression and its underlying tensor-valued subexpression."""

    assert isinstance(indexed, Indexed)

    # AKA indexed = tensor[multiindex]
    tensor, multiindex = indexed.ufl_operands

    # AKA e1 = e2[multiindex]
    # (this renaming is historical, but kept for consistency with all the variables *1,*2 below)
    e2 = tensor
    e1 = indexed

    # Get tensor and index shape
    sh1 = e1.ufl_shape
    sh2 = e2.ufl_shape
    fi1 = e1.ufl_free_indices
    fi2 = e2.ufl_free_indices
    fid1 = e1.ufl_index_dimensions
    fid2 = e2.ufl_index_dimensions

    # Compute regular and total shape
    tsh1 = sh1 + fid1
    tsh2 = sh2 + fid2
    # r1 = len(tsh1)
    r2 = len(tsh2)
    # str1 = shape_to_strides(tsh1)
    str2 = shape_to_strides(tsh2)
    assert not sh1
    assert sh2  # Must have shape to be indexed in the first place
    assert product(tsh1) <= product(tsh2)

    # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position
    ind2_to_ind1_map = [None] * len(fi2)
    for k, i in enumerate(fi2):
        ind2_to_ind1_map[k] = fi1.index(i)

    # Build map from fi1/fid1 position to mi position
    nmui = len(multiindex)
    multiindex_to_ind1_map = [None] * nmui
    for k, i in enumerate(multiindex):
        if isinstance(i, Index):
            multiindex_to_ind1_map[k] = fi1.index(i.count())

    # Build map from flattened e1 component to flattened e2 component
    perm1 = compute_indices(tsh1)
    ni1 = product(tsh1)

    # Situation: e1 = e2[mi]
    d1 = [None] * ni1
    p2 = [None] * r2
    assert len(sh2) == nmui
    for k, i in enumerate(multiindex):
        if isinstance(i, FixedIndex):
            p2[k] = int(i)
    for c1, p1 in enumerate(perm1):
        for k, i in enumerate(multiindex):
            if isinstance(i, Index):
                p2[k] = p1[multiindex_to_ind1_map[k]]
        for k, i in enumerate(ind2_to_ind1_map):
            p2[nmui + k] = p1[i]
        c2 = flatten_multiindex(p2, str2)
        d1[c1] = c2

    # Consistency checks
    assert all(isinstance(x, int) for x in d1)
    assert len(set(d1)) == len(d1)
    return d1
Example #24
def reference_value_size(L, reference_value_shape):
    return L.Return(ufl.product(reference_value_shape))
Example #25
    def _modified_terminal(self, v):
        """Handle modified terminal.

        Modifiers:
        ---------
        terminal           - the underlying Terminal object
        global_derivatives - tuple of ints, each meaning derivative in that global direction
        local_derivatives  - tuple of ints, each meaning derivative in that local direction
        reference_value    - bool, whether this is represented in reference frame
        averaged           - None, 'facet' or 'cell'
        restriction        - None, '+' or '-'
        component          - tuple of ints, the global component of the Terminal
        flat_component     - single int, flattened local component of the Terminal, considering symmetry

        """
        # (1) mt.terminal.ufl_shape defines a core indexing space UNLESS mt.reference_value,
        #     in which case the reference value shape of the element must be used.
        # (2) mt.terminal.ufl_element().symmetry() defines core symmetries
        # (3) averaging and restrictions define distinct symbols, no additional symmetries
        # (4) two or more grad/reference_grad defines distinct symbols with additional symmetries

        # v is not necessarily scalar here; indexing in (0,...,0) picks the first scalar component
        # to analyse, which should be sufficient to get the base shape and derivatives
        if v.ufl_shape:
            mt = analyse_modified_terminal(v[(0, ) * len(v.ufl_shape)])
        else:
            mt = analyse_modified_terminal(v)

        # Get derivatives
        num_ld = len(mt.local_derivatives)
        num_gd = len(mt.global_derivatives)
        assert not (num_ld and num_gd)
        if num_ld:
            domain = mt.terminal.ufl_domain()
            tdim = domain.topological_dimension()
            d_components = ufl.permutation.compute_indices((tdim, ) * num_ld)
        elif num_gd:
            domain = mt.terminal.ufl_domain()
            gdim = domain.geometric_dimension()
            d_components = ufl.permutation.compute_indices((gdim, ) * num_gd)
        else:
            d_components = [()]

        # Get base shape without the derivative axes
        base_components = ufl.permutation.compute_indices(mt.base_shape)

        # Build symbols with symmetric components and derivatives skipped
        symbols = []
        mapped_symbols = {}
        for bc in base_components:
            for dc in d_components:
                # Build mapped component mc with symmetries from element
                # and derivatives combined
                mbc = mt.base_symmetry.get(bc, bc)
                mdc = tuple(sorted(dc))
                mc = mbc + mdc

                # Get existing symbol or create new and store with
                # mapped component mc as key
                s = mapped_symbols.get(mc)
                if s is None:
                    s = self.new_symbol()
                    mapped_symbols[mc] = s
                symbols.append(s)

        # Consistency check before returning symbols
        assert not v.ufl_free_indices
        if ufl.product(v.ufl_shape) != len(symbols):
            raise RuntimeError("Internal error in value numbering.")
        return symbols
Example #26
def map_indexed_arg_components(indexed):
    """Build a map from flattened components to subexpression.

    Builds integer list mapping between flattened components
    of indexed expression and its underlying tensor-valued subexpression."""

    assert isinstance(indexed, Indexed)

    # AKA indexed = tensor[multiindex]
    tensor, multiindex = indexed.ufl_operands

    # AKA e1 = e2[multiindex]
    # (this renaming is historical, but kept for consistency with all the variables *1,*2 below)
    e2 = tensor
    e1 = indexed

    # Get tensor and index shape
    sh1 = e1.ufl_shape
    sh2 = e2.ufl_shape
    fi1 = e1.ufl_free_indices
    fi2 = e2.ufl_free_indices
    fid1 = e1.ufl_index_dimensions
    fid2 = e2.ufl_index_dimensions

    # Compute regular and total shape
    tsh1 = sh1 + fid1
    tsh2 = sh2 + fid2
    # r1 = len(tsh1)
    r2 = len(tsh2)
    # str1 = shape_to_strides(tsh1)
    str2 = shape_to_strides(tsh2)
    assert not sh1
    assert sh2  # Must have shape to be indexed in the first place
    assert ufl.product(tsh1) <= ufl.product(tsh2)

    # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position
    ind2_to_ind1_map = [None] * len(fi2)
    for k, i in enumerate(fi2):
        ind2_to_ind1_map[k] = fi1.index(i)

    # Build map from fi1/fid1 position to mi position
    nmui = len(multiindex)
    multiindex_to_ind1_map = [None] * nmui
    for k, i in enumerate(multiindex):
        if isinstance(i, Index):
            multiindex_to_ind1_map[k] = fi1.index(i.count())

    # Build map from flattened e1 component to flattened e2 component
    perm1 = compute_indices(tsh1)
    ni1 = ufl.product(tsh1)

    # Situation: e1 = e2[mi]
    d1 = [None] * ni1
    p2 = [None] * r2
    assert len(sh2) == nmui
    for k, i in enumerate(multiindex):
        if isinstance(i, FixedIndex):
            p2[k] = int(i)
    for c1, p1 in enumerate(perm1):
        for k, i in enumerate(multiindex):
            if isinstance(i, Index):
                p2[k] = p1[multiindex_to_ind1_map[k]]
        for k, i in enumerate(ind2_to_ind1_map):
            p2[nmui + k] = p1[i]
        c2 = flatten_multiindex(p2, str2)
        d1[c1] = c2

    # Consistency checks
    assert all(isinstance(x, int) for x in d1)
    assert len(set(d1)) == len(d1)
    return d1
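As a hand-worked instance of the mapping (not calling the function, and not constructing UFL objects): for a 2x2 tensor A indexed as A[1, j] with a single free index j, each flattened component of the Indexed expression maps to row 1 of A in row-major order.

from ufl.utils.indexflattening import flatten_multiindex, shape_to_strides

str2 = shape_to_strides((2, 2))                            # strides of the tensor A
d1 = [flatten_multiindex((1, j), str2) for j in range(2)]
print(d1)                                                  # [2, 3]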
Example #27
    def __call__(self, *args, **kwargs):
        # GNW: This function is copied from the old DOLFIN Python
        # code. It is far too complicated. There is no need to provide
        # so many ways of doing the same thing.
        #
        # Deprecate as many options as possible, and maybe share with
        # dolfin.expression.Expression.

        if len(args) == 0:
            raise TypeError("expected at least 1 argument")

        # Test for ufl restriction
        if len(args) == 1 and isinstance(args[0], str):
            if args[0] in ('+', '-'):
                return ufl.Coefficient.__call__(self, *args)

        # Test for ufl mapping
        if len(args) == 2 and isinstance(args[1], dict) and self in args[1]:
            return ufl.Coefficient.__call__(self, *args)

        # Some help variables
        value_size = ufl.product(self.ufl_element().value_shape())

        # If values (return argument) is passed, check the type and length
        values = kwargs.get("values", None)
        if values is not None:
            if not isinstance(values, np.ndarray):
                raise TypeError("expected a NumPy array for 'values'")
            if len(values) != value_size or \
               not np.issubdtype(values.dtype, np.float64):
                raise TypeError("expected a double NumPy array of length"
                                " %d for return values." % value_size)
            values_provided = True
        else:
            values_provided = False
            values = np.zeros(value_size, dtype='d')

        # Get the geometric dimension we live in
        dim = self.ufl_domain().geometric_dimension()

        # Assume all args are x argument
        x = args

        # If only one x argument has been provided, unpack it if it's
        # an iterable
        if len(x) == 1:
            if isinstance(x[0], cpp.geometry.Point):
                x = [x[0][i] for i in range(dim)]
            elif hasattr(x[0], '__iter__'):
                x = x[0]

        # Convert it to a 1D numpy array
        try:
            x = np.fromiter(x, 'd')
        except (TypeError, ValueError, AssertionError):
            raise TypeError("expected scalar arguments for the coordinates")

        if len(x) == 0:
            raise TypeError("coordinate argument too short")

        if len(x) != dim:
            raise TypeError("expected the geometry argument to be of "
                            "length %d" % dim)

        # The actual evaluation
        self._cpp_object.eval(values, x)

        # If the value is scalar and no return array was provided, return a plain scalar.
        if value_size == 1 and not values_provided:
            return values[0]

        return values
Example #28
    def reference_value_size(self, L, reference_value_shape):
        return L.Return(product(reference_value_shape))
Example #29
    def value_size(self, L, value_shape):
        return L.Return(product(value_shape))
Example #30
    def __call__(self, *args, **kwargs):
        # GNW: This function is copied from the old DOLFIN Python
        # code. It is far too complicated. There is no need to provide
        # so many ways of doing the same thing.
        #
        # Deprecate as many options as possible

        if len(args) == 0:
            raise TypeError("expected at least 1 argument")

        # Test for ufl restriction
        if len(args) == 1 and isinstance(args[0], str):
            if args[0] in ('+', '-'):
                return ufl.Coefficient.__call__(self, *args)

        # Test for ufl mapping
        if len(args) == 2 and isinstance(args[1], dict) and self in args[1]:
            return ufl.Coefficient.__call__(self, *args)

        # Some help variables
        value_size = product(self.ufl_element().value_shape())

        # If values (return argument) is passed, check the type and
        # length
        values = kwargs.get("values", None)
        if values is not None:
            if not isinstance(values, numpy.ndarray):
                raise TypeError("expected a NumPy array for 'values'")
            if len(values) != value_size or not numpy.issubdtype(values.dtype, numpy.float64):
                raise TypeError("expected a double NumPy array of length"
                                " %d for return values." % value_size)
            values_provided = True
        else:
            values_provided = False
            values = numpy.zeros(value_size, dtype='d')

        # Get dim if element has any domains
        cell = self.ufl_element().cell()
        dim = None if cell is None else cell.geometric_dimension()

        # Assume all args are x argument
        x = args

        # If only one x argument has been provided, unpack it if it's
        # an iterable
        if len(x) == 1:
            if isinstance(x[0], cpp.geometry.Point):
                if dim is not None:
                    x = [x[0][i] for i in range(dim)]
                else:
                    x = [x[0][i] for i in range(3)]
            elif hasattr(x[0], '__iter__'):
                x = x[0]

        # Convert it to a 1D numpy array
        try:
            x = numpy.fromiter(x, 'd')
        except (TypeError, ValueError, AssertionError):
            raise TypeError("expected scalar arguments for the coordinates")

        if len(x) == 0:
            raise TypeError("coordinate argument too short")

        if dim is None:
            # Disabled warning as it breaks py.test due to excessive
            # output, and that code that is warned about is still
            # officially supported. See
            # https://bitbucket.org/fenics-project/dolfin/issues/355/
            # warning("Evaluating an Expression without knowing the right geometric dimension, assuming %d is correct." % len(x))
            pass
        else:
            if len(x) != dim:
                raise TypeError("expected the geometry argument to be of "
                                "length %d" % dim)

        # The actual evaluation
        self._cpp_object.eval(values, x)

        # If the value is scalar and no return array was provided, return a plain scalar.
        if value_size == 1 and not values_provided:
            return values[0]

        return values
Example #31
    def __call__(self, *args, **kwargs):
        # GNW: This function is copied from the old DOLFIN Python
        # code. It is far too complicated. There is no need to provide
        # so many ways of doing the same thing.
        #
        # Deprecate as many options as possible, and maybe share with
        # dolfin.expression.Expression.

        if len(args) == 0:
            raise TypeError("expected at least 1 argument")

        # Test for ufl restriction
        if len(args) == 1 and isinstance(args[0], str):
            if args[0] in ('+', '-'):
                return ufl.Coefficient.__call__(self, *args)

        # Test for ufl mapping
        if len(args) == 2 and isinstance(args[1], dict) and self in args[1]:
            return ufl.Coefficient.__call__(self, *args)

        # Some help variables
        value_size = ufl.product(self.ufl_element().value_shape())

        # If values (return argument) is passed, check the type and length
        values = kwargs.get("values", None)
        if values is not None:
            if not isinstance(values, np.ndarray):
                raise TypeError("expected a NumPy array for 'values'")
            if len(values) != value_size or \
               not np.issubdtype(values.dtype, np.float64):
                raise TypeError("expected a double NumPy array of length"
                                " %d for return values." % value_size)
            values_provided = True
        else:
            values_provided = False
            values = np.zeros(value_size, dtype='d')

        # Get the geometric dimension we live in
        dim = self.ufl_domain().geometric_dimension()

        # Assume all args are x argument
        x = args

        # If only one x argument has been provided, unpack it if it's
        # an iterable
        if len(x) == 1:
            if isinstance(x[0], cpp.geometry.Point):
                x = [x[0][i] for i in range(dim)]
            elif hasattr(x[0], '__iter__'):
                x = x[0]

        # Convert it to a 1D numpy array
        try:
            x = np.fromiter(x, 'd')
        except (TypeError, ValueError, AssertionError):
            raise TypeError("expected scalar arguments for the coordinates")

        if len(x) == 0:
            raise TypeError("coordinate argument too short")

        if len(x) != dim:
            raise TypeError("expected the geometry argument to be of "
                            "length %d" % dim)

        # The actual evaluation
        self._cpp_object.eval(values, x)

        # If the value is scalar and no return array was provided, return a plain scalar.
        if value_size == 1 and not values_provided:
            return values[0]

        return values
Example #32
def compile_expression1(expr):
    code = "double values[%d];" % (product(expr.ufl_shape),)
    return code
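A hedged usage sketch, assuming product here resolves to ufl.product in the module this snippet was taken from: for a vector-valued expression the generated declaration reserves one double per flattened component.

import ufl

expr = ufl.as_vector([1.0, 2.0, 3.0])   # ufl_shape == (3,)
print(compile_expression1(expr))        # double values[3];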
Example #33
    def generate_preintegrated_dofblock_partition(self):
        # FIXME: Generalize this to unrolling all A[] += ... loops,
        # or all loops with noncontiguous DM??
        L = self.backend.language

        block_contributions = self.ir.piecewise_ir["block_contributions"]

        blocks = [
            (blockmap, blockdata)
            for blockmap, contributions in sorted(block_contributions.items())
            for blockdata in contributions
            if blockdata.block_mode == "preintegrated"
        ]

        # Get symbol, dimensions, and loop index symbols for A
        A_shape = self.ir.tensor_shape
        A_size = ufl.product(A_shape)
        A_rank = len(A_shape)

        # TODO: there's something like shape2strides(A_shape) somewhere
        # A_strides = ufl.utils.indexflattening.shape_to_strides(A_shape)

        A_strides = [1] * A_rank
        for i in reversed(range(0, A_rank - 1)):
            A_strides[i] = A_strides[i + 1] * A_shape[i + 1]

        A_values = [0.0] * A_size

        for blockmap, blockdata in blocks:
            # Accumulate A[blockmap[...]] += f*PI[...]

            # Get table for inlining
            tables = self.ir.unique_tables
            table = tables[blockdata.name]
            inline_table = self.ir.integral_type == "cell"

            # Get factor expression
            v = self.ir.piecewise_ir["factorization"].nodes[
                blockdata.factor_index]['expression']
            f = self.get_var(None, v)

            # Define rhs expression for A[blockmap[arg_indices]] += A_rhs
            # A_rhs = f * PI where PI = sum_q weight * u * v
            PI = L.Symbol(blockdata.name)

            # Define indices into preintegrated block
            P_entity_indices = self.get_entities(blockdata)
            if inline_table:
                assert P_entity_indices == (L.LiteralInt(0), )
                assert table.shape[0] == 1

            # Unroll loop
            blockshape = [len(DM) for DM in blockmap]
            blockrange = [range(d) for d in blockshape]

            for ii in itertools.product(*blockrange):
                A_ii = sum(A_strides[i] * blockmap[i][ii[i]]
                           for i in range(len(ii)))
                if blockdata.transposed:
                    P_arg_indices = (ii[1], ii[0])
                else:
                    P_arg_indices = ii

                if inline_table:
                    # Extract float value of PI[P_ii]
                    Pval = table[0]  # always entity 0
                    for i in P_arg_indices:
                        Pval = Pval[i]
                    A_rhs = Pval * f
                else:
                    # Index the static preintegrated table:
                    P_ii = P_entity_indices + P_arg_indices
                    A_rhs = f * PI[P_ii]

                A_values[A_ii] = A_values[A_ii] + A_rhs

        code = self.generate_tensor_value_initialization(A_values)
        return L.commented_code_list(code, "UFLACS block mode: preintegrated")
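The stride computation at the top of the method is plain row-major flattening. A standalone sketch for a hypothetical 3 x 4 element tensor, where A[i, j] lands at flat position i*4 + j:

A_shape = [3, 4]
A_rank = len(A_shape)
A_strides = [1] * A_rank
for i in reversed(range(0, A_rank - 1)):
    A_strides[i] = A_strides[i + 1] * A_shape[i + 1]
print(A_strides)     # [4, 1]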
Example #34
    def value_size(self, L, value_shape):
        return L.Return(product(value_shape))
Example #35
    def _modified_terminal(self, v, i):
        """Modifiers:
        terminal           - the underlying Terminal object
        global_derivatives - tuple of ints, each meaning derivative in that global direction
        local_derivatives  - tuple of ints, each meaning derivative in that local direction
        reference_value    - bool, whether this is represented in reference frame
        averaged           - None, 'facet' or 'cell'
        restriction        - None, '+' or '-'
        component          - tuple of ints, the global component of the Terminal
        flat_component     - single int, flattened local component of the Terminal, considering symmetry
        """
        # (1) mt.terminal.ufl_shape defines a core indexing space UNLESS mt.reference_value,
        #     in which case the reference value shape of the element must be used.
        # (2) mt.terminal.ufl_element().symmetry() defines core symmetries
        # (3) averaging and restrictions define distinct symbols, no additional symmetries
        # (4) two or more grad/reference_grad defines distinct symbols with additional symmetries

        # v is not necessarily scalar here; indexing in (0,...,0) picks the first scalar component
        # to analyse, which should be sufficient to get the base shape and derivatives
        if v.ufl_shape:
            mt = analyse_modified_terminal(v[(0,) * len(v.ufl_shape)])
        else:
            mt = analyse_modified_terminal(v)

        # Get derivatives
        num_ld = len(mt.local_derivatives)
        num_gd = len(mt.global_derivatives)
        assert not (num_ld and num_gd)
        if num_ld:
            domain = mt.terminal.ufl_domain()
            tdim = domain.topological_dimension()
            d_components = compute_indices((tdim,) * num_ld)
        elif num_gd:
            domain = mt.terminal.ufl_domain()
            gdim = domain.geometric_dimension()
            d_components = compute_indices((gdim,) * num_gd)
        else:
            d_components = [()]

        # Get base shape without the derivative axes
        base_components = compute_indices(mt.base_shape)

        # Build symbols with symmetric components and derivatives skipped
        symbols = []
        mapped_symbols = {}
        for bc in base_components:
            for dc in d_components:
                # Build mapped component mc with symmetries from element and derivatives combined
                mbc = mt.base_symmetry.get(bc, bc)
                mdc = tuple(sorted(dc))
                mc = mbc + mdc

                # Get existing symbol or create new and store with mapped component mc as key
                s = mapped_symbols.get(mc)
                if s is None:
                    s = self.new_symbol()
                    mapped_symbols[mc] = s
                symbols.append(s)

        # Consistency check before returning symbols
        assert not v.ufl_free_indices
        if product(v.ufl_shape) != len(symbols):
            error("Internal error in value numbering.")
        return symbols
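The tuple(sorted(dc)) step above encodes the symmetry of mixed partial derivatives. A standalone sketch for a hypothetical 2D case with two local derivatives, where (0, 1) and (1, 0) collapse to one mapped component:

from ufl.permutation import compute_indices

mapped = {tuple(sorted(dc)) for dc in compute_indices((2, 2))}
print(sorted(mapped))    # [(0, 0), (0, 1), (1, 1)]: 3 symbols for 4 derivative components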
Example #36
    def reference_value_size(self, L, reference_value_shape):
        return L.Return(product(reference_value_shape))
Example #37
def map_component_tensor_arg_components(tensor):
    """Build a map from flattened components to subexpression.

    Builds integer list mapping between flattened components
    of tensor and its underlying indexed subexpression."""

    assert isinstance(tensor, ComponentTensor)

    # AKA tensor = as_tensor(indexed, multiindex)
    indexed, multiindex = tensor.ufl_operands

    e1 = indexed
    e2 = tensor  # e2 = as_tensor(e1, multiindex)
    mi = [i for i in multiindex if isinstance(i, Index)]

    # Get tensor and index shapes
    sh1 = e1.ufl_shape  # (sh)ape of e1
    sh2 = e2.ufl_shape  # (sh)ape of e2
    fi1 = e1.ufl_free_indices  # (f)ree (i)ndices of e1
    fi2 = e2.ufl_free_indices  # ...
    fid1 = e1.ufl_index_dimensions  # (f)ree (i)ndex (d)imensions of e1
    fid2 = e2.ufl_index_dimensions  # ...

    # Compute total shape (tsh) of e1 and e2
    tsh1 = sh1 + fid1
    tsh2 = sh2 + fid2
    r1 = len(tsh1)  # 'total rank' of e1
    r2 = len(tsh2)  # ...
    str1 = shape_to_strides(tsh1)
    assert not sh1
    assert sh2
    assert len(mi) == len(multiindex)
    assert ufl.product(tsh1) == ufl.product(tsh2)
    assert fi1

    assert all(i in fi1 for i in fi2)

    nmui = len(multiindex)
    assert nmui == len(sh2)

    # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position
    p2_to_p1_map = [None] * r2
    for k, i in enumerate(fi2):
        p2_to_p1_map[k + nmui] = fi1.index(i)

    # Build map from fi1/fid1 position to mi position
    for k, i in enumerate(mi):
        p2_to_p1_map[k] = fi1.index(mi[k].count())

    # Build map from flattened e1 component to flattened e2 component
    perm2 = compute_indices(tsh2)
    ni2 = ufl.product(tsh2)

    # Situation: e2 = as_tensor(e1, mi)
    d2 = [None] * ni2
    p1 = [None] * r1
    for c2, p2 in enumerate(perm2):
        for k2, k1 in enumerate(p2_to_p1_map):
            p1[k1] = p2[k2]
        c1 = flatten_multiindex(p1, str1)
        d2[c2] = c1

    # Consistency checks
    assert all(isinstance(x, int) for x in d2)
    assert len(set(d2)) == len(d2)
    return d2
Example #38
    def __call__(self, *args, **kwargs):
        """
        Evaluates the Function.

        *Examples*
            1) Using an iterable as x:

              .. code-block:: python

                  fs = Expression("sin(x[0])*cos(x[1])*sin(x[3])")
                  x0 = (1.,0.5,0.5)
                  x1 = [1.,0.5,0.5]
                  x2 = numpy.array([1.,0.5,0.5])
                  v0 = fs(x0)
                  v1 = fs(x1)
                  v2 = fs(x2)

            2) Using multiple scalar args for x, interpreted as a
            point coordinate

              .. code-block:: python

                  v0 = f(1.,0.5,0.5)

            3) Using a Point

              .. code-block:: python

                  p0 = Point(1.,0.5,0.5)
                  v0 = f(p0)

            4) Passing a return array

              .. code-block:: python

                  fv = Expression(("sin(x[0])*cos(x[1])*sin(x[3])",
                               "2.0","0.0"))
                  x0 = numpy.array([1.,0.5,0.5])
                  v0 = numpy.zeros(3)
                  fv(x0, values = v0)

              .. note::

                  A longer values array may be passed. In this way one can
                  quickly fill an array with multiple evaluations.

              .. code-block:: python

                  values = numpy.zeros(9)
                  for i in xrange(0,10,3):
                      fv(x[i:i+3], values = values[i:i+3])

        """

        if len(args) == 0:
            raise TypeError("expected at least 1 argument")

        # Test for ufl restriction
        if len(args) == 1 and args[0] in ('+', '-'):
            return ufl.Coefficient.__call__(self, *args)

        # Test for ufl mapping
        if len(args) == 2 and isinstance(args[1], dict) and self in args[1]:
            return ufl.Coefficient.__call__(self, *args)

        # Some help variables
        value_size = product(self.ufl_element().value_shape())

        # If values (return argument) is passed, check the type and length
        values = kwargs.get("values", None)
        if values is not None:
            if not isinstance(values, numpy.ndarray):
                raise TypeError("expected a NumPy array for 'values'")
            if len(values) != value_size or \
                   not numpy.issubdtype(values.dtype, 'd'):
                raise TypeError("expected a double NumPy array of length"\
                      " %d for return values."%value_size)
            values_provided = True
        else:
            values_provided = False
            values = numpy.zeros(value_size, dtype='d')

        # Get the geometric dimension we live in
        dim = self.ufl_domain().geometric_dimension()

        # Assume all args are x argument
        x = args

        # If only one x argument has been provided, unpack it if it's
        # an iterable
        if len(x) == 1:
            if isinstance(x[0], cpp.Point):
                x = [x[0][i] for i in range(dim)]
            elif hasattr(x[0], '__iter__'):
                x = x[0]

        # Convert it to a 1D numpy array
        try:
            x = numpy.fromiter(x, 'd')
        except (TypeError, ValueError, AssertionError) as e:
            raise TypeError("expected scalar arguments for the coordinates")

        if len(x) == 0:
            raise TypeError("coordinate argument too short")

        if len(x) != dim:
            raise TypeError("expected the geometry argument to be of "\
                  "length %d"%dim)

        # The actual evaluation
        self.eval(values, x)

        # If scalar valued and no return array was provided, return a plain scalar value.
        if value_size == 1 and not values_provided:
            return values[0]

        return values
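
The coordinate handling above reduces tuples, lists, NumPy arrays and Points to a flat
double array via numpy.fromiter before calling eval. A minimal standalone check of that
normalization (NumPy only, no DOLFIN; the variable names are illustrative):

import numpy

dim = 3
candidates = [(1., 0.5, 0.5), [1., 0.5, 0.5], numpy.array([1., 0.5, 0.5])]
for x in candidates:
    coords = numpy.fromiter(x, 'd')  # same conversion as in __call__ above
    assert coords.shape == (dim,)
    assert coords.dtype == numpy.float64
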
Exemplo n.º 39
0
    def __call__(self, *args, **kwargs):
        """
        Evaluates the Function.

        *Examples*
            1) Using an iterable as x:

              .. code-block:: python

                  fs = Expression("sin(x[0])*cos(x[1])*sin(x[2])")
                  x0 = (1.,0.5,0.5)
                  x1 = [1.,0.5,0.5]
                  x2 = numpy.array([1.,0.5,0.5])
                  v0 = fs(x0)
                  v1 = fs(x1)
                  v2 = fs(x2)

            2) Using multiple scalar args for x, interpreted as a
            point coordinate

              .. code-block:: python

                  v0 = f(1.,0.5,0.5)

            3) Using a Point

              .. code-block:: python

                  p0 = Point(1.,0.5,0.5)
                  v0 = f(p0)

            4) Passing a return array

              .. code-block:: python

                  fv = Expression(("sin(x[0])*cos(x[1])*sin(x[2])",
                                   "2.0", "0.0"))
                  x0 = numpy.array([1.,0.5,0.5])
                  v0 = numpy.zeros(3)
                  fv(x0, values = v0)

              .. note::

                  A longer values array may be passed. In this way an array
                  can be filled quickly with evaluations at different points.

              .. code-block:: python

                  x = numpy.random.random(9)  # three stacked coordinate triplets
                  values = numpy.zeros(9)
                  for i in range(0, 9, 3):
                      fv(x[i:i+3], values = values[i:i+3])

        """

        if len(args) == 0:
            raise TypeError("expected at least 1 argument")

        # Test for ufl restriction
        if len(args) == 1 and isinstance(args[0], string_types):
            if args[0] in ('+', '-'):
                return ufl.Coefficient.__call__(self, *args)

        # Test for ufl mapping
        if len(args) == 2 and isinstance(args[1], dict) and self in args[1]:
            return ufl.Coefficient.__call__(self, *args)

        # Some help variables
        value_size = product(self.ufl_element().value_shape())

        # If values (return argument) is passed, check the type and length
        values = kwargs.get("values", None)
        if values is not None:
            if not isinstance(values, numpy.ndarray):
                raise TypeError("expected a NumPy array for 'values'")
            if len(values) != value_size or \
                   not numpy.issubdtype(values.dtype, 'd'):
                raise TypeError("expected a double NumPy array of length"\
                      " %d for return values."%value_size)
            values_provided = True
        else:
            values_provided = False
            values = numpy.zeros(value_size, dtype='d')

        # Get the geometric dimension we live in
        dim = self.ufl_domain().geometric_dimension()

        # Assume all args are x argument
        x = args

        # If only one x argument has been provided, unpack it if it's
        # an iterable
        if len(x) == 1:
            if isinstance(x[0], cpp.Point):
                x = [x[0][i] for i in range(dim)]
            elif hasattr(x[0], '__iter__'):
                x = x[0]

        # Convert it to a 1D numpy array
        try:
            x = numpy.fromiter(x, 'd')
        except (TypeError, ValueError, AssertionError) as e:
            raise TypeError("expected scalar arguments for the coordinates")

        if len(x) == 0:
            raise TypeError("coordinate argument too short")

        if len(x) != dim:
            raise TypeError("expected the geometry argument to be of "\
                  "length %d"%dim)

        # The actual evaluation
        self.eval(values, x)

        # If scalar valued and no return array was provided, return a plain scalar value.
        if value_size == 1 and not values_provided:
            return values[0]

        return values
Exemplo n.º 40
0
def _generate_element_code(ir, parameters):
    "Generate code for finite element from intermediate representation."

    # Skip code generation if ir is None
    if ir is None:
        return None

    # Prefetch formatting to speedup code generation
    ret = format["return"]
    do_nothing = format["do nothing"]
    create = format["create foo"]

    # Codes generated together
    (evaluate_dof_code, evaluate_dofs_code) \
        = evaluate_dof_and_dofs(ir["evaluate_dof"])

    element_number = ir["id"]

    # Generate code
    code = {}
    code["classname"] = ir["classname"]
    code["members"] = ""
    code["constructor"] = do_nothing
    code["constructor_arguments"] = ""
    code["initializer_list"] = ""
    code["destructor"] = do_nothing

    code["signature"] = ret('"%s"' % ir["signature"])
    code["cell_shape"] = ret(format["cell"](ir["cell"].cellname()))
    code["topological_dimension"] = ret(ir["cell"].topological_dimension())
    code["geometric_dimension"] = ret(ir["cell"].geometric_dimension())
    code["space_dimension"] = ret(ir["space_dimension"])

    code["value_rank"] = ret(len(ir["value_dimension"]))
    code["value_dimension"] = _value_dimension(ir["value_dimension"])
    code["value_size"] = ret(product(ir["value_dimension"]))
    code["reference_value_rank"] = ret(len(ir["reference_value_dimension"]))
    code["reference_value_dimension"] = _value_dimension(ir["reference_value_dimension"])
    code["reference_value_size"] = ret(product(ir["reference_value_dimension"]))

    code["evaluate_basis"] = _evaluate_basis(ir["evaluate_basis"])
    code["evaluate_basis_all"] = _evaluate_basis_all(ir["evaluate_basis"])
    code["evaluate_basis_derivatives"] \
        = _evaluate_basis_derivatives(ir["evaluate_basis"])
    code["evaluate_basis_derivatives_all"] \
        = _evaluate_basis_derivatives_all(ir["evaluate_basis"])

    code["evaluate_dof"] = evaluate_dof_code
    code["evaluate_dofs"] = evaluate_dofs_code
    code["interpolate_vertex_values"] \
        = interpolate_vertex_values(ir["interpolate_vertex_values"])
    code["tabulate_dof_coordinates"] \
        = _tabulate_dof_coordinates(ir["tabulate_dof_coordinates"])
    code["num_sub_elements"] = ret(ir["num_sub_elements"])
    code["create_sub_element"] = _create_sub_element(ir)
    code["create"] = ret(create(code["classname"]))

    # Postprocess code
    _postprocess_code(code, parameters)

    return code
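
A rough sketch of what the prefetched formatting helpers produce when filling the code
dict above; the stand-in format entries are assumptions for illustration, not the actual
FFC format strings:

# Stand-in formatting helpers, assumed to behave like FFC's 'format' dict entries
format = {
    "return": lambda v: "return %s;" % str(v),
    "do nothing": "// Do nothing",
}

ret = format["return"]
do_nothing = format["do nothing"]

code = {
    "constructor": do_nothing,
    "geometric_dimension": ret(3),
    "value_size": ret(2 * 2),  # e.g. product of a (2, 2) value shape
}
assert code["geometric_dimension"] == "return 3;"
assert code["value_size"] == "return 4;"
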
Exemplo n.º 41
0
    def expr(self, v):
        """Create new symbols for expressions that represent new values."""
        n = ufl.product(v.ufl_shape + v.ufl_index_dimensions)
        return self.new_symbols(n)
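
For example, an expression of shape (2, 3) carrying one free index of dimension 4
flattens to 24 scalar values, so 24 new symbols are requested. A quick check, assuming
ufl.product is a plain sequence product:

from functools import reduce
import operator

def product(seq):
    # Plain sequence product, assumed equivalent to ufl.product for this purpose
    return reduce(operator.mul, seq, 1)

assert product((2, 3) + (4,)) == 24  # ufl_shape + ufl_index_dimensions
assert product(()) == 1              # a scalar expression still needs one symbol
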
Exemplo n.º 42
0
def map_component_tensor_arg_components(tensor):
    """Build integer list mapping between flattended components
    of tensor and its underlying indexed subexpression."""

    assert isinstance(tensor, ComponentTensor)

    # AKA tensor = as_tensor(indexed, multiindex)
    indexed, multiindex = tensor.ufl_operands

    e1 = indexed
    e2 = tensor  # e2 = as_tensor(e1, multiindex)
    mi = [i for i in multiindex if isinstance(i, Index)]

    # Get tensor and index shapes
    sh1 = e1.ufl_shape  # (sh)ape of e1
    sh2 = e2.ufl_shape  # (sh)ape of e2
    fi1 = e1.ufl_free_indices  # (f)ree (i)ndices of e1
    fi2 = e2.ufl_free_indices  # ...
    fid1 = e1.ufl_index_dimensions  # (f)ree (i)ndex (d)imensions of e1
    fid2 = e2.ufl_index_dimensions  # ...

    # Compute total shape (tsh) of e1 and e2
    tsh1 = sh1 + fid1
    tsh2 = sh2 + fid2
    r1 = len(tsh1)  # 'total rank' of e1
    r2 = len(tsh2)  # ...
    str1 = shape_to_strides(tsh1)
    assert not sh1
    assert sh2
    assert len(mi) == len(multiindex)
    assert product(tsh1) == product(tsh2)
    assert fi1

    assert all(i in fi1 for i in fi2)

    nmui = len(multiindex)
    assert nmui == len(sh2)

    # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position
    p2_to_p1_map = [None] * r2
    for k, i in enumerate(fi2):
        p2_to_p1_map[k + nmui] = fi1.index(i)

    # Build map from mi position (the shape axes of e2) to fi1/fid1 position
    for k, i in enumerate(mi):
        p2_to_p1_map[k] = fi1.index(mi[k].count())

    # Build map from flattened e1 component to flattened e2 component
    perm2 = compute_indices(tsh2)
    ni2 = product(tsh2)

    # Situation: e2 = as_tensor(e1, mi)
    d2 = [None] * ni2
    p1 = [None] * r1
    for c2, p2 in enumerate(perm2):
        for k2, k1 in enumerate(p2_to_p1_map):
            p1[k1] = p2[k2]
        c1 = flatten_multiindex(p1, str1)
        d2[c2] = c1

    # Consistency checks
    assert all(isinstance(x, int) for x in d2)
    assert len(set(d2)) == len(d2)
    return d2
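
A self-contained sketch of the flattening arithmetic used above, with stand-in helpers
for shape_to_strides, compute_indices and flatten_multiindex (these re-implementations
are assumptions for illustration, not the UFL originals):

import itertools

def shape_to_strides(shape):
    # Row-major strides: the stride of an axis is the product of the dimensions after it
    strides, s = [], 1
    for d in reversed(shape):
        strides.insert(0, s)
        s *= d
    return tuple(strides)

def compute_indices(shape):
    # All multi-indices of the given shape, in row-major order
    return list(itertools.product(*(range(d) for d in shape)))

def flatten_multiindex(mi, strides):
    # Dot product of multi-index and strides gives the flat component number
    return sum(i * s for i, s in zip(mi, strides))

# Every multi-index of a (2, 3) total shape round-trips to its flat position
tsh = (2, 3)
strides = shape_to_strides(tsh)  # (3, 1)
for c, mi in enumerate(compute_indices(tsh)):
    assert flatten_multiindex(mi, strides) == c
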
Exemplo n.º 43
0
    def generate_block_parts(self, blockmap, blockdata):
        """Generate and return code parts for a given block."""
        L = self.backend.language

        # The parts to return
        preparts = []
        quadparts = []

        block_rank = len(blockmap)
        blockdims = tuple(len(dofmap) for dofmap in blockmap)

        ttypes = blockdata.ttypes
        if "zeros" in ttypes:
            raise RuntimeError(
                "Not expecting zero arguments to be left in dofblock generation."
            )

        arg_indices = tuple(
            self.backend.symbols.argument_loop_index(i)
            for i in range(block_rank))

        F = self.ir.integrand[self.quadrature_rule]["factorization"]

        assert not blockdata.transposed, "Not handled yet"
        components = ufl.product(self.ir.expression_shape)

        num_points = self.quadrature_rule.points.shape[0]
        A_shape = self.ir.tensor_shape
        Asym = self.backend.symbols.element_tensor()
        A = L.FlattenedArray(Asym, dims=[components] + [num_points] + A_shape)

        iq = self.backend.symbols.quadrature_loop_index()

        # Prepend the dofmap block indices with the free index for
        # quadrature points (the expression component is added to the
        # A indices below)
        B_indices = tuple([iq] + list(arg_indices))

        # Fetch code to access the modified arguments (FE table data)
        arg_factors = self.get_arg_factors(blockdata, block_rank, B_indices)

        A_indices = []
        for i in range(len(blockmap)):
            offset = blockmap[i][0]
            A_indices.append(arg_indices[i] + offset)
        A_indices = tuple([iq] + A_indices)

        # Multiply collected factors
        # For each component of the factor expression
        # add result inside quadloop
        body = []

        for fi_ci in blockdata.factor_indices_comp_indices:
            f = self.get_var(F.nodes[fi_ci[0]]["expression"])
            Brhs = L.float_product([f] + arg_factors)
            body.append(L.AssignAdd(A[(fi_ci[1], ) + A_indices], Brhs))

        for i in reversed(range(block_rank)):
            body = L.ForRange(B_indices[i + 1], 0, blockdims[i], body=body)
        quadparts += [body]

        return preparts, quadparts
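
To make the element tensor layout concrete, here is a small NumPy sketch of the
flattened indexing that L.FlattenedArray generates code for; the sizes below are
illustrative assumptions:

import numpy as np

components, num_points = 2, 4   # assumed expression components and quadrature points
A_shape = [3, 3]                # assumed 3x3 block of argument dofs

# Flat storage viewed with dims [components] + [num_points] + A_shape,
# mirroring L.FlattenedArray(Asym, dims=[components] + [num_points] + A_shape)
A_flat = np.zeros(components * num_points * int(np.prod(A_shape)))
A = A_flat.reshape([components, num_points] + A_shape)

# One accumulation, as in A[(fi_ci[1], ) + A_indices] += Brhs
comp, iq, i0, i1 = 1, 2, 0, 1
A[comp, iq, i0, i1] += 3.14

# The same entry located in the flat array via row-major strides
flat_index = ((comp * num_points + iq) * A_shape[0] + i0) * A_shape[1] + i1
assert A_flat[flat_index] == 3.14
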
Exemplo n.º 44
0
def value_size(L, value_shape):
    return L.Return(ufl.product(value_shape))