Code Example #1
 def _basic_expression_name(expression):
     str_repr = ""
     visited = set()
     coefficients_replacement = dict()
     for n in wrapping.expression_iterator(expression):
         if n in visited:
             continue
         if has_pybind11():
             cppcode_attribute = "_cppcode"
         else:
             cppcode_attribute = "cppcode"
         if hasattr(n, cppcode_attribute):
             coefficients_replacement[repr(n)] = str(
                 getattr(n, cppcode_attribute))
             str_repr += repr(getattr(n, cppcode_attribute))
             visited.add(n)
         elif wrapping.is_problem_solution_or_problem_solution_component_type(
                 n):
             if wrapping.is_problem_solution_or_problem_solution_component(
                     n):
                 (preprocessed_n, component,
                  truth_solution) = wrapping.solution_identify_component(n)
                 problem = get_problem_from_solution(truth_solution)
             else:
                 (
                     problem, component
                 ) = wrapping.get_auxiliary_problem_for_non_parametrized_function(
                     n)
                 preprocessed_n = n
             coefficients_replacement[repr(preprocessed_n)] = str(
                 problem.name()) + str(component)
             str_repr += repr(problem.name()) + str(component)
             # Make sure to skip any parent solution related to this one
             visited.add(n)
             visited.add(preprocessed_n)
             for parent_n in wrapping.solution_iterator(preprocessed_n):
                 visited.add(parent_n)
         elif isinstance(n, Constant):
             if has_pybind11():
                 vals = n.values()
             else:
                 x = zeros(1)
                 vals = zeros(n.value_size())
                 n.eval(vals, x)
             coefficients_replacement[repr(n)] = str(vals)
             str_repr += repr(str(vals))
             visited.add(n)
         else:
             str_repr += repr(n)
             visited.add(n)
     for key, value in coefficients_replacement.items():
         str_repr = str_repr.replace(key, value)
     hash_code = hashlib.sha1(
         (str_repr + dolfin_version).encode("utf-8")).hexdigest(
         )  # similar to dolfin/compilemodules/compilemodule.py
     return hash_code
Code Example #2
File: tensor_save.py  Project: ljnpu/RBniCS
    def _basic_tensor_save(tensor, directory, filename):
        mpi_comm = tensor.mpi_comm()
        if not has_pybind11():
            mpi_comm = mpi_comm.tompi4py()
        assert hasattr(tensor, "generator")
        form = tensor.generator._form
        # Write out generator
        full_filename_generator = os.path.join(str(directory),
                                               filename + ".generator")
        form_name = wrapping.form_name(form)

        def save_generator():
            with open(full_filename_generator, "w") as generator_file:
                generator_file.write(form_name)

        parallel_io(save_generator, mpi_comm)
        # Write out generator mpi size
        full_filename_generator_mpi_size = os.path.join(
            str(directory), filename + ".generator_mpi_size")

        def save_generator_mpi_size():
            with open(full_filename_generator_mpi_size,
                      "w") as generator_mpi_size_file:
                generator_mpi_size_file.write(str(mpi_comm.size))

        parallel_io(save_generator_mpi_size, mpi_comm)
        # Write out generator mapping from processor dependent indices to processor independent (global_cell_index, cell_dof) tuple
        _permutation_save(tensor, directory, form,
                          form_name + "_" + str(mpi_comm.size), mpi_comm)
        # Write out content
        _tensor_save(tensor, directory, filename, mpi_comm)
Code Example #3
def make_comm(comm):
    if hasattr(dolfin, "has_pybind11") and dolfin.has_pybind11():
        return comm
    elif dolfin.__version__ >= "2018.1.0":
        return comm
    else:
        return PETSc.Comm(comm)
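A minimal usage sketch for the helper above (the mesh construction and the petsc4py import are illustrative assumptions): on pybind11-based or 2018.1.0+ dolfin the communicator is passed through unchanged, while older builds get wrapped in a PETSc.Comm, so the result can be handed to petsc4py either way.

from dolfin import UnitSquareMesh
from petsc4py import PETSc

mesh = UnitSquareMesh(8, 8)
comm = make_comm(mesh.mpi_comm())  # plain comm on new dolfin, PETSc.Comm on old
vec = PETSc.Vec().create(comm=comm)  # petsc4py accepts either form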
Code Example #4
 def _basic_is_time_dependent(expression_or_form, iterator):
     for node in iterator(expression_or_form):
         # ... parametrized expressions
         if isinstance(node, BaseExpression):
             if is_pull_back_expression(
                     node) and is_pull_back_expression_time_dependent(node):
                 return True
             else:
                 if has_pybind11():
                     parameters = node._parameters
                 else:
                     parameters = node.user_parameters
                 if "t" in parameters:
                     return True
         # ... problem solutions related to nonlinear terms
         elif wrapping.is_problem_solution_type(node):
             if wrapping.is_problem_solution(node):
                 (preprocessed_node, component, truth_solution
                  ) = wrapping.solution_identify_component(node)
                 truth_problem = get_problem_from_solution(truth_solution)
                 if hasattr(truth_problem, "set_time"):
                     return True
             elif wrapping.is_problem_solution_dot(node):
                 return True
     return False
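A hedged sketch of how a generic helper like the one above might be bound to a concrete iterator (wrapping.expression_iterator appears in Code Example #1; the exact RBniCS wiring may differ):

def is_time_dependent(expression):
    # reuse the generic traversal with the expression iterator of the wrapping layer
    return _basic_is_time_dependent(expression, wrapping.expression_iterator)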
Code Example #5
File: test_utils.py  Project: MiroK/multiphenics
def assert_block_vectors_equal(rhs, block_rhs, block_V):
    if isinstance(rhs, tuple):
        rhs1 = rhs[0]
        rhs2 = rhs[1]
    else:
        rhs1 = rhs
        rhs2 = None
    comm = block_rhs.mpi_comm()
    if not has_pybind11():
        comm = comm.tompi4py()
    if rhs2 is not None:
        map_block_to_original = gather_on_zero((block_V.block_dofmap().block_to_original(0), block_V.block_dofmap().block_to_original(1)), comm, block_dofmap=block_V.block_dofmap(), dofmap=(block_V[0].dofmap(), block_V[1].dofmap()))
        rhs1g = gather_on_zero(rhs1, comm)
        rhs2g = gather_on_zero(rhs2, comm)
        if comm.Get_rank() == 0:
            rhsg = bvec([rhs1g, rhs2g])
        else:
            rhsg = None
    else:
        map_block_to_original = gather_on_zero(block_V.block_dofmap().block_to_original(0), comm, block_dofmap=block_V.block_dofmap(), dofmap=block_V[0].dofmap())
        rhs1g = gather_on_zero(rhs1, comm)
        rhsg = rhs1g
    block_rhsg = gather_on_zero(block_rhs, comm)
    if comm.Get_rank() == 0:
        assert block_rhsg.shape[0] == len(map_block_to_original)
        rhsg_for_assert = block_rhsg*0.
        for (block, original) in map_block_to_original.items():
            rhsg_for_assert[block] = rhsg[original]
        assert array_equal(rhsg_for_assert, block_rhsg)
    comm.barrier()
Code Example #6
 def _basic_tensor_load(tensor, directory, filename):
     mpi_comm = tensor.mpi_comm()
     if not has_pybind11():
         mpi_comm = mpi_comm.tompi4py()
     form = tensor.generator._form
     load_failed = False
     # Read in generator
     full_filename_generator = os.path.join(str(directory), filename + ".generator")
     generator_string = None
     if is_io_process(mpi_comm):
         if os.path.exists(full_filename_generator):
             with open(full_filename_generator, "r") as generator_file:
                 generator_string = generator_file.readline()
         else:
             load_failed = True
     if mpi_comm.bcast(load_failed, root=is_io_process.root):
         raise OSError
     else:
         generator_string = mpi_comm.bcast(generator_string, root=is_io_process.root)
     # Read in generator mpi size
     full_filename_generator_mpi_size = os.path.join(str(directory), filename + ".generator_mpi_size")
     generator_mpi_size_string = None
     if is_io_process(mpi_comm):
         if os.path.exists(full_filename_generator_mpi_size):
             with open(full_filename_generator_mpi_size, "r") as generator_mpi_size_file:
                 generator_mpi_size_string = generator_mpi_size_file.readline()
         else:
             load_failed = True
     if mpi_comm.bcast(load_failed, root=is_io_process.root):
         raise OSError
     else:
         generator_mpi_size_string = mpi_comm.bcast(generator_mpi_size_string, root=is_io_process.root)
     # Read in generator mapping from processor dependent indices (at the time of saving) to processor independent (global_cell_index, cell_dof) tuple
     permutation = _permutation_load(tensor, directory, filename, form, generator_string + "_" + generator_mpi_size_string, mpi_comm)
     _tensor_load(tensor, directory, filename, permutation)
Code Example #7
File: tensor_load.py  Project: ljnpu/RBniCS
 def _basic_tensor_load(tensor, directory, filename):
     mpi_comm = tensor.mpi_comm()
     if not has_pybind11():
         mpi_comm = mpi_comm.tompi4py()
     form = tensor.generator._form
     # Read in generator
     full_filename_generator = os.path.join(str(directory), filename + ".generator")
     def load_generator():
         if os.path.exists(full_filename_generator):
             with open(full_filename_generator, "r") as generator_file:
                 return generator_file.readline()
         else:
             raise OSError
     generator_string = parallel_io(load_generator, mpi_comm)
     # Read in generator mpi size
     full_filename_generator_mpi_size = os.path.join(str(directory), filename + ".generator_mpi_size")
     def load_generator_mpi_size():
         if os.path.exists(full_filename_generator_mpi_size):
             with open(full_filename_generator_mpi_size, "r") as generator_mpi_size_file:
                 return generator_mpi_size_file.readline()
         else:
             raise OSError
     generator_mpi_size_string = parallel_io(load_generator_mpi_size, mpi_comm)
     # Read in generator mapping from processor dependent indices (at the time of saving) to processor independent (global_cell_index, cell_dof) tuple
     permutation = _permutation_load(tensor, directory, filename, form, generator_string + "_" + generator_mpi_size_string, mpi_comm)
     _tensor_load(tensor, directory, filename, permutation, mpi_comm)
Code Example #8
def nonzero_values(function):
    if has_pybind11():
        serialized_vector = Vector(MPI.COMM_SELF)
    else:
        serialized_vector = Vector(mpi_comm_self())
    function.vector().gather(serialized_vector, array(range(function.function_space().dim()), "intc"))
    indices = nonzero(serialized_vector.get_local())
    return sort(serialized_vector.get_local()[indices])
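A hedged usage sketch (mesh, space and expression below are assumptions for illustration): every process gathers the full coefficient vector, so the same sorted nonzero values are returned on all ranks.

from dolfin import UnitIntervalMesh, FunctionSpace, Expression, interpolate

mesh = UnitIntervalMesh(10)
V = FunctionSpace(mesh, "Lagrange", 1)
f = interpolate(Expression("x[0]", degree=1), V)
print(nonzero_values(f))  # sorted nonzero entries of the gathered vector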
Code Example #9
File: conftest.py  Project: ljnpu/RBniCS
def pytest_runtest_teardown(item, nextitem):
    # Do the normal teardown
    item.teardown()
    # Add a MPI barrier in parallel
    if has_pybind11():
        MPI.barrier(MPI.comm_world)
    else:
        MPI.barrier(mpi_comm_world())
Code Example #10
def _get_local_dof_to_component_map(V,
                                    component=None,
                                    dof_component_map=None,
                                    recursive=False):
    if component is None:
        component = [-1]
    if dof_component_map is None:
        dof_component_map = dict()

    # From dolfin/function/LagrangeInterpolator.cpp,
    # method LagrangeInterpolator::extract_dof_component_map
    # Copyright (C) 2014 Mikael Mortensen
    if V.num_sub_spaces() == 0:
        # Extract sub dofmaps recursively and store dof to component map
        if has_pybind11():
            cpp_code = """
                #include <pybind11/pybind11.h>
                #include <pybind11/stl.h>
                #include <dolfin/fem/DofMap.h>
                #include <dolfin/mesh/Mesh.h>
                
                std::vector<std::size_t> collapse_dofmap(std::shared_ptr<dolfin::DofMap> dofmap, std::shared_ptr<dolfin::Mesh> mesh)
                {
                    std::unordered_map<std::size_t, std::size_t> collapsed_map;
                    dofmap->collapse(collapsed_map, *mesh);
                    std::vector<std::size_t> collapsed_dofs;
                    collapsed_dofs.reserve(collapsed_map.size());
                    for (auto const& collapsed_map_item: collapsed_map)
                        collapsed_dofs.push_back(collapsed_map_item.second);
                    return collapsed_dofs;
                }
                
                PYBIND11_MODULE(SIGNATURE, m)
                {
                    m.def("collapse_dofmap", &collapse_dofmap);
                }
            """
            collapse_dofmap = compile_cpp_code(cpp_code).collapse_dofmap
            collapsed_dofs = collapse_dofmap(V.dofmap(), V.mesh())
        else:
            collapsed_dofs = V.dofmap().collapse(V.mesh())[1].values()
        component[0] += 1
        for collapsed_dof in collapsed_dofs:
            if not recursive:  # space with only one component, do not print it
                dof_component_map[collapsed_dof] = -1
            else:
                dof_component_map[collapsed_dof] = component[0]
    else:
        for i in range(V.num_sub_spaces()):
            Vs = V.sub(i)
            _get_local_dof_to_component_map(Vs, component, dof_component_map,
                                            True)

    if not recursive:
        return dof_component_map
    else:
        return None
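A small usage sketch for the map above (the vector-valued space is an assumed setup): on a space without sub-spaces every dof maps to -1, while on a space with sub-spaces each dof maps to its component index.

from dolfin import UnitSquareMesh, VectorFunctionSpace

mesh = UnitSquareMesh(4, 4)
V = VectorFunctionSpace(mesh, "Lagrange", 1)
dof_to_component = _get_local_dof_to_component_map(V)
# dof_to_component[local_dof] is 0 or 1, i.e. the component of V owning that dof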
Code Example #11
def is_parametrized_constant(expr):
    if not isinstance(expr, Expression):
        return False
    else:
        if has_pybind11():
            cppcode = expr._cppcode
        else:
            cppcode = expr.cppcode
        return bool(is_parametrized_constant.regex.match(cppcode))
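The snippet above relies on a compiled regex attached to the function object, which is not shown here; a hedged sketch of how such an attribute could be set up (the actual pattern used by RBniCS may differ):

import re

is_parametrized_constant.regex = re.compile(r"^mu\[[0-9]+\]$")  # illustrative pattern only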
Code Example #12
 def _basic_expression_description(expression):
     visited = set()
     coefficients_repr = dict()
     for n in wrapping.expression_iterator(expression):
         if n in visited:
             continue
         if has_pybind11():
             cppcode_attribute = "_cppcode"
         else:
             cppcode_attribute = "cppcode"
         if hasattr(n, cppcode_attribute):
             coefficients_repr[n] = str(getattr(n, cppcode_attribute))
             visited.add(n)
         elif wrapping.is_problem_solution_or_problem_solution_component_type(n):
             if wrapping.is_problem_solution_or_problem_solution_component(n):
                 (preprocessed_n, component, truth_solution) = wrapping.solution_identify_component(n)
                 problem = get_problem_from_solution(truth_solution)
             else:
                 (problem, component) = wrapping.get_auxiliary_problem_for_non_parametrized_function(n)
                 preprocessed_n = n
             coefficients_repr[preprocessed_n] = "solution of " + str(problem.name())
             if len(component) == 1 and component[0] is not None:
                 coefficients_repr[preprocessed_n] += ", component " + str(component[0])
             elif len(component) > 1:
                 coefficients_repr[preprocessed_n] += ", component " + str(component)
             # Make sure to skip any parent solution related to this one
             visited.add(n)
             visited.add(preprocessed_n)
             for parent_n in wrapping.solution_iterator(preprocessed_n):
                 visited.add(parent_n)
         elif isinstance(n, Constant):
             if has_pybind11():
                 vals = n.values()
             else:
                 x = zeros(1)
                 vals = zeros(n.value_size())
                 n.eval(vals, x)
             if len(vals) == 1:
                 coefficients_repr[n] = str(vals[0])
             else:
                 coefficients_repr[n] = str(vals.reshape(n.ufl_shape))
             visited.add(n)
     return coefficients_repr
Code Example #13
File: conftest.py  Project: ljnpu/RBniCS
def pytest_runtest_teardown(item, nextitem):
    # Carry out additional teardown
    if hasattr(item, "_runtest_teardown_function"):
        item._runtest_teardown_function()
    # Do the normal teardown
    item.teardown()
    # Add a MPI barrier in parallel
    if has_pybind11():
        MPI.barrier(MPI.comm_world)
    else:
        MPI.barrier(mpi_comm_world())
Code Example #14
File: test_utils.py  Project: prklVIP/multiphenics
def assert_block_matrices_equal(lhs, block_lhs, block_V):
    if isinstance(lhs, tuple):
        lhs11 = lhs[0][0]
        lhs12 = lhs[0][1]
        lhs21 = lhs[1][0]
        lhs22 = lhs[1][1]
    else:
        lhs11 = lhs
        lhs12 = None
        lhs21 = None
        lhs22 = None
    comm = block_lhs.mpi_comm()
    if not has_pybind11():
        comm = comm.tompi4py()
    if lhs22 is not None:
        map_block_to_original = gather_on_zero(
            (block_V.block_dofmap().block_to_original(0),
             block_V.block_dofmap().block_to_original(1)),
            comm,
            block_dofmap=block_V.block_dofmap(),
            dofmap=(block_V[0].dofmap(), block_V[1].dofmap()))
        lhs11g = gather_on_zero(lhs11, comm)
        lhs12g = gather_on_zero(lhs12, comm)
        lhs21g = gather_on_zero(lhs21, comm)
        lhs22g = gather_on_zero(lhs22, comm)
        if comm.Get_rank() == 0:
            lhsg = bmat([[lhs11g, lhs12g], [lhs21g, lhs22g]])
        else:
            lhsg = None
    else:
        map_block_to_original = gather_on_zero(
            block_V.block_dofmap().block_to_original(0),
            comm,
            block_dofmap=block_V.block_dofmap(),
            dofmap=block_V[0].dofmap())
        lhs11g = gather_on_zero(lhs11, comm)
        lhsg = lhs11g
    block_lhsg = gather_on_zero(block_lhs, comm)
    if comm.Get_rank() == 0:
        assert block_lhsg.shape[0] == len(map_block_to_original)
        assert block_lhsg.shape[1] == len(map_block_to_original)
        lhsg_for_assert = block_lhsg * 0.
        for (block_i, original_i) in map_block_to_original.items():
            for (block_j, original_j) in map_block_to_original.items():
                lhsg_for_assert[block_i, block_j] = lhsg[original_i,
                                                         original_j]
        assert array_equal(lhsg_for_assert, block_lhsg)
    comm.barrier()
Code Example #15
    def __init__(self, V, subdomains, shape_parametrization_expression):
        # Store dolfin data structure related to the geometrical parametrization
        self.mesh = subdomains.mesh()
        self.subdomains = subdomains
        self.reference_coordinates = self.mesh.coordinates().copy()
        self.deformation_V = VectorFunctionSpace(self.mesh, "Lagrange", 1)
        self.subdomain_id_to_deformation_dofs = dict()  # from int to list
        for cell in cells(self.mesh):
            subdomain_id = int(
                self.subdomains[cell]
            ) - 1  # tuples start from 0, while subdomain ids start from 1
            if subdomain_id not in self.subdomain_id_to_deformation_dofs:
                self.subdomain_id_to_deformation_dofs[subdomain_id] = list()
            dofs = self.deformation_V.dofmap().cell_dofs(cell.index())
            for dof in dofs:
                global_dof = self.deformation_V.dofmap().local_to_global_index(
                    dof)
                if (self.deformation_V.dofmap().ownership_range()[0] <=
                        global_dof and global_dof <
                        self.deformation_V.dofmap().ownership_range()[1]):
                    self.subdomain_id_to_deformation_dofs[subdomain_id].append(
                        dof)
        # In parallel some subdomains may not be present on all processors. Fill in
        # the dict with empty lists if that is the case
        mpi_comm = self.mesh.mpi_comm()
        if not has_pybind11():
            mpi_comm = mpi_comm.tompi4py()
        min_subdomain_id = mpi_comm.allreduce(min(
            self.subdomain_id_to_deformation_dofs.keys()),
                                              op=MIN)
        max_subdomain_id = mpi_comm.allreduce(max(
            self.subdomain_id_to_deformation_dofs.keys()),
                                              op=MAX)
        for subdomain_id in range(min_subdomain_id, max_subdomain_id + 1):
            if subdomain_id not in self.subdomain_id_to_deformation_dofs:
                self.subdomain_id_to_deformation_dofs[subdomain_id] = list()
        # Subdomain numbering is contiguous
        assert min(self.subdomain_id_to_deformation_dofs.keys()) == 0
        assert len(self.subdomain_id_to_deformation_dofs.keys()) == max(
            self.subdomain_id_to_deformation_dofs.keys()) + 1

        # Store the shape parametrization expression
        self.shape_parametrization_expression = shape_parametrization_expression
        assert len(self.shape_parametrization_expression) == len(
            self.subdomain_id_to_deformation_dofs.keys())

        # Prepare storage for displacement expression, computed by init()
        self.displacement_expression = list()
Code Example #16
def get_global_dof_component(global_dof, V, global_to_local=None, local_dof_to_component=None):
    if global_to_local is None:
        global_to_local = get_global_dof_to_local_dof_map(V, V.dofmap())
    if local_dof_to_component is None:
        local_dof_to_component = get_local_dof_to_component_map(V)
    
    mpi_comm = V.mesh().mpi_comm()
    if not has_pybind11():
        mpi_comm = mpi_comm.tompi4py()
    dof_component = None
    dof_component_processor = -1
    if global_dof in global_to_local:
        dof_component = local_dof_to_component[global_to_local[global_dof]]
        dof_component_processor = mpi_comm.rank
    dof_component_processor = mpi_comm.allreduce(dof_component_processor, op=MAX)
    assert dof_component_processor >= 0
    return mpi_comm.bcast(dof_component, root=dof_component_processor)
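A hedged usage sketch (the space below is an assumed setup): the call is collective over the communicator of V, so every rank must invoke it with the same global dof.

from dolfin import UnitSquareMesh, VectorFunctionSpace

mesh = UnitSquareMesh(4, 4)
V = VectorFunctionSpace(mesh, "Lagrange", 1)
component = get_global_dof_component(0, V)  # component to which global dof 0 belongs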
Code Example #17
def dict_assert_equal(dic, directory, filename):
    assert isinstance(dic, dict)
    with open(os.path.join(directory, filename), "rb") as infile:
        dic_in = pickle.load(infile)
    if has_pybind11():
        assert dic.keys() == dic_in.keys()
        for key in dic.keys():
            dic_value = dic[key]
            dic_in_value = dic_in[key]
            if isinstance(dic_value, set) and isinstance(
                    dic_in_value, array
            ):  # pybind11 has changed the return type of shared entities
                assert dic_value == set(dic_in_value.tolist())
            else:
                assert dic_value == dic_in_value
    else:
        assert dic == dic_in
Code Example #18
 def _basic_is_parametrized(expression_or_form, iterator):
     for node in iterator(expression_or_form):
         # ... parametrized expressions
         if isinstance(node, BaseExpression):
             if is_pull_back_expression(
                     node) and is_pull_back_expression_parametrized(node):
                 return True
             else:
                 if has_pybind11():
                     parameters = node._parameters
                 else:
                     parameters = node.user_parameters
                 if "mu_0" in parameters:
                     return True
         # ... problem solutions related to nonlinear terms
         elif wrapping.is_problem_solution_type(node):
             if wrapping.is_problem_solution(
                     node) or wrapping.is_problem_solution_dot(node):
                 return True
     return False
Code Example #19
def _build_dof_map_writer_mapping(V, gathered_dofmap): # was build_global_to_cell_dof in dolfin
    mpi_comm = V.mesh().mpi_comm()
    if not has_pybind11():
        mpi_comm = mpi_comm.tompi4py()
    
    # Build global dof -> (global cell, local dof) map on root process
    global_dof_to_cell_dof = dict()
    if mpi_comm.rank == 0:
        i = 0
        while i < len(gathered_dofmap):
            global_cell_index = gathered_dofmap[i]
            i += 1
            num_dofs = gathered_dofmap[i]
            i += 1
            for j in range(num_dofs):
                if gathered_dofmap[i] not in global_dof_to_cell_dof:
                    global_dof_to_cell_dof[gathered_dofmap[i]] = list()
                global_dof_to_cell_dof[gathered_dofmap[i]].append([global_cell_index, j])
                i += 1
    global_dof_to_cell_dof = mpi_comm.bcast(global_dof_to_cell_dof, root=0)
    return global_dof_to_cell_dof
Code Example #20
def _get_local_dofmap(V):
    mesh = V.mesh()
    dofmap = V.dofmap()
    mpi_comm = mesh.mpi_comm()
    if not has_pybind11():
        mpi_comm = mpi_comm.tompi4py()

    local_dofmap = list()  # of integers

    # Check that local-to-global cell numbering is available
    assert mesh.topology().have_global_indices(mesh.topology().dim())

    # Get local-to-global map
    local_to_global_dof = dofmap.tabulate_local_to_global_dofs()

    # Build dof map data with global cell indices
    for cell in cells(mesh):
        local_cell_index = cell.index()
        global_cell_index = cell.global_index()
        cell_dofs = dofmap.cell_dofs(local_cell_index)

        cell_dofs_global = list()
        for cell_dof in cell_dofs:
            cell_dofs_global.append(local_to_global_dof[cell_dof])

        # Store information as follows: global_cell_index, size of dofs, cell dof global 1, ...., cell dof global end
        local_dofmap.append(global_cell_index)
        local_dofmap.append(len(cell_dofs))
        local_dofmap.extend(cell_dofs_global)

    # Gather dof map data on root process
    gathered_dofmap = mpi_comm.gather(local_dofmap, root=0)
    if mpi_comm.rank == 0:
        gathered_dofmap_flattened = list()
        for proc_map in gathered_dofmap:
            gathered_dofmap_flattened.extend(proc_map)
        return gathered_dofmap_flattened
    else:
        return list()
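A hedged sketch of how the two helpers above (Code Examples #19 and #20) could be composed for a dolfin FunctionSpace V (assumed to exist): the flattened dofmap is gathered on the root process and then turned into a global-dof lookup that is broadcast back to all ranks.

gathered_dofmap = _get_local_dofmap(V)  # non-empty on the root process only
writer_mapping = _build_dof_map_writer_mapping(V, gathered_dofmap)
# writer_mapping[global_dof] -> list of [global_cell_index, local_dof_index] pairs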
Code Example #21
def _build_dof_map_reader_mapping(V, gathered_dofmap): # was build_dof_map in dolfin
    mesh = V.mesh()
    mpi_comm = mesh.mpi_comm()
    if not has_pybind11():
        mpi_comm = mpi_comm.tompi4py()

    # Build global dofmap on root process
    dof_map = dict()
    if mpi_comm.rank == 0:
        i = 0
        while i < len(gathered_dofmap):
            global_cell_index = gathered_dofmap[i]
            i += 1
            num_dofs = gathered_dofmap[i]
            i += 1
            assert global_cell_index not in dof_map
            dof_map[global_cell_index] = list()
            for j in range(num_dofs):
                dof_map[global_cell_index].append(gathered_dofmap[i])
                i += 1
    dof_map = mpi_comm.bcast(dof_map, root=0)
    return dof_map
Code Example #22
# Copyright (C) 2016-2017 by the multiphenics authors
#
# This file is part of multiphenics.
#
# multiphenics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# multiphenics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with multiphenics. If not, see <http://www.gnu.org/licenses/>.
#

from dolfin import has_pybind11
from multiphenics.python import cpp

if has_pybind11():
    BlockPETScSubVector = cpp.la.BlockPETScSubVector
else:
    BlockPETScSubVector = cpp.BlockPETScSubVector
Code Example #23
 def __init__(self, V, subdomain_data=None, **kwargs):
     AbstractReducedMesh.__init__(self, V)
     #
     assert isinstance(V, tuple)
     assert len(V) in (1, 2)
     if len(V) == 2:
         assert V[0].mesh().ufl_domain() == V[1].mesh().ufl_domain()
     self.mesh = V[0].mesh()
     self.mpi_comm = self.mesh.mpi_comm()
     if not has_pybind11():
         self.mpi_comm = self.mpi_comm.tompi4py()
     self.V = V
     self.subdomain_data = subdomain_data
     
     # Detect if **kwargs are provided by the copy constructor in __getitem__
     if "copy_from" in kwargs:
         copy_from = kwargs["copy_from"]
         assert "key_as_slice" in kwargs
         key_as_slice = kwargs["key_as_slice"]
         assert "key_as_int" in kwargs
         key_as_int = kwargs["key_as_int"]
     else:
         copy_from = None
         key_as_slice = None
         key_as_int = None
         
     # Prepare storage for a helper dof-to-cell dict
     self.dof_to_cells = tuple() # of size len(V)
     # ... which is not initialized in the constructor to avoid wasting time online
     # ... since it is only needed offline in the append() method
     
     # Cell functions to mark cells (on the full mesh)
     self.reduced_mesh_markers = dict() # from N to MeshFunction
     # ... which again is not initialized here for performance reasons
     
     # DOFs list (of the full mesh) that need to be added at each N
     self.reduced_mesh_dofs_list = list() # list (of size N) of tuple (of size len(V)) of dofs
     if copy_from is not None:
         self.reduced_mesh_dofs_list.extend(copy_from.reduced_mesh_dofs_list[key_as_slice])
     # Prepare storage for helper mapping needed for I/O
     self.reduced_mesh_dofs_list__dof_map_writer_mapping = tuple() # of size len(V)
     self.reduced_mesh_dofs_list__dof_map_reader_mapping = tuple() # of size len(V)
     # ... which will be initialized as needed in the save and load methods
             
     # Reduced meshes, for all N
     self.reduced_mesh = dict() # from N to Mesh
     if copy_from is not None:
         self.reduced_mesh[key_as_int] = copy_from.reduced_mesh[key_as_int]
         
     # Reduced subdomain data, for all N
     self.reduced_subdomain_data = dict() # from N to dict from mesh MeshFunction to reduced_mesh MeshFunction
     if copy_from is not None:
         self.reduced_subdomain_data[key_as_int] = copy_from.reduced_subdomain_data[key_as_int]
         
     # Reduced function spaces, for all N
     self.reduced_function_spaces = dict() # from N to tuple (of size len(V)) of FunctionSpace
     if copy_from is not None:
         self.reduced_function_spaces[key_as_int] = copy_from.reduced_function_spaces[key_as_int]
         
     # DOFs list (of the reduced mesh) that need to be added at each N
     self.reduced_mesh_reduced_dofs_list = dict() # from N to list of tuple (of size len(V)) of dofs
     if copy_from is not None:
         self.reduced_mesh_reduced_dofs_list[key_as_int] = copy_from.reduced_mesh_reduced_dofs_list[key_as_int]
     # Prepare storage for helper mapping needed for I/O
     self.reduced_mesh_reduced_dofs_list__dof_map_writer_mapping = dict() # from N to tuple (of size len(V))
     self.reduced_mesh_reduced_dofs_list__dof_map_reader_mapping = dict() # from N to tuple (of size len(V))
     # ... which will be initialized as needed in the save and load methods
     
     # == The following members are related to auxiliary basis functions for nonlinear terms. == #
     # Spaces for auxiliary basis functions
     self._auxiliary_reduced_function_space = dict() # from (problem, N) to FunctionSpace
     if copy_from is not None:
         self._auxiliary_reduced_function_space = copy_from._auxiliary_reduced_function_space
     # Mapping between DOFs on the reduced mesh and DOFs on the full mesh for auxiliary basis functions
     self._auxiliary_dofs_to_reduced_dofs = dict() # from (problem, N) to dict from int to int
     if copy_from is not None:
         self._auxiliary_dofs_to_reduced_dofs = copy_from._auxiliary_dofs_to_reduced_dofs
     # Auxiliary basis functions
     self._auxiliary_basis_functions_matrix = dict() # from (problem, N) to BasisFunctionsMatrix
     if copy_from is not None:
         self._auxiliary_basis_functions_matrix = copy_from._auxiliary_basis_functions_matrix
     # Auxiliary function interpolator
     self._auxiliary_function_interpolator = dict() # from (problem, N) to function
     if copy_from is not None:
         self._auxiliary_function_interpolator = copy_from._auxiliary_function_interpolator
     # Prepare storage for helper mapping needed for I/O
     self._auxiliary_dofs__dof_map_writer_mapping = dict() # from problem
     self._auxiliary_dofs__dof_map_reader_mapping = dict() # from problem
     self._auxiliary_reduced_dofs__dof_map_writer_mapping = dict() # from (problem, N)
     self._auxiliary_reduced_dofs__dof_map_reader_mapping = dict() # from (problem, N)
     # ... which will be initialized as needed in the save and load methods
     # Store directory and filename passed to save()
     self._auxiliary_io_directory = None
     self._auxiliary_io_filename = None
     if copy_from is not None:
         self._auxiliary_io_directory = copy_from._auxiliary_io_directory
         self._auxiliary_io_filename = copy_from._auxiliary_io_filename
Code Example #24
def create_submesh(mesh, markers):
    mpi_comm = mesh.mpi_comm()
    if not has_pybind11():
        mpi_comm = mpi_comm.tompi4py()
    assert isinstance(markers, MeshFunctionBool)
    assert markers.dim() == mesh.topology().dim()
    marker_id = True
    
    # == 1. Extract marked cells == #
    # Dolfin does not support a distributed mesh that is empty on some processes.
    # cbcpost gets around this by moving a single cell from a non-empty processor to an empty one.
    # Note that, however, this cannot work if the number of marked cells is less than the number of processors.
    # In the interest of considering this case, we enable at least one cell (arbitrarily) on each processor.
    # We find this solution acceptable for our purposes, despite the increase of the reduced mesh size,
    # since we are never actually interested in solving a PDE on the reduced mesh, but rather only in
    # assembling tensors on it and extracting their values at some locations.
    backup_first_marker_id = None
    if marker_id not in markers.array():
        backup_first_marker_id = markers.array()[0]
        markers.array()[0] = marker_id
    assert marker_id in markers.array()
    
    # == 2. Create submesh == #
    submesh = Mesh(mesh.mpi_comm())
    mesh_editor = MeshEditor()
    mesh_editor.open(submesh,
                     mesh.ufl_cell().cellname(),
                     mesh.ufl_cell().topological_dimension(),
                     mesh.ufl_cell().geometric_dimension())
    # Extract cells from mesh with specified marker_id
    mesh_cell_indices = where(markers.array() == marker_id)[0]
    mesh_cells = mesh.cells()[mesh_cell_indices]
    mesh_global_cell_indices = sorted([mesh.topology().global_indices(mesh.topology().dim())[cell_index] for cell_index in mesh_cell_indices])
    # Get vertices of extracted cells
    mesh_vertex_indices = unique(mesh_cells.flatten())
    mesh_global_vertex_indices = sorted([mesh.topology().global_indices(0)[vertex_index] for vertex_index in mesh_vertex_indices])
    # Number vertices in a way which is independent from the number of processors. To do so ...
    # ... first of all collect all vertices from all processors
    allgathered_mesh_global_vertex_indices__non_empty_processors = list()
    allgathered_mesh_global_vertex_indices__empty_processors = list()
    for r in range(mpi_comm.size):
        backup_first_marker_id_r = mpi_comm.bcast(backup_first_marker_id, root=r)
        if backup_first_marker_id_r is None:
            allgathered_mesh_global_vertex_indices__non_empty_processors.extend(mpi_comm.bcast(mesh_global_vertex_indices, root=r))
        else:
            allgathered_mesh_global_vertex_indices__empty_processors.extend(mpi_comm.bcast(mesh_global_vertex_indices, root=r))
    allgathered_mesh_global_vertex_indices__non_empty_processors = sorted(unique(allgathered_mesh_global_vertex_indices__non_empty_processors))
    allgathered_mesh_global_vertex_indices__empty_processors = sorted(unique(allgathered_mesh_global_vertex_indices__empty_processors))
    # ... then create a dict that will contain the map from mesh global vertex index to submesh global vertex index.
    # ... Here make sure to number first "real" vertices (those coming from non empty processors), since the other ones
    # ... are just a side effect of the current partitioning!
    allgathered_mesh_to_submesh_vertex_global_indices = dict()
    _submesh_vertex_global_index = 0
    for mesh_vertex_global_index in allgathered_mesh_global_vertex_indices__non_empty_processors:
        assert mesh_vertex_global_index not in allgathered_mesh_to_submesh_vertex_global_indices
        allgathered_mesh_to_submesh_vertex_global_indices[mesh_vertex_global_index] = _submesh_vertex_global_index
        _submesh_vertex_global_index += 1
    for mesh_vertex_global_index in allgathered_mesh_global_vertex_indices__empty_processors:
        if mesh_vertex_global_index not in allgathered_mesh_to_submesh_vertex_global_indices:
            allgathered_mesh_to_submesh_vertex_global_indices[mesh_vertex_global_index] = _submesh_vertex_global_index
            _submesh_vertex_global_index += 1
    # Number cells in a way which is independent from the number of processors. To do so ...
    # ... first of all collect all cells from all processors
    allgathered_mesh_global_cell_indices__non_empty_processors = list()
    allgathered_mesh_global_cell_indices__empty_processors = list()
    for r in range(mpi_comm.size):
        backup_first_marker_id_r = mpi_comm.bcast(backup_first_marker_id, root=r)
        if backup_first_marker_id_r is None:
            allgathered_mesh_global_cell_indices__non_empty_processors.extend(mpi_comm.bcast(mesh_global_cell_indices, root=r))
        else:
            allgathered_mesh_global_cell_indices__empty_processors.extend(mpi_comm.bcast(mesh_global_cell_indices, root=r))
    allgathered_mesh_global_cell_indices__non_empty_processors = sorted(unique(allgathered_mesh_global_cell_indices__non_empty_processors))
    allgathered_mesh_global_cell_indices__empty_processors = sorted(unique(allgathered_mesh_global_cell_indices__empty_processors))
    # ... then create a dict that will contain the map from mesh global cell index to submesh global cell index.
    # ... Here make sure to number first "real" cells (those coming from non-empty processors), since the other ones
    # ... are just a side effect of the current partitioning!
    allgathered_mesh_to_submesh_cell_global_indices = dict()
    _submesh_cell_global_index = 0
    for mesh_cell_global_index in allgathered_mesh_global_cell_indices__non_empty_processors:
        assert mesh_cell_global_index not in allgathered_mesh_to_submesh_cell_global_indices
        allgathered_mesh_to_submesh_cell_global_indices[mesh_cell_global_index] = _submesh_cell_global_index
        _submesh_cell_global_index += 1
    for mesh_cell_global_index in allgathered_mesh_global_cell_indices__empty_processors:
        assert mesh_cell_global_index not in allgathered_mesh_to_submesh_cell_global_indices
        allgathered_mesh_to_submesh_cell_global_indices[mesh_cell_global_index] = _submesh_cell_global_index
        _submesh_cell_global_index += 1
    # Also create a mapping from mesh local vertex index to submesh local vertex index.
    mesh_to_submesh_vertex_local_indices = dict(zip(mesh_vertex_indices, list(range(len(mesh_vertex_indices)))))
    # Also create a mapping from mesh local cell index to submesh local cell index.
    mesh_to_submesh_cell_local_indices = dict(zip(mesh_cell_indices, list(range(len(mesh_cell_indices)))))
    # Now, define submesh cells
    submesh_cells = list()
    for i, c in enumerate(mesh_cells):
        submesh_cells.append([mesh_to_submesh_vertex_local_indices[j] for j in c])
    # Store vertices as submesh_vertices[local_index] = (global_index, coordinates)
    submesh_vertices = dict()
    for mesh_vertex_local_index, submesh_vertex_local_index in mesh_to_submesh_vertex_local_indices.items():
        submesh_vertices[submesh_vertex_local_index] = (
            allgathered_mesh_to_submesh_vertex_global_indices[mesh.topology().global_indices(0)[mesh_vertex_local_index]],
            mesh.coordinates()[mesh_vertex_local_index]
        )
    # Collect the global number of vertices and cells
    global_num_cells = mpi_comm.allreduce(len(submesh_cells), op=SUM)
    global_num_vertices = len(allgathered_mesh_to_submesh_vertex_global_indices)
    # Fill in mesh_editor
    mesh_editor.init_vertices_global(len(submesh_vertices), global_num_vertices)
    mesh_editor.init_cells_global(len(submesh_cells), global_num_cells)
    for local_index, cell_vertices in enumerate(submesh_cells):
        if has_pybind11():
            mesh_editor.add_cell(local_index, cell_vertices)
        else:
            mesh_editor.add_cell(local_index, *cell_vertices)
    for local_index, (global_index, coordinates) in submesh_vertices.items():
        mesh_editor.add_vertex_global(local_index, global_index, coordinates)
    mesh_editor.close()
    # Initialize topology
    submesh.topology().init(0, len(submesh_vertices), global_num_vertices)
    submesh.topology().init(mesh.ufl_cell().topological_dimension(), len(submesh_cells), global_num_cells)
    # Correct the global index of cells
    for local_index in range(len(submesh_cells)):
        submesh.topology().set_global_index(
            submesh.topology().dim(),
            local_index,
            allgathered_mesh_to_submesh_cell_global_indices[mesh_global_cell_indices[local_index]]
        )
    
    # == 3. Store (local) mesh to/from submesh map for cells, facets and vertices == #
    # Cells
    submesh.mesh_to_submesh_cell_local_indices = mesh_to_submesh_cell_local_indices
    submesh.submesh_to_mesh_cell_local_indices = mesh_cell_indices
    # Vertices
    submesh.mesh_to_submesh_vertex_local_indices = mesh_to_submesh_vertex_local_indices
    submesh.submesh_to_mesh_vertex_local_indices = mesh_vertex_indices
    # Facets
    mesh_vertices_to_mesh_facets = dict()
    mesh_facets_to_mesh_vertices = dict()
    for mesh_cell_index in mesh_cell_indices:
        mesh_cell = Cell(mesh, mesh_cell_index)
        for mesh_facet in facets(mesh_cell):
            mesh_facet_vertices = list()
            for mesh_facet_vertex in vertices(mesh_facet):
                mesh_facet_vertices.append(mesh_facet_vertex.index())
            mesh_facet_vertices = tuple(sorted(mesh_facet_vertices))
            if mesh_facet_vertices in mesh_vertices_to_mesh_facets:
                assert mesh_vertices_to_mesh_facets[mesh_facet_vertices] == mesh_facet.index()
            else:
                mesh_vertices_to_mesh_facets[mesh_facet_vertices] = mesh_facet.index()
            if mesh_facet.index() in mesh_facets_to_mesh_vertices:
                assert mesh_facets_to_mesh_vertices[mesh_facet.index()] == mesh_facet_vertices
            else:
                mesh_facets_to_mesh_vertices[mesh_facet.index()] = mesh_facet_vertices
    submesh_vertices_to_submesh_facets = dict()
    submesh_facets_to_submesh_vertices = dict()
    for submesh_facet in facets(submesh):
        submesh_facet_vertices = list()
        for submesh_facet_vertex in vertices(submesh_facet):
            submesh_facet_vertices.append(submesh_facet_vertex.index())
        submesh_facet_vertices = tuple(sorted(submesh_facet_vertices))
        assert submesh_facet_vertices not in submesh_vertices_to_submesh_facets
        submesh_vertices_to_submesh_facets[submesh_facet_vertices] = submesh_facet.index()
        assert submesh_facet.index() not in submesh_facets_to_submesh_vertices
        submesh_facets_to_submesh_vertices[submesh_facet.index()] = submesh_facet_vertices
    mesh_to_submesh_facets_local_indices = dict()
    for (mesh_facet_index, mesh_vertices) in mesh_facets_to_mesh_vertices.items():
        submesh_vertices = tuple(sorted([submesh.mesh_to_submesh_vertex_local_indices[mesh_vertex] for mesh_vertex in mesh_vertices]))
        submesh_facet_index = submesh_vertices_to_submesh_facets[submesh_vertices]
        mesh_to_submesh_facets_local_indices[mesh_facet_index] = submesh_facet_index
    submesh_to_mesh_facets_local_indices = dict()
    for (submesh_facet_index, submesh_vertices) in submesh_facets_to_submesh_vertices.items():
        mesh_vertices = tuple(sorted([submesh.submesh_to_mesh_vertex_local_indices[submesh_vertex] for submesh_vertex in submesh_vertices]))
        mesh_facet_index = mesh_vertices_to_mesh_facets[mesh_vertices]
        submesh_to_mesh_facets_local_indices[submesh_facet_index] = mesh_facet_index
    submesh.mesh_to_submesh_facet_local_indices = mesh_to_submesh_facets_local_indices
    submesh.submesh_to_mesh_facet_local_indices = list()
    assert min(submesh_to_mesh_facets_local_indices.keys()) == 0
    assert max(submesh_to_mesh_facets_local_indices.keys()) == len(submesh_to_mesh_facets_local_indices.keys()) - 1
    for submesh_facet_index in range(len(submesh_to_mesh_facets_local_indices)):
        submesh.submesh_to_mesh_facet_local_indices.append(submesh_to_mesh_facets_local_indices[submesh_facet_index])
    # == 3bis. Prepare (temporary) global indices of facets == #
    # Wrapper to DistributedMeshTools::number_entities
    if has_pybind11():
        cpp_code = """
            #include <pybind11/pybind11.h>
            #include <dolfin/mesh/DistributedMeshTools.h>
            #include <dolfin/mesh/Mesh.h>
            
            void initialize_global_indices(std::shared_ptr<dolfin::Mesh> mesh, std::size_t dim)
            {
                dolfin::DistributedMeshTools::number_entities(*mesh, dim);
            }
            
            PYBIND11_MODULE(SIGNATURE, m)
            {
                m.def("initialize_global_indices", &initialize_global_indices);
            }
        """
        initialize_global_indices = compile_cpp_code(cpp_code).initialize_global_indices
    else:
        cpp_code = """
            void initialize_global_indices(Mesh & mesh, std::size_t dim)
            {
                DistributedMeshTools::number_entities(mesh, dim);
            }
        """
        initialize_global_indices = compile_extension_module(cpp_code, additional_system_headers=["dolfin/mesh/DistributedMeshTools.h"]).initialize_global_indices
    initialize_global_indices(mesh, mesh.topology().dim() - 1)
    # Prepare global indices of facets
    mesh_facets_local_to_global_indices = dict()
    for mesh_cell_index in mesh_cell_indices:
        mesh_cell = Cell(mesh, mesh_cell_index)
        for mesh_facet in facets(mesh_cell):
            mesh_facets_local_to_global_indices[mesh_facet.index()] = mesh_facet.global_index()
    mesh_facets_global_indices_in_submesh = list()
    for mesh_facet_local_index in mesh_to_submesh_facets_local_indices.keys():
        mesh_facets_global_indices_in_submesh.append(mesh_facets_local_to_global_indices[mesh_facet_local_index])
    allgathered__mesh_facets_global_indices_in_submesh = list()
    for r in range(mpi_comm.size):
        allgathered__mesh_facets_global_indices_in_submesh.extend(mpi_comm.bcast(mesh_facets_global_indices_in_submesh, root=r))
    allgathered__mesh_facets_global_indices_in_submesh = sorted(set(allgathered__mesh_facets_global_indices_in_submesh))
    mesh_to_submesh_facets_global_indices = dict()
    for (submesh_facet_global_index, mesh_facet_global_index) in enumerate(allgathered__mesh_facets_global_indices_in_submesh):
        mesh_to_submesh_facets_global_indices[mesh_facet_global_index] = submesh_facet_global_index
    submesh_facets_local_to_global_indices = dict()
    for (submesh_facet_local_index, mesh_facet_local_index) in submesh_to_mesh_facets_local_indices.items():
        submesh_facets_local_to_global_indices[submesh_facet_local_index] = mesh_to_submesh_facets_global_indices[mesh_facets_local_to_global_indices[mesh_facet_local_index]]
    
    # == 4. Assign shared vertices == #
    shared_entities_dimensions = {
        "vertex": 0,
        "facet": submesh.topology().dim() - 1,
        "cell": submesh.topology().dim()
    }
    shared_entities_class = {
        "vertex": Vertex,
        "facet": Facet,
        "cell": Cell
    }
    shared_entities_iterator = {
        "vertex": vertices,
        "facet": facets,
        "cell": cells
    }
    shared_entities_submesh_global_index_getter = {
        "vertex": lambda entity: entity.global_index(),
        "facet": lambda entity: submesh_facets_local_to_global_indices[entity.index()],
        "cell": lambda entity: entity.global_index()
    }
    for entity_type in ["vertex", "facet", "cell"]: # do not use .keys() because the order is important
        dim = shared_entities_dimensions[entity_type]
        class_ = shared_entities_class[entity_type]
        iterator = shared_entities_iterator[entity_type]
        submesh_global_index_getter = shared_entities_submesh_global_index_getter[entity_type]
        # Get shared entities from mesh. A subset of these will end up being shared entities also in the submesh
        # (thanks to the fact that we do not redistribute cells from one processor to another)
        if mpi_comm.size > 1: # some entities may not be initialized in serial, since they are not needed
            assert mesh.topology().have_shared_entities(dim), "Mesh shared entities have not been initialized for dimension " + str(dim)
        if mesh.topology().have_shared_entities(dim): # always true in parallel (when really needed)
            # However, it may happen that an entity which has been selected is not shared anymore because only one of
            # the sharing processes has it in the submesh. For instance, consider the case
            # of two cells across the interface (located on a facet f) between two processors. It may happen that
            # only one of the two cells is selected: the facet f and its vertices are not shared anymore!
            # For this reason, we create a new dict from global entity index to processors sharing them. Thus ...
            # ... first of all get global indices corresponding to local entities
            if entity_type in ["vertex", "cell"]:
                assert submesh.topology().have_global_indices(dim), "Submesh global indices have not been initialized for dimension " + str(dim)
            submesh_local_entities_global_index = list()
            submesh_local_entities_global_to_local_index = dict()
            for entity in iterator(submesh):
                local_entity_index = entity.index()
                global_entity_index = submesh_global_index_getter(entity)
                submesh_local_entities_global_index.append(global_entity_index)
                submesh_local_entities_global_to_local_index[global_entity_index] = local_entity_index
            # ... then gather all global indices from all processors
            gathered__submesh_local_entities_global_index = list() # over processor id
            for r in range(mpi_comm.size):
                gathered__submesh_local_entities_global_index.append(mpi_comm.bcast(submesh_local_entities_global_index, root=r))
            # ... then create dict from global index to processors sharing it
            submesh_shared_entities__global = dict()
            for r in range(mpi_comm.size):
                for global_entity_index in gathered__submesh_local_entities_global_index[r]:
                    if global_entity_index not in submesh_shared_entities__global:
                        submesh_shared_entities__global[global_entity_index] = list()
                    submesh_shared_entities__global[global_entity_index].append(r)
            # ... and finally populate the shared entities dict, which is the same as the dict above except that
            # the current processor rank is removed and a local indexing is used
            submesh_shared_entities = dict() # from local index to list of integers
            for (global_entity_index, processors) in submesh_shared_entities__global.items():
                if (
                    mpi_comm.rank in processors  # only local entities
                        and
                    len(processors) > 1 # it was still shared after submesh extraction
                ):
                    other_processors_list = list(processors)
                    other_processors_list.remove(mpi_comm.rank)
                    other_processors = array(other_processors_list, dtype=uintp)
                    submesh_shared_entities[submesh_local_entities_global_to_local_index[global_entity_index]] = other_processors

            # Need an extension module to populate shared_entities because in python each call to shared_entities
            # returns a temporary.
            if has_pybind11():
                cpp_code = """
                    #include <Eigen/Core>
                    #include <pybind11/pybind11.h>
                    #include <pybind11/eigen.h>
                    #include <dolfin/mesh/Mesh.h>
                    
                    using OtherProcesses = Eigen::Ref<const Eigen::Matrix<std::size_t, Eigen::Dynamic, 1>>;
                    
                    void set_shared_entities(std::shared_ptr<dolfin::Mesh> submesh, std::size_t idx, const OtherProcesses other_processes, std::size_t dim)
                    {
                        std::set<unsigned int> set_other_processes;
                        for (std::size_t i(0); i < other_processes.size(); i++)
                            set_other_processes.insert(other_processes[i]);
                        submesh->topology().shared_entities(dim)[idx] = set_other_processes;
                    }
                    
                    PYBIND11_MODULE(SIGNATURE, m)
                    {
                        m.def("set_shared_entities", &set_shared_entities);
                    }
                """
                set_shared_entities = compile_cpp_code(cpp_code).set_shared_entities
            else:
                cpp_code = """
                    void set_shared_entities(Mesh & submesh, std::size_t idx, const Array<std::size_t>& other_processes, std::size_t dim)
                    {
                        std::set<unsigned int> set_other_processes;
                        for (std::size_t i(0); i < other_processes.size(); i++)
                            set_other_processes.insert(other_processes[i]);
                        submesh.topology().shared_entities(dim)[idx] = set_other_processes;
                    }
                """
                set_shared_entities = compile_extension_module(cpp_code).set_shared_entities
            for (submesh_entity_local_index, other_processors) in submesh_shared_entities.items():
                set_shared_entities(submesh, submesh_entity_local_index, other_processors, dim)
                
            log(DEBUG, "Local indices of shared entities for dimension " + str(dim) + ": " + str(list(submesh.topology().shared_entities(0).keys())))
            log(DEBUG, "Global indices of shared entities for dimension " + str(dim) + ": " + str([class_(submesh, local_index).global_index() for local_index in submesh.topology().shared_entities(dim).keys()]))
    
    # == 5. Also initialize submesh facets global indices, now that shared facets have been computed == #
    initialize_global_indices(submesh, submesh.topology().dim() - 1) # note that DOLFIN might change the numbering when compared to the one at 3bis
    
    # == 6. Restore backup_first_marker_id and return == #
    if backup_first_marker_id is not None:
        markers.array()[0] = backup_first_marker_id
    return submesh
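A hedged usage sketch for create_submesh (mesh and marking below are illustrative assumptions): cells are selected through a boolean MeshFunction of the same topological dimension as the mesh, and the extracted submesh keeps a consistent parallel numbering.

from dolfin import UnitSquareMesh, MeshFunction, cells

mesh = UnitSquareMesh(8, 8)
markers = MeshFunction("bool", mesh, mesh.topology().dim(), False)
for cell in cells(mesh):
    if cell.midpoint().x() < 0.5:
        markers[cell] = True
submesh = create_submesh(mesh, markers)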
Code Example #25
File: conftest.py  Project: ljnpu/RBniCS

from dolfin import MPI
from rbnics.utils.test import disable_matplotlib, enable_matplotlib, load_tempdir, save_tempdir, tempdir  # noqa
from dolfin import has_pybind11 # added back to dolfin as a side effect of rbnics import
if not has_pybind11():
    from dolfin import mpi_comm_world

# Customize item selection
def pytest_collection_modifyitems(session, config, items):
    # Deselect first using markers
    from _pytest.mark import pytest_collection_modifyitems as pytest_collection_modifyitems_from_marks # cannot import globally
    pytest_collection_modifyitems_from_marks(items, config)
    
    # Separated parametrized forms tests require clean UFL and DOLFIN counters ...
    deselect_separated_parametrized_forms = False
    if any([item.name.startswith("test_separated_parametrized_forms") for item in items]):
        # ... so they cannot be mixed with other tests
        if not all([item.name.startswith("test_separated_parametrized_forms") for item in items]):
            deselect_separated_parametrized_forms = True
        # ... and with each other (scalar vs vector vs mixed)
Code Example #26
def map_functionspaces_between_mesh_and_submesh(functionspace_on_mesh, mesh, functionspace_on_submesh, submesh, global_indices=True):
    mesh_dofs_to_submesh_dofs = dict()
    submesh_dofs_to_mesh_dofs = dict()
    
    # Initialize map from mesh dofs to submesh dofs, and viceversa
    if functionspace_on_mesh.num_sub_spaces() > 0:
        assert functionspace_on_mesh.num_sub_spaces() == functionspace_on_submesh.num_sub_spaces()
        for i in range(functionspace_on_mesh.num_sub_spaces()):
            (mesh_dofs_to_submesh_dofs_i, submesh_dofs_to_mesh_dofs_i) = map_functionspaces_between_mesh_and_submesh(functionspace_on_mesh.sub(i), mesh, functionspace_on_submesh.sub(i), submesh, global_indices)
            for (mesh_dof, submesh_dof) in mesh_dofs_to_submesh_dofs_i.items():
                assert mesh_dof not in mesh_dofs_to_submesh_dofs
                assert submesh_dof not in submesh_dofs_to_mesh_dofs
            mesh_dofs_to_submesh_dofs.update(mesh_dofs_to_submesh_dofs_i)
            submesh_dofs_to_mesh_dofs.update(submesh_dofs_to_mesh_dofs_i)
        # Return
        return (mesh_dofs_to_submesh_dofs, submesh_dofs_to_mesh_dofs)
    else:
        assert functionspace_on_mesh.ufl_element().family() in ("Lagrange", "Discontinuous Lagrange"), "The current implementation has been tested only for Lagrange or Discontinuous Lagrange function spaces"
        assert functionspace_on_submesh.ufl_element().family() in ("Lagrange", "Discontinuous Lagrange"), "The current implementation has been tested only for Lagrange or Discontinuous Lagrange function spaces"
        mesh_element = functionspace_on_mesh.element()
        mesh_dofmap = functionspace_on_mesh.dofmap()
        submesh_element = functionspace_on_submesh.element()
        submesh_dofmap = functionspace_on_submesh.dofmap()
        for submesh_cell in cells(submesh):
            submesh_dof_coordinates = submesh_element.tabulate_dof_coordinates(submesh_cell)
            submesh_cell_dofs = submesh_dofmap.cell_dofs(submesh_cell.index())
            if global_indices:
                submesh_cell_dofs = [functionspace_on_submesh.dofmap().local_to_global_index(local_dof) for local_dof in submesh_cell_dofs]
            mesh_cell = Cell(mesh, submesh.submesh_to_mesh_cell_local_indices[submesh_cell.index()])
            mesh_dof_coordinates = mesh_element.tabulate_dof_coordinates(mesh_cell)
            mesh_cell_dofs = mesh_dofmap.cell_dofs(mesh_cell.index())
            if global_indices:
                mesh_cell_dofs = [functionspace_on_mesh.dofmap().local_to_global_index(local_dof) for local_dof in mesh_cell_dofs]
            assert len(submesh_dof_coordinates) == len(mesh_dof_coordinates)
            assert len(submesh_cell_dofs) == len(mesh_cell_dofs)
            # Build a KDTree to compute distances from coordinates in mesh
            kdtree = KDTree(mesh_dof_coordinates)
            distances, mesh_indices = kdtree.query(submesh_dof_coordinates)
            # Map from mesh to submesh
            for (i, submesh_dof) in enumerate(submesh_cell_dofs):
                distance, mesh_index = distances[i], mesh_indices[i]
                assert distance < mesh_cell.h()*1e-5
                mesh_dof = mesh_cell_dofs[mesh_index]
                if mesh_dof not in mesh_dofs_to_submesh_dofs:
                    mesh_dofs_to_submesh_dofs[mesh_dof] = submesh_dof
                else:
                    assert mesh_dofs_to_submesh_dofs[mesh_dof] == submesh_dof
                if submesh_dof not in submesh_dofs_to_mesh_dofs:
                    submesh_dofs_to_mesh_dofs[submesh_dof] = mesh_dof
                else:
                    assert submesh_dofs_to_mesh_dofs[submesh_dof] == mesh_dof
        # Broadcast in parallel
        if global_indices:
            mpi_comm = mesh.mpi_comm()
            if not has_pybind11():
                mpi_comm = mpi_comm.tompi4py()
            allgathered_mesh_dofs_to_submesh_dofs = mpi_comm.bcast(mesh_dofs_to_submesh_dofs, root=0)
            allgathered_submesh_dofs_to_mesh_dofs = mpi_comm.bcast(submesh_dofs_to_mesh_dofs, root=0)
            for r in range(1, mpi_comm.size):
                allgathered_mesh_dofs_to_submesh_dofs.update(mpi_comm.bcast(mesh_dofs_to_submesh_dofs, root=r))
                allgathered_submesh_dofs_to_mesh_dofs.update(mpi_comm.bcast(submesh_dofs_to_mesh_dofs, root=r))
        else:
            allgathered_mesh_dofs_to_submesh_dofs = mesh_dofs_to_submesh_dofs
            allgathered_submesh_dofs_to_mesh_dofs = submesh_dofs_to_mesh_dofs
        # Return
        return (allgathered_mesh_dofs_to_submesh_dofs, allgathered_submesh_dofs_to_mesh_dofs)
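A hedged usage sketch of the mapping above, assuming `mesh` and `submesh` were produced by the submesh-creation example earlier in this listing (so that `submesh.submesh_to_mesh_cell_local_indices` is available):

from dolfin import FunctionSpace

# mesh and submesh are assumed to come from the submesh-creation example above
V_mesh = FunctionSpace(mesh, "Lagrange", 1)
V_submesh = FunctionSpace(submesh, "Lagrange", 1)
(mesh_to_submesh, submesh_to_mesh) = map_functionspaces_between_mesh_and_submesh(
    V_mesh, mesh, V_submesh, submesh)
# The two returned maps are mutually consistent on the shared dofs
for mesh_dof, submesh_dof in mesh_to_submesh.items():
    assert submesh_to_mesh[submesh_dof] == mesh_dof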
Code example #27
        def separate(self):
            class _SeparatedParametrizedForm_Replacer(Transformer):
                def __init__(self, mapping):
                    Transformer.__init__(self)
                    self.mapping = mapping

                def operator(self, e, *ops):
                    if e in self.mapping:
                        return self.mapping[e]
                    else:
                        return e._ufl_expr_reconstruct_(*ops)
                    
                def terminal(self, e):
                    return self.mapping.get(e, e)
            
            log(PROGRESS, "***        SEPARATE FORM COEFFICIENTS        ***")
            
            log(PROGRESS, "1. Extract coefficients")
            integral_to_coefficients = dict()
            for integral in self._form.integrals():
                log(PROGRESS, "\t Currently on integrand " + str(integral.integrand()))
                self._coefficients.append(list()) # of ParametrizedExpression
                for e in iter_expressions(integral):
                    log(PROGRESS, "\t\t Expression " + str(e))
                    pre_traversal_e = [n for n in pre_traversal(e)]
                    tree_nodes_skip = [False for _ in pre_traversal_e]
                    for (n_i, n) in enumerate(pre_traversal_e):
                        if not tree_nodes_skip[n_i]:
                            # Skip expressions which are trivially non parametrized
                            if isinstance(n, Argument):
                                log(PROGRESS, "\t\t Node " + str(n) + " is skipped because it is an Argument")
                                continue
                            elif isinstance(n, Constant):
                                log(PROGRESS, "\t\t Node " + str(n) + " is skipped because it is a Constant")
                                continue
                            elif isinstance(n, MultiIndex):
                                log(PROGRESS, "\t\t Node " + str(n) + " is skipped because it is a MultiIndex")
                                continue
                            # Skip all expressions with at least one leaf which is an Argument
                            for t in traverse_terminals(n):
                                if isinstance(t, Argument):
                                    log(PROGRESS, "\t\t Node " + str(n) + " is skipped because it contains an Argument")
                                    break
                            else: # not broken
                                log(PROGRESS, "\t\t Node " + str(n) + " and its descendants are being analyzed for non-parametrized check")
                                # Make sure to skip all descendants of this node in the outer loop
                                # Note that a map with key set to the expression is not enough to
                                # mark the node as visited, since the same expression may appear
                                # on different sides of the tree
                                pre_traversal_n = [d for d in pre_traversal(n)]
                                for (d_i, d) in enumerate(pre_traversal_n):
                                    assert d == pre_traversal_e[n_i + d_i] # make sure that we are marking the right node
                                    tree_nodes_skip[n_i + d_i] = True
                                # We might be able to strip any (non-parametrized) expression out
                                all_candidates = list()
                                internal_tree_nodes_skip = [False for _ in pre_traversal_n]
                                for (d_i, d) in enumerate(pre_traversal_n):
                                    if not internal_tree_nodes_skip[d_i]:
                                        # Skip all expressions where at least one leaf is not parametrized
                                        for t in traverse_terminals(d):
                                            if isinstance(t, BaseExpression):
                                                if wrapping.is_pull_back_expression(t) and not wrapping.is_pull_back_expression_parametrized(t):
                                                    log(PROGRESS, "\t\t\t Descendant node " + str(d) + " causes the non-parametrized check to break because it contains a non-parametrized pulled back expression")
                                                    break
                                                else:
                                                    if has_pybind11():
                                                        parameters = t._parameters
                                                    else:
                                                        parameters = t.user_parameters
                                                    if "mu_0" not in parameters:
                                                        log(PROGRESS, "\t\t\t Descendant node " + str(d) + " causes the non-parametrized check to break because it contains a non-parametrized expression")
                                                        break
                                            elif isinstance(t, Constant):
                                                log(PROGRESS, "\t\t\t Descendant node " + str(d) + " causes the non-parametrized check to break because it contains a constant")
                                                break
                                            elif isinstance(t, GeometricQuantity) and not isinstance(t, FacetNormal) and self._strict:
                                                log(PROGRESS, "\t\t\t Descendant node " + str(d) + " causes the non-parametrized check to break because it contains a geometric quantity and strict mode is on")
                                                break
                                            elif wrapping.is_problem_solution_or_problem_solution_component_type(t):
                                                if not wrapping.is_problem_solution_or_problem_solution_component(t):
                                                    log(PROGRESS, "\t\t\t Descendant node " + str(d) + " causes the non-parametrized check to break because it contains a non-parametrized function")
                                                    break
                                                elif self._strict: # solutions are not allowed, break
                                                    (_, _, solution) = wrapping.solution_identify_component(t)
                                                    log(PROGRESS, "\t\t\t Descendant node " + str(d) + " causes the non-parametrized check to break because it contains the solution of " + get_problem_from_solution(solution).name() + "and strict mode is on")
                                                    break
                                        else:
                                            at_least_one_expression_or_solution = False
                                            for t in traverse_terminals(d):
                                                if isinstance(t, BaseExpression): # which is parametrized, because previous for loop was not broken
                                                    at_least_one_expression_or_solution = True
                                                    log(PROGRESS, "\t\t\t Descendant node " + str(d) + " is a candidate after non-parametrized check because it contains the parametrized expression " + str(t))
                                                    break
                                                elif wrapping.is_problem_solution_or_problem_solution_component_type(t):
                                                    if wrapping.is_problem_solution_or_problem_solution_component(t):
                                                        at_least_one_expression_or_solution = True
                                                        (_, _, solution) = wrapping.solution_identify_component(t)
                                                        log(PROGRESS, "\t\t\t Descendant node " + str(d) + " is a candidate after non-parametrized check because it contains the solution of " + get_problem_from_solution(solution).name())
                                                        break
                                            if at_least_one_expression_or_solution:
                                                all_candidates.append(d)
                                                pre_traversal_d = [q for q in pre_traversal(d)]
                                                for (q_i, q) in enumerate(pre_traversal_d):
                                                    assert q == pre_traversal_n[d_i + q_i] # make sure that we are marking the right node
                                                    internal_tree_nodes_skip[d_i + q_i] = True
                                            else:
                                                log(PROGRESS, "\t\t\t Descendant node " + str(d) + " has not passed the non-parametrized because it is not a parametrized expression or a solution")
                                # Evaluate candidates
                                if len(all_candidates) == 0: # the whole expression was actually non-parametrized
                                    log(PROGRESS, "\t\t Node " + str(n) + " is skipped because it is a non-parametrized coefficient")
                                    continue
                                elif len(all_candidates) == 1: # the whole expression was actually parametrized
                                    log(PROGRESS, "\t\t Node " + str(n) + " will be accepted because it is a non-parametrized coefficient")
                                    pass
                                else: # part of the expression was not parametrized, and separating the non parametrized part may result in more than one coefficient
                                    if self._strict: # non parametrized coefficients are not allowed, so split the expression
                                        log(PROGRESS, "\t\t\t Node " + str(n) + " will be accepted because it is a non-parametrized coefficient with more than one candidate. It will be split because strict mode is on. Its split coefficients are " + ", ".join([str(c) for c in all_candidates]))
                                    else: # non parametrized coefficients are allowed, so go on with the whole expression
                                        log(PROGRESS, "\t\t\t Node " + str(n) + " will be accepted because it is a non-parametrized coefficient with more than one candidate. It will not be split because strict mode is off. Splitting it would have resulted in more than one coefficient, namely " + ", ".join([str(c) for c in all_candidates]))
                                        all_candidates = [n]
                                # Add the coefficient(s)
                                for candidate in all_candidates:
                                    def preprocess_candidate(candidate):
                                        if isinstance(candidate, Indexed):
                                            assert len(candidate.ufl_operands) == 2
                                            assert isinstance(candidate.ufl_operands[1], MultiIndex)
                                            if all([isinstance(index, FixedIndex) for index in candidate.ufl_operands[1].indices()]):
                                                log(PROGRESS, "\t\t\t Preprocessed descendant node " + str(candidate) + " as an Indexed expression with fixed indices, resulting in a candidate " + str(candidate) + " of type " + str(type(candidate)))
                                                return candidate # no further preprocessing needed
                                            else:
                                                log(PROGRESS, "\t\t\t Preprocessed descendant node " + str(candidate) + " as an Indexed expression with at least one mute index, resulting in a candidate " + str(candidate.ufl_operands[0]) + " of type " + str(type(candidate.ufl_operands[0])))
                                                return preprocess_candidate(candidate.ufl_operands[0])
                                        elif isinstance(candidate, IndexSum):
                                            assert len(candidate.ufl_operands) == 2
                                            assert isinstance(candidate.ufl_operands[1], MultiIndex)
                                            assert all([isinstance(index, MuteIndex) for index in candidate.ufl_operands[1].indices()])
                                            log(PROGRESS, "\t\t\t Preprocessed descendant node " + str(candidate) + " as an IndexSum expression, resulting in a candidate " + str(candidate.ufl_operands[0]) + " of type " + str(type(candidate.ufl_operands[0])))
                                            return preprocess_candidate(candidate.ufl_operands[0])
                                        elif isinstance(candidate, ListTensor):
                                            candidates = set([preprocess_candidate(component) for component in candidate.ufl_operands])
                                            if len(candidates) == 1:
                                                preprocessed_candidate = candidates.pop()
                                                log(PROGRESS, "\t\t\t Preprocessed descendant node " + str(candidate) + " as an ListTensor expression with a unique preprocessed component, resulting in a candidate " + str(preprocessed_candidate) + " of type " + str(type(preprocessed_candidate)))
                                                return preprocess_candidate(preprocessed_candidate)
                                            else:
                                                at_least_one_mute_index = False
                                                candidates_from_components = list()
                                                for component in candidates:
                                                    assert isinstance(component, (ComponentTensor, Indexed))
                                                    assert len(component.ufl_operands) == 2
                                                    assert isinstance(component.ufl_operands[1], MultiIndex)
                                                    if not all([isinstance(index, FixedIndex) for index in component.ufl_operands[1].indices()]):
                                                        at_least_one_mute_index = True
                                                    candidates_from_components.append(preprocess_candidate(component.ufl_operands[0]))
                                                if at_least_one_mute_index:
                                                    candidates_from_components = set(candidates_from_components)
                                                    assert len(candidates_from_components) == 1
                                                    preprocessed_candidate = candidates_from_components.pop()
                                                    log(PROGRESS, "\t\t\t Preprocessed descendant node " + str(candidate) + " as an ListTensor expression with multiple preprocessed components with at least one mute index, resulting in a candidate " + str(preprocessed_candidate) + " of type " + str(type(preprocessed_candidate)))
                                                    return preprocess_candidate(preprocessed_candidate)
                                                else:
                                                    log(PROGRESS, "\t\t\t Preprocessed descendant node " + str(candidate) + " as an ListTensor expression with multiple preprocessed components with fixed indices, resulting in a candidate " + str(candidate) + " of type " + str(type(candidate)))
                                                    return candidate # no further preprocessing needed
                                        else:
                                            log(PROGRESS, "\t\t\t No preprocessing required for descendant node " + str(candidate) + " as a coefficient of type " + str(type(candidate)))
                                            return candidate
                                    preprocessed_candidate = preprocess_candidate(candidate)
                                    if preprocessed_candidate not in self._coefficients[-1]:
                                        self._coefficients[-1].append(preprocessed_candidate)
                                    log(PROGRESS, "\t\t\t Accepting descendant node " + str(preprocessed_candidate) + " as a coefficient of type " + str(type(preprocessed_candidate)))
                        else:
                            log(PROGRESS, "\t\t Node " + str(n) + " to be skipped because it is a descendant of a coefficient which has already been detected")
                if len(self._coefficients[-1]) == 0: # then there were no coefficients to extract
                    log(PROGRESS, "\t There were no coefficients to extract")
                    self._coefficients.pop() # remove the (empty) element that was added to possibly store coefficients
                else:
                    log(PROGRESS, "\t Extracted coefficients are:\n\t\t" + "\n\t\t".join([str(c) for c in self._coefficients[-1]]))
                    integral_to_coefficients[integral] = self._coefficients[-1]
            
            log(PROGRESS, "2. Prepare placeholders and forms with placeholders")
            for integral in self._form.integrals():
                # Prepare measure for the new form (from firedrake/mg/ufl_utils.py)
                measure = Measure(
                    integral.integral_type(),
                    domain=integral.ufl_domain(),
                    subdomain_id=integral.subdomain_id(),
                    subdomain_data=integral.subdomain_data(),
                    metadata=integral.metadata()
                )
                if integral not in integral_to_coefficients:
                    log(PROGRESS, "\t Adding form for integrand " + str(integral.integrand()) + " to unchanged forms")
                    self._form_unchanged.append(integral.integrand()*measure)
                else:
                    log(PROGRESS, "\t Preparing form with placeholders for integrand " + str(integral.integrand()))
                    self._placeholders.append(list()) # of Constants
                    placeholders_dict = dict()
                    for c in integral_to_coefficients[integral]:
                        self._placeholders[-1].append(Constant(self._NaN*ones(c.ufl_shape)))
                        placeholders_dict[c] = self._placeholders[-1][-1]
                        log(PROGRESS, "\t\t " + str(placeholders_dict[c]) + " is the placeholder for " + str(c))
                    replacer = _SeparatedParametrizedForm_Replacer(placeholders_dict)
                    new_integrand = apply_transformer(integral.integrand(), replacer)
                    self._form_with_placeholders.append(new_integrand*measure)
                
            log(PROGRESS, "3. Assert that there are no parametrized expressions left")
            for form in self._form_with_placeholders:
                for integral in form.integrals():
                    for e in pre_traversal(integral.integrand()):
                        if isinstance(e, BaseExpression):
                            assert not (wrapping.is_pull_back_expression(e) and wrapping.is_pull_back_expression_parametrized(e)), "Form " + str(integral) + " still contains a parametrized pull back expression"
                            if has_pybind11():
                                parameters = e._parameters
                            else:
                                parameters = e.user_parameters
                            assert "mu_0" not in parameters, "Form " + str(integral) + " still contains a parametrized expression"
            
            log(PROGRESS, "4. Prepare coefficients hash codes")
            for addend in self._coefficients:
                self._placeholder_names.append(list()) # of string
                for factor in addend:
                    self._placeholder_names[-1].append(wrapping.expression_name(factor))
                    
            log(PROGRESS, "5. Assert list length consistency")
            assert len(self._coefficients) == len(self._placeholders)
            assert len(self._coefficients) == len(self._placeholder_names)
            for (c, p, pn) in zip(self._coefficients, self._placeholders, self._placeholder_names):
                assert len(c) == len(p)
                assert len(c) == len(pn)
            assert len(self._coefficients) == len(self._form_with_placeholders)
            
            log(PROGRESS, "*** DONE - SEPARATE FORM COEFFICIENTS - DONE ***")
            log(PROGRESS, "")
Code example #28
def get_mpi_comm(tensor: (GenericMatrix, GenericVector)):
    mpi_comm = tensor.mpi_comm()
    if not has_pybind11():
        mpi_comm = mpi_comm.tompi4py()
    return mpi_comm
Code example #29
def get_mpi_comm(V: FunctionSpace):
    mpi_comm = V.mesh().mpi_comm()
    if not has_pybind11():
        mpi_comm = mpi_comm.tompi4py()
    return mpi_comm
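A short hedged usage sketch of the helper above; on pybind11 builds `mpi_comm()` already returns an mpi4py communicator, on legacy builds it is converted via `tompi4py()`:

from dolfin import FunctionSpace, UnitSquareMesh

mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "Lagrange", 1)
comm = get_mpi_comm(V)
print(comm.rank, comm.size)  # standard mpi4py attributes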
Code example #30
def _basic_expression_name(expression):
    str_repr = ""
    coefficients_replacement = dict()
    # Preprocess indices first, as their numeric value might change from run to run, but they
    # are always sorted the same way
    indices = set()
    min_index = None
    for t in traverse_unique_terminals(expression):
        if isinstance(t, MultiIndex):
            for i in t.indices():
                if isinstance(i, MuteIndex):
                    if min_index is None or i.count() < min_index:
                        min_index = i.count()
                    indices.add(i)
    for i in indices:
        coefficients_replacement[repr(i)] = "MuteIndexRBniCS(" + str(i.count() - min_index) + ")"
    # Process the expression
    visited = set()
    for n in wrapping.expression_iterator(expression):
        if n in visited:
            continue
        if has_pybind11():
            cppcode_attribute = "_cppcode"
        else:
            cppcode_attribute = "cppcode"
        if hasattr(n, cppcode_attribute):
            coefficients_replacement[repr(n)] = str(getattr(n, cppcode_attribute))
            str_repr += repr(getattr(n, cppcode_attribute))
            visited.add(n)
        elif wrapping.is_problem_solution_type(n):
            if wrapping.is_problem_solution(n):
                (preprocessed_n, component, truth_solution) = wrapping.solution_identify_component(n)
                problem = get_problem_from_solution(truth_solution)
                coefficients_replacement[repr(preprocessed_n)] = "solution of " + str(problem.name())
            elif wrapping.is_problem_solution_dot(n):
                (preprocessed_n, component, truth_solution_dot) = wrapping.solution_dot_identify_component(n)
                problem = get_problem_from_solution_dot(truth_solution_dot)
                coefficients_replacement[repr(preprocessed_n)] = "solution_dot of " + str(problem.name())
            else:
                (preprocessed_n, component, problem) = wrapping.get_auxiliary_problem_for_non_parametrized_function(n)
                coefficients_replacement[repr(preprocessed_n)] = "non parametrized function associated to auxiliary problem " + str(problem.name())
            if len(component) == 1 and component[0] is not None:
                coefficients_replacement[repr(preprocessed_n)] += ", component " + str(component[0])
            elif len(component) > 1:
                coefficients_replacement[repr(preprocessed_n)] += ", component " + str(component)
            str_repr += coefficients_replacement[repr(preprocessed_n)]
            # Make sure to skip any parent solution related to this one
            visited.add(n)
            visited.add(preprocessed_n)
            for parent_n in wrapping.solution_iterator(preprocessed_n):
                visited.add(parent_n)
        elif isinstance(n, Constant):
            if has_pybind11():
                vals = n.values()
            else:
                x = zeros(1)
                vals = zeros(n.value_size())
                n.eval(vals, x)
            coefficients_replacement[repr(n)] = str(vals)
            str_repr += repr(str(vals))
            visited.add(n)
        else:
            str_repr += repr(n)
            visited.add(n)
    for key, value in coefficients_replacement.items():
        str_repr = str_repr.replace(key, value)
    hash_code = hashlib.sha1(str_repr.encode("utf-8")).hexdigest()
    return hash_code
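The index preprocessing above exists because `repr` of UFL objects embeds counters that change from run to run; normalizing them before hashing keeps the generated name stable. A toy, hedged illustration of that canonicalize-then-hash idea (the raw strings below are made up):

import hashlib

str_repr = "f_47 * grad(w_12)"  # hypothetical repr containing run-dependent counters
coefficients_replacement = {"f_47": "mu[0]", "w_12": "solution of Problem"}
for key, value in coefficients_replacement.items():
    str_repr = str_repr.replace(key, value)
print(hashlib.sha1(str_repr.encode("utf-8")).hexdigest())  # stable across runs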