Example #1
import meshio
from dolfin import Mesh, MeshEditor


def import_from_gmsh(fname):
    "Convert from gmsh to dolfin"

    # read with meshio
    msh = meshio.read(fname)

    # create a DOLFIN mesh (assuming 2d)
    gdim, tdim = 2, 2
    mm = Mesh()
    editor = MeshEditor()
    editor.open(mm, "triangle", gdim, tdim)

    npt = msh.points.shape[0]
    nc = msh.get_cells_type("triangle").shape[0]

    editor.init_vertices_global(npt, npt)
    editor.init_cells_global(nc, nc)

    for i, p in enumerate(msh.points):
        editor.add_vertex(i, p[:2])

    for i, c in enumerate(msh.get_cells_type("triangle")):
        editor.add_cell(i, c)

    editor.close()

    # domains
    md = mm.domains()
    md.init(tdim)
    markers = {}

    if 'gmsh:physical' not in msh.cell_data_dict:
        # no markers at all
        return mm, markers

    phy = msh.cell_data_dict['gmsh:physical']
    if 'triangle' in phy:
        for eid, val in enumerate(phy['triangle']):
            md.set_marker((eid, val), 2)

    if 'line' in phy:
        mm.init(0, 1)
        p2e = mm.topology()(0, 1)
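        # p2e maps a vertex to its incident edges; the edge matching a gmsh
        # line cell is the unique edge shared by its two endpoint vertices.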

        for l, k in zip(msh.get_cells_type("line"), phy['line']):
            e = set(p2e(l[0])).intersection(p2e(l[1])).pop()
            md.set_marker((e, k), 1)

    if 'vertex' in phy:
        for eid, val in zip(msh.get_cells_type("vertex"), phy['vertex']):
            md.set_marker((eid[0], val), 0)

    # names
    markers = tuple(
        {n: v.item()
         for n, (v, d) in msh.field_data.items() if d == dim}
        for dim in range(tdim + 1))

    return mm, markers
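
A minimal usage sketch; the file name "domain.msh" is a placeholder, and the unpacking assumes the file carries physical groups (otherwise markers is an empty dict):

mesh, markers = import_from_gmsh("domain.msh")  # "domain.msh" is a placeholder
vertex_names, edge_names, cell_names = markers  # one {name: tag} dict per dimension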
Example #2
def convert(ifilename, handler):
    """ Convert from Abaqus.

    An Abaqus file first defines a block of nodes, followed by a number of
    element blocks that connect these nodes.
    """

    # Dictionary of nodes (maps node id to coordinates)
    nodes = {}

    # Dictionary of elements (maps cell id to list of cell nodes)
    elems = {}

    # Lists of nodes for given name (key)
    node_sets = {}

    # Lists of cells for given name (key)
    cell_sets = {}

    # Lists of surfaces for given name (key) in the format:
    # {'SS1': [set(['SS1_S1', 'S1']), set(['SS1_S4', 'S4'])]},
    # where SS1 is the name of the surface, SS1_S1 is the name of the
    # cell list whose first face is to be selected, ...
    surface_sets = {}

    # Open the Abaqus file
    file = open(ifilename, 'r')
    csv_file = csv.reader(file, delimiter=',', skipinitialspace=True)

    node_set_name = None
    generate = None

    # Set initial state
    state = State.Init

    # Read data from input file
    for l in csv_file:

        # Sanity check: skip blank lines (indexing l[0] below would fail)
        if len(l) == 0:
            print("Oops, zero length line.")
            continue

        if l[0].startswith('**'): # Pass over comments
            continue
        elif l[0].startswith('*'): # Have a keyword
            state = State.Unknown

            if l[0].lower() == "*heading":
                state = State.ReadHeading

            elif l[0].lower() == "*part":
                part_name = _read_part_name(l)

            elif l[0].lower() == "*end part":
                state = State.Invalid

            elif l[0].lower() == "*node":
                node_set_name = _create_node_list_entry(node_sets, l)
                state = State.ReadNodes

            elif l[0].lower() == "*element":
                cell_type, cell_set_name = _read_element_keywords(cell_sets, l)
                state = State.ReadCells

            elif l[0].lower() == "*nset":
                node_set_name, generate = _read_nset_keywords(node_sets, l)
                state = State.ReadNodeSet

            elif l[0].lower() == "*elset":
                cell_set_name, generate = _read_elset_keywords(cell_sets, l)
                if generate:
                    print("WARNING: generation of *elsets not tested.")
                state = State.ReadCellSet

            elif l[0].lower() == "*surface":
                surface_set_name, generate = _read_surface_keywords(surface_sets, l)
                state = State.ReadSurfaceSet

            else:
                print("WARNING: unrecognised Abaqus input keyword:", l[0])
                state = State.Unknown

        else:

            if state == State.ReadHeading:
                model_name = _read_heading(l)

            elif state == State.ReadNodes:
                node_id = int(l[0]) - 1
                coords = [float(c) for c in l[1:]]
                nodes[node_id] = coords
                if node_set_name is not None:
                    node_sets[node_set_name].add(node_id)

            elif state == State.ReadCells:
                cell_id = int(l[0]) - 1
                cell_connectivity = [int(v) - 1 for v in l[1:]]
                elems[cell_id] = cell_connectivity
                if cell_set_name is not None:
                    cell_sets[cell_set_name].add(cell_id)

            elif state == State.ReadNodeSet:

                try:
                    if generate:
                        n0, n1, increment = l
                        node_range = list(range(int(n0) - 1, int(n1) - 1, int(increment)))
                        node_range.append(int(n1) - 1)
                        node_sets[node_set_name].update(node_range)
                    else:
                        # Strip empty term at end of list, if present
                        if l[-1] == '': l.pop(-1)
                        node_range = [int(n) - 1 for n in l]
                        node_sets[node_set_name].update(node_range)
                except ValueError:
                    print("WARNING: Non-integer node sets not yet supported.")

            elif state == State.ReadCellSet:
                try:
                    if generate:
                        n0, n1, increment = l
                        cell_range = list(range(int(n0) - 1, int(n1) - 1, int(increment)))
                        cell_range.append(int(n1) - 1)
                        cell_sets[cell_set_name].update(cell_range)
                    else:
                        # Strip empty term at end of list, if present
                        if l[-1] == '': l.pop(-1)
                        cell_range = [int(n) - 1 for n in l]
                        cell_sets[cell_set_name].update(cell_range)
                except ValueError:
                    print("WARNING: Non-integer element sets not yet supported.")

            elif state == State.ReadSurfaceSet:
                # Strip empty term at end of list, if present
                if l[-1] == '': l.pop(-1)
                surface_sets[surface_set_name].update([tuple(l)])

            elif state == State.Invalid: # part
                raise Exception("Inavlid Abaqus parser state..")


    # Close CSV object
    file.close()
    del csv_file

    # Write data to XML file
    # Note that vertices/cells must be consecutively numbered, which
    # isn't necessarily the case in Abaqus. Therefore we enumerate and
    # translate original IDs to sequence indexes if gaps are present.

    # FIXME
    handler.set_mesh_type("tetrahedron", 3)

    process_facets = len(surface_sets) > 0
    if process_facets:
        try:
            from dolfin import MeshEditor, Mesh
        except ImportError:
            _error("DOLFIN must be installed to handle Abaqus boundary regions")

        mesh = Mesh()
        mesh_editor = MeshEditor()
        mesh_editor.open(mesh, 3, 3)

    node_ids_order = {}
    # Check for gaps in vertex numbering
    node_ids = list(iterkeys(nodes))
    if len(node_ids) > 0:
        vertex_gap = (min(node_ids) != 0 or max(node_ids) != len(node_ids) - 1)
        for x, y in enumerate(node_ids):
            node_ids_order[y]= x  # Maps Abaqus IDs to Dolfin IDs
    else:
        vertex_gap = True

    # Check for gaps in cell numbering
    elemids = list(iterkeys(elems))
    if len(elemids) > 0:
        cell_gap = (min(elemids) != 0 or max(elemids) != len(elemids) - 1)
    else:
        cell_gap = True

    # Write vertices to XML file
    handler.start_vertices(len(nodes))
    if process_facets:
        mesh_editor.init_vertices_global(len(nodes), len(nodes))

    if not vertex_gap:

        for v_id, v_coords in list(iteritems(nodes)):
            handler.add_vertex(v_id, v_coords)
            if process_facets:
                mesh_editor.add_vertex(v_id, np.array(v_coords, dtype=np.float_))

    else:

        for idx, (v_id, v_coords) in enumerate(iteritems(nodes)):
            handler.add_vertex(idx, v_coords)
            if process_facets:
                mesh_editor.add_vertex(idx, np.array(v_coords, dtype=np.float_))

    handler.end_vertices()

    # Write cells to XML file
    handler.start_cells(len(elems))
    if process_facets:
        mesh_editor.init_cells_global(len(elems), len(elems))

    if not vertex_gap and not cell_gap:

        for c_index, c_data in list(iteritems(elems)):
            for v_id in c_data:
                if not (0 <= v_id < len(nodes)):
                    handler.error("Element %s references non-existent node %s" % (c_index, v_id))
            handler.add_cell(c_index, c_data)

            if process_facets:
                c_data_tmp = np.array(c_data)
                c_data_tmp.sort()
                mesh_editor.add_cell(c_index, np.array(c_data_tmp, dtype=np.uintp))


    elif not vertex_gap and cell_gap:

        for idx, (c_index, c_data) in enumerate(iteritems(elems)):
            for v_id in c_data:
                if not (0 <= v_id < len(nodes)):
                    handler.error("Element %s references non-existent node %s" % (c_index, v_id))
            handler.add_cell(idx, c_data)

            if process_facets:
                c_data_tmp = np.array(c_data)
                c_data_tmp.sort()
                mesh_editor.add_cell(idx, np.array(c_data_tmp, dtype=np.uintp))

    else:

        for idx, (c_id, c_data) in enumerate(iteritems(elems)):
            c_nodes = []
            for v_id in c_data:
                try:
                    c_nodes.append(node_ids_order[v_id])
                except KeyError:
                    handler.error("Element %s references non-existent node %s" % (c_id, v_id))
            handler.add_cell(idx, c_nodes)

            if process_facets:
                c_nodes.sort()
                mesh_editor.add_cell(idx, np.array(c_nodes, dtype=np.uintp))

    handler.end_cells()

    # Write MeshValueCollections to XML file
    handler.start_domains()

    # Build an Abaqus node ID -> dolfin cell ID map (not unique, but that is
    # irrelevant here) together with the node's local entity index in that cell.
    if len(node_sets) > 0:
        node_cell_map = {}
        for c_dolfin_index, (c_index, c_data) in enumerate(iteritems(elems)):
            c_data_tmp = np.array(c_data)
            c_data_tmp.sort()
            for local_entity, n_index in enumerate(c_data_tmp):
                node_cell_map[n_index] = (c_dolfin_index, local_entity)

    # Write vertex/node sets
    dim = 0
    for value, (name, node_set) in enumerate(iteritems(node_sets)):
        handler.start_mesh_value_collection(name, dim, len(node_set), "uint")

        for node in node_set:
            try:
                cell, local_entity = node_cell_map[node]
                handler.add_entity_mesh_value_collection(dim, cell, value, local_entity=local_entity)
            except KeyError:
                print("Warning: Boundary references non-existent node %s" % node)
        handler.end_mesh_value_collection()

    # Write cell/element sets
    dim = 3
    for name, s in list(iteritems(cell_sets)):
        handler.start_mesh_value_collection(name, dim, len(s), "uint")
        for cell in s:
            handler.add_entity_mesh_value_collection(dim, cell, 0)
        handler.end_mesh_value_collection()

    # Write surface sets
    if process_facets:
        dim = 2
        nodes_facet_map = _nodes_facet_map(mesh)

        data = [int(0)] * mesh.num_facets()
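        # Zero-based local vertex indices for the four faces of an Abaqus
        # linear tetrahedron (C3D4): S1 = 1-2-3, S2 = 1-4-2, S3 = 2-4-3, S4 = 3-4-1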
        S1 = [0, 1, 2]
        S2 = [0, 3, 1]
        S3 = [1, 3, 2]
        S4 = [2, 3, 0]
        node_selector = {'S1': S1,
                         'S2': S2,
                         'S3': S3,
                         'S4': S4,
                         }

        for index, (name, s) in enumerate(iteritems(surface_sets)):
            cell_face_list = []
            for cell_set_name, face_index in s:
                cell_face_list += [(cell, face_index) for cell in cell_sets[cell_set_name]]

            for cell, face in cell_face_list:
                cell_nodes = elems[cell]
                # Extract the face nodes
                face_nodes = [cell_nodes[i] for i in node_selector[face]]
                dolfin_face_nodes = [node_ids_order[n] for n in face_nodes]
                dolfin_face_nodes.sort()
                # Convert the face_nodes to dolfin IDs
                face_id = nodes_facet_map[tuple(dolfin_face_nodes)]
                data[face_id] = index + 1

        # Create and initialise the mesh function
        handler.start_meshfunction("facet_region", dim, mesh.num_facets() )
        for index, physical_region in enumerate (data):
            handler.add_entity_meshfunction(index, physical_region)
        handler.end_meshfunction()


    handler.end_domains()
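
The handler argument must provide the writer callbacks used above. A hypothetical stub, inferred purely from the calls in this example, that just traces the conversion:

class TraceHandler:
    "Hypothetical stub of the handler interface used by convert()."
    def set_mesh_type(self, cell_type, dim): print("mesh type:", cell_type, dim)
    def start_vertices(self, n): print("vertices:", n)
    def add_vertex(self, i, coords): pass
    def end_vertices(self): pass
    def start_cells(self, n): print("cells:", n)
    def add_cell(self, i, nodes): pass
    def end_cells(self): pass
    def start_domains(self): pass
    def start_mesh_value_collection(self, name, dim, size, value_type): print("set:", name)
    def add_entity_mesh_value_collection(self, dim, entity, value, local_entity=None): pass
    def end_mesh_value_collection(self): pass
    def start_meshfunction(self, name, dim, size): print("meshfunction:", name)
    def add_entity_meshfunction(self, index, value): pass
    def end_meshfunction(self): pass
    def end_domains(self): pass
    def error(self, message): raise RuntimeError(message)

convert("model.inp", TraceHandler())  # "model.inp" is a placeholder path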
Example #3
def convert(ifilename, handler):
    """ Convert from Abaqus.

    An Abaqus file first defines a block of nodes, followed by a number of
    element blocks that connect these nodes.
    """

    # Dictionary of nodes (maps node id to coordinates)
    nodes = {}

    # Dictionary of elements (maps cell id to list of cell nodes)
    elems = {}

    # Lists of nodes for given name (key)
    node_sets = {}

    # Lists of cells for given name (key)
    cell_sets = {}

    # Lists of surfaces for given name (key) in the format:
    # {'SS1': [set(['SS1_S1', 'S1']), set(['SS1_S4', 'S4'])]},
    # where SS1 is the name of the surface, SS1_S1 is the name of the
    # cell list whose first face is to be selected, ...
    surface_sets = {}

    # Open the Abaqus file
    file = open(ifilename, 'r')
    csv_file = csv.reader(file, delimiter=',', skipinitialspace=True)

    node_set_name = None
    generate = None

    # Set initial state
    state = State.Init

    # Read data from input file
    for l in csv_file:

        # Sanity check: skip blank lines (indexing l[0] below would fail)
        if len(l) == 0:
            print("Oops, zero length line.")
            continue

        if l[0].startswith('**'):  # Pass over comments
            continue
        elif l[0].startswith('*'):  # Have a keyword
            state = State.Unknown

            if l[0].lower() == "*heading":
                state = State.ReadHeading

            elif l[0].lower() == "*part":
                part_name = _read_part_name(l)

            elif l[0].lower() == "*end part":
                state = State.Invalid

            elif l[0].lower() == "*node":
                node_set_name = _create_node_list_entry(node_sets, l)
                state = State.ReadNodes

            elif l[0].lower() == "*element":
                cell_type, cell_set_name = _read_element_keywords(cell_sets, l)
                state = State.ReadCells

            elif l[0].lower() == "*nset":
                node_set_name, generate = _read_nset_keywords(node_sets, l)
                state = State.ReadNodeSet

            elif l[0].lower() == "*elset":
                cell_set_name, generate = _read_elset_keywords(cell_sets, l)
                if generate:
                    print("WARNING: generation of *elsets not tested.")
                state = State.ReadCellSet

            elif l[0].lower() == "*surface":
                surface_set_name, generate = _read_surface_keywords(
                    surface_sets, l)
                state = State.ReadSurfaceSet

            else:
                print("WARNING: unrecognised Abaqus input keyword:", l[0])
                state = State.Unknown

        else:

            if state == State.ReadHeading:
                model_name = _read_heading(l)

            elif state == State.ReadNodes:
                node_id = int(l[0]) - 1
                coords = [float(c) for c in l[1:]]
                nodes[node_id] = coords
                if node_set_name is not None:
                    node_sets[node_set_name].add(node_id)

            elif state == State.ReadCells:
                cell_id = int(l[0]) - 1
                cell_connectivity = [int(v) - 1 for v in l[1:]]
                elems[cell_id] = cell_connectivity
                if cell_set_name is not None:
                    cell_sets[cell_set_name].add(cell_id)

            elif state == State.ReadNodeSet:

                try:
                    if generate:
                        n0, n1, increment = l
                        node_range = list(
                            range(int(n0) - 1,
                                  int(n1) - 1, int(increment)))
                        node_range.append(int(n1) - 1)
                        node_sets[node_set_name].update(node_range)
                    else:
                        # Strip empty term at end of list, if present
                        if l[-1] == '': l.pop(-1)
                        node_range = [int(n) - 1 for n in l]
                        node_sets[node_set_name].update(node_range)
                except ValueError:
                    print("WARNING: Non-integer node sets not yet supported.")

            elif state == State.ReadCellSet:
                try:
                    if generate:
                        n0, n1, increment = l
                        cell_range = list(
                            range(int(n0) - 1,
                                  int(n1) - 1, int(increment)))
                        cell_range.append(int(n1) - 1)
                        cell_sets[cell_set_name].update(cell_range)
                    else:
                        # Strip empty term at end of list, if present
                        if l[-1] == '': l.pop(-1)
                        cell_range = [int(n) - 1 for n in l]
                        cell_sets[cell_set_name].update(cell_range)
                except ValueError:
                    print(
                        "WARNING: Non-integer element sets not yet supported.")

            elif state == State.ReadSurfaceSet:
                # Strip empty term at end of list, if present
                if l[-1] == '': l.pop(-1)
                surface_sets[surface_set_name].update([tuple(l)])

            elif state == State.Invalid:  # part
                raise Exception("Inavlid Abaqus parser state..")

    # Close CSV object
    file.close()
    del csv_file

    # Write data to XML file
    # Note that vertices/cells must be consecutively numbered, which
    # isn't necessarily the case in Abaqus. Therefore we enumerate and
    # translate original IDs to sequence indexes if gaps are present.

    # FIXME
    handler.set_mesh_type("tetrahedron", 3)

    process_facets = len(surface_sets) > 0
    if process_facets:
        try:
            from dolfin import MeshEditor, Mesh
        except ImportError:
            _error(
                "DOLFIN must be installed to handle Abaqus boundary regions")

        mesh = Mesh()
        mesh_editor = MeshEditor()
        mesh_editor.open(mesh, 3, 3)

    node_ids_order = {}
    # Check for gaps in vertex numbering
    node_ids = list(iterkeys(nodes))
    if len(node_ids) > 0:
        vertex_gap = (min(node_ids) != 0 or max(node_ids) != len(node_ids) - 1)
        for x, y in enumerate(node_ids):
            node_ids_order[y] = x  # Maps Abaqus IDs to Dolfin IDs
    else:
        vertex_gap = True

    # Check for gaps in cell numbering
    elemids = list(iterkeys(elems))
    if len(elemids) > 0:
        cell_gap = (min(elemids) != 0 or max(elemids) != len(elemids) - 1)
    else:
        cell_gap = True

    # Write vertices to XML file
    handler.start_vertices(len(nodes))
    if process_facets:
        mesh_editor.init_vertices_global(len(nodes), len(nodes))

    if not vertex_gap:

        for v_id, v_coords in list(iteritems(nodes)):
            handler.add_vertex(v_id, v_coords)
            if process_facets:
                mesh_editor.add_vertex(v_id, np.array(v_coords,
                                                      dtype=np.float_))

    else:

        for idx, (v_id, v_coords) in enumerate(iteritems(nodes)):
            handler.add_vertex(idx, v_coords)
            if process_facets:
                mesh_editor.add_vertex(idx, np.array(v_coords,
                                                     dtype=np.float_))

    handler.end_vertices()

    # Write cells to XML file
    handler.start_cells(len(elems))
    if process_facets:
        mesh_editor.init_cells_global(len(elems), len(elems))

    if not vertex_gap and not cell_gap:

        for c_index, c_data in list(iteritems(elems)):
            for v_id in c_data:
                if not (0 <= v_id < len(nodes)):
                    handler.error(
                        "Element %s references non-existent node %s" %
                        (c_index, v_id))
            handler.add_cell(c_index, c_data)

            if process_facets:
                c_data_tmp = np.array(c_data)
                c_data_tmp.sort()
                mesh_editor.add_cell(c_index,
                                     np.array(c_data_tmp, dtype=np.uintp))

    elif not vertex_gap and cell_gap:

        for idx, (c_index, c_data) in enumerate(iteritems(elems)):
            for v_id in c_data:
                if not (0 <= v_id < len(nodes)):
                    handler.error(
                        "Element %s references non-existent node %s" %
                        (c_index, v_id))
            handler.add_cell(idx, c_data)

            if process_facets:
                c_data_tmp = np.array(c_data)
                c_data_tmp.sort()
                mesh_editor.add_cell(idx, np.array(c_data_tmp, dtype=np.uintp))

    else:

        for idx, (c_id, c_data) in enumerate(iteritems(elems)):
            c_nodes = []
            for v_id in c_data:
                try:
                    c_nodes.append(node_ids_order[v_id])
                except KeyError:
                    handler.error(
                        "Element %s references non-existent node %s" %
                        (c_id, v_id))
            handler.add_cell(idx, c_nodes)

            if process_facets:
                c_nodes.sort()
                mesh_editor.add_cell(idx, np.array(c_nodes, dtype=np.uintp))

    handler.end_cells()

    # Write MeshValueCollections to XML file
    handler.start_domains()

    # Build an Abaqus node ID -> dolfin cell ID map (not unique, but that is
    # irrelevant here) together with the node's local entity index in that cell.
    if len(node_sets) > 0:
        node_cell_map = {}
        for c_dolfin_index, (c_index, c_data) in enumerate(iteritems(elems)):
            c_data_tmp = np.array(c_data)
            c_data_tmp.sort()
            for local_entity, n_index in enumerate(c_data_tmp):
                node_cell_map[n_index] = (c_dolfin_index, local_entity)

    # Write vertex/node sets
    dim = 0
    for value, (name, node_set) in enumerate(iteritems(node_sets)):
        handler.start_mesh_value_collection(name, dim, len(node_set), "uint")

        for node in node_set:
            try:
                cell, local_entity = node_cell_map[node]
                handler.add_entity_mesh_value_collection(
                    dim, cell, value, local_entity=local_entity)
            except KeyError:
                print("Warning: Boundary references non-existent node %s" %
                      node)
        handler.end_mesh_value_collection()

    # Write cell/element sets
    dim = 3
    for name, s in list(iteritems(cell_sets)):
        handler.start_mesh_value_collection(name, dim, len(s), "uint")
        for cell in s:
            handler.add_entity_mesh_value_collection(dim, cell, 0)
        handler.end_mesh_value_collection()

    # Write surface sets
    if process_facets:
        dim = 2
        nodes_facet_map = _nodes_facet_map(mesh)

        data = [int(0)] * mesh.num_facets()
        S1 = [0, 1, 2]
        S2 = [0, 3, 1]
        S3 = [1, 3, 2]
        S4 = [2, 3, 0]
        node_selector = {
            'S1': S1,
            'S2': S2,
            'S3': S3,
            'S4': S4,
        }

        for index, (name, s) in enumerate(iteritems(surface_sets)):
            cell_face_list = []
            for cell_set_name, face_index in s:
                cell_face_list += [(cell, face_index)
                                   for cell in cell_sets[cell_set_name]]

            for cell, face in cell_face_list:
                cell_nodes = elems[cell]
                # Extract the face nodes
                face_nodes = [cell_nodes[i] for i in node_selector[face]]
                dolfin_face_nodes = [node_ids_order[n] for n in face_nodes]
                dolfin_face_nodes.sort()
                # Convert the face_nodes to dolfin IDs
                face_id = nodes_facet_map[tuple(dolfin_face_nodes)]
                data[face_id] = index + 1

        # Create and initialise the mesh function
        handler.start_meshfunction("facet_region", dim, mesh.num_facets())
        for index, physical_region in enumerate(data):
            handler.add_entity_meshfunction(index, physical_region)
        handler.end_meshfunction()

    handler.end_domains()
Example #4
def gmsh2xml(ifilename, handler):
    """Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,
    parser implemented as a state machine:

        0 = read 'MeshFormat'
        1 = read  mesh format data
        2 = read 'EndMeshFormat'
        3 = read 'Nodes'
        4 = read  number of vertices
        5 = read  vertices
        6 = read 'EndNodes'
        7 = read 'Elements'
        8 = read  number of cells
        9 = read  cells
        10 = done

    Afterwards, extract physical region numbers if they are defined in
    the mesh file as a mesh function.

    """

    print("Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format")

    # The dimension of the gmsh element types supported here as well as the dolfin cell types for each dimension
    gmsh_dim = {15: 0, 1: 1, 2: 2, 4: 3}
    cell_type_for_dim = {1: "interval", 2: "triangle", 3: "tetrahedron" }
    # the gmsh element types supported for conversion
    supported_gmsh_element_types = [1, 2, 4, 15]

    # Open files
    ifile = open(ifilename, "r")

    # Scan file for cell type
    cell_type = None
    highest_dim = 0
    line = ifile.readline()
    while line:

        # Remove newline
        line = line.rstrip("\n\r")

        # Read dimension
        if line.find("$Elements") == 0:

            line = ifile.readline()
            num_elements = int(line)
            if num_elements == 0:
                _error("No elements found in gmsh file.")
            line = ifile.readline()

            # Now iterate through elements to find largest dimension.  Gmsh
            # format might include elements of lower dimensions in the element list.
            # We also need to count number of elements of correct dimensions.
            # Also determine which vertices are not used.
            dim_count = {0: 0, 1: 0, 2: 0, 3: 0}
            vertices_used_for_dim = {0: [], 1: [], 2: [], 3: []}
            # Array used to store gmsh tags for 1D (type 1/line), 2D (type 2/triangular) elements and 3D (type 4/tet) elements
            tags_for_dim = {0: [], 1: [], 2: [], 3: []}

            while line.find("$EndElements") == -1:
                element = line.split()
                elem_type = int(element[1])
                num_tags = int(element[2])
                if elem_type in supported_gmsh_element_types:
                    dim = gmsh_dim[elem_type]
                    if highest_dim < dim:
                        highest_dim = dim
                    node_num_list = [int(node) for node in element[3 + num_tags:]]
                    vertices_used_for_dim[dim].extend(node_num_list)
                    if num_tags > 0:
                        tags_for_dim[dim].append(tuple(int(tag) for tag in element[3:3+num_tags]))
                    dim_count[dim] += 1
                else:
                    #TODO: output a warning here. "gmsh element type %d not supported" % elem_type
                    pass
                line = ifile.readline()
        else:
            # Read next line
            line = ifile.readline()

    # Check that we got the cell type and set num_cells_counted
    if highest_dim == 0:
        _error("Unable to find cells of supported type.")

    num_cells_counted = dim_count[highest_dim]
    vertex_set = set(vertices_used_for_dim[highest_dim])
    vertices_used_for_dim[highest_dim] = None

    vertex_dict = {}
    for n,v in enumerate(vertex_set):
        vertex_dict[v] = n

    # Step to beginning of file
    ifile.seek(0)

    # Set mesh type
    handler.set_mesh_type(cell_type_for_dim[highest_dim], highest_dim)

    # Initialise node list (gmsh does not export all vertices in order)
    nodelist = {}

    # Current state
    state = 0

    # Write data
    num_vertices_read = 0
    num_cells_read = 0

    # Only import the dolfin objects if facet markings exist
    process_facets = False
    if len(tags_for_dim[highest_dim-1]) > 0:
        # first construct the mesh
        try:
            from dolfin import MeshEditor, Mesh
        except ImportError:
            _error("DOLFIN must be installed to handle Gmsh boundary regions")
        mesh = Mesh()
        mesh_editor = MeshEditor()
        mesh_editor.open(mesh, highest_dim, highest_dim)
        process_facets = True
    else:
        # TODO: Output a warning or an error here
        mesh = None

    while state != 10:

        # Read next line
        line = ifile.readline()
        if not line: break

        # Skip comments
        if line[0] == '#':
            continue

        # Remove newline
        line = line.rstrip("\n\r")

        if state == 0:
            if line == "$MeshFormat":
                state = 1
        elif state == 1:
            (version, file_type, data_size) = line.split()
            state = 2
        elif state == 2:
            if line == "$EndMeshFormat":
                state = 3
        elif state == 3:
            if line == "$Nodes":
                state = 4
        elif state == 4:
            num_vertices = len(vertex_dict)
            handler.start_vertices(num_vertices)
            if process_facets:
                mesh_editor.init_vertices_global(num_vertices, num_vertices)
            state = 5
        elif state == 5:
            (node_no, x, y, z) = line.split()
            node_no = int(node_no)
            x,y,z = [float(xx) for xx in (x,y,z)]
            if node_no in vertex_dict:
                node_no = vertex_dict[node_no]
            else:
                continue
            nodelist[int(node_no)] = num_vertices_read
            handler.add_vertex(num_vertices_read, [x, y, z])
            if process_facets:
                if highest_dim == 1:
                    coords = numpy.array([x])
                elif highest_dim == 2:
                    coords = numpy.array([x, y])
                elif highest_dim == 3:
                    coords = numpy.array([x, y, z])
                mesh_editor.add_vertex(num_vertices_read, coords)

            num_vertices_read += 1

            if num_vertices == num_vertices_read:
                handler.end_vertices()
                state = 6
        elif state == 6:
            if line == "$EndNodes":
                state = 7
        elif state == 7:
            if line == "$Elements":
                state = 8
        elif state == 8:
            handler.start_cells(num_cells_counted)
            if process_facets:
                mesh_editor.init_cells_global(num_cells_counted, num_cells_counted)

            state = 9
        elif state == 9:
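            # MSH v2 element line: elm-number elm-type number-of-tags <tags ...> node-number-list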
            element = line.split()
            elem_type = int(element[1])
            num_tags  = int(element[2])
            if elem_type in supported_gmsh_element_types:
                dim = gmsh_dim[elem_type]
            else:
                dim = 0
            if dim == highest_dim:
                node_num_list = [vertex_dict[int(node)] for node in element[3 + num_tags:]]
                for node in node_num_list:
                    if node not in nodelist:
                        _error("Vertex %d of %s %d not previously defined." %
                              (node, cell_type_for_dim[dim], num_cells_read))
                cell_nodes = [nodelist[n] for n in node_num_list]
                handler.add_cell(num_cells_read, cell_nodes)

                if process_facets:
                    cell_nodes = numpy.array([nodelist[n] for n in node_num_list], dtype=numpy.uintp)
                    mesh_editor.add_cell(num_cells_read, cell_nodes)

                num_cells_read += 1

            if num_cells_counted == num_cells_read:
                handler.end_cells()
                if process_facets:
                    mesh_editor.close()
                state = 10
        elif state == 10:
            break

    # Write mesh function based on the Physical Regions defined by
    # gmsh, but only if they are not all zero. All zero physical
    # regions indicate that no physical regions were defined.
    if highest_dim not in [1, 2, 3]:
        _error("Gmsh tags not supported for dimension %i. Probably a bug" % highest_dim)

    tags = tags_for_dim[highest_dim]
    physical_regions = tuple(tag[0] for tag in tags)
    if not all(tag == 0 for tag in physical_regions):
        handler.start_meshfunction("physical_region", dim, num_cells_counted)
        for i, physical_region in enumerate(physical_regions):
            handler.add_entity_meshfunction(i, physical_region)
        handler.end_meshfunction()

    # Now process the facet markers
    tags = tags_for_dim[highest_dim-1]
    if (len(tags) > 0) and (mesh is not None):
        physical_regions = tuple(tag[0] for tag in tags)
        if not all(tag == 0 for tag in physical_regions):
            mesh.init(highest_dim - 1, 0)

            # Get the facet-node connectivity information (reshape as a row of node indices per facet)
            if highest_dim == 1:
                # For 1D meshes the mesh topology returns the vertex-to-vertex
                # map, which isn't what we want, as facets are vertices
                facets_as_nodes = numpy.array([[i] for i in range(mesh.num_facets())])
            else:
                facets_as_nodes = mesh.topology()(highest_dim - 1, 0)().reshape(mesh.num_facets(), highest_dim)

            # Build the reverse map
            nodes_as_facets = {}
            for facet in range(mesh.num_facets()):
                nodes_as_facets[tuple(facets_as_nodes[facet, :])] = facet

            data = [0] * mesh.num_facets()
            for i, physical_region in enumerate(physical_regions):
                nodes = [n-1 for n in vertices_used_for_dim[highest_dim-1][highest_dim*i:(highest_dim*i+highest_dim)]]
                nodes.sort()

                if physical_region != 0:
                    try:
                        index = nodes_as_facets[tuple(nodes)]
                        data[index] = physical_region
                    except KeyError:
                        raise Exception("The facet (%d) was not found to mark: %s" % (i, nodes))

            # Create and initialise the mesh function
            handler.start_meshfunction("facet_region", highest_dim-1, mesh.num_facets() )
            for index, physical_region in enumerate ( data ):
                handler.add_entity_meshfunction(index, physical_region)
            handler.end_meshfunction()

    # Check that we got all data
    if state == 10:
        print("Conversion done")
    else:
       _error("Missing data, unable to convert \n\ Did you use version 2.0 of the gmsh file format?")

    # Close files
    ifile.close()
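
For reference, a hedged sketch of the smallest MSH 2.2 input this state machine accepts: one triangle carrying physical tag 1 (the file name "tiny.msh" is a placeholder):

msh_text = """$MeshFormat
2.2 0 8
$EndMeshFormat
$Nodes
3
1 0 0 0
2 1 0 0
3 0 1 0
$EndNodes
$Elements
1
1 2 2 1 0 1 2 3
$EndElements
"""
with open("tiny.msh", "w") as f:  # placeholder file name
    f.write(msh_text)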
Example #5
def create_submesh(mesh, markers, marker):
    "This function allows for a SubMesh-equivalent to be created in parallel"
    # Build mesh
    submesh = Mesh()
    mesh_editor = MeshEditor()
    mesh_editor.open(submesh,
                     mesh.ufl_cell().cellname(),
                     mesh.ufl_cell().topological_dimension(),
                     mesh.ufl_cell().geometric_dimension())

    # Return empty mesh if no matching markers
    if MPI.sum(mpi_comm_world(), int(marker in markers.array())) == 0:
        cbc_warning(
            "Unable to find matching markers in meshfunction. Submesh is empty."
        )
        mesh_editor.close()
        return submesh

    base_cell_indices = np.where(markers.array() == marker)[0]
    base_cells = mesh.cells()[base_cell_indices]
    base_vertex_indices = np.unique(base_cells.flatten())

    base_global_vertex_indices = sorted(
        [mesh.topology().global_indices(0)[vi] for vi in base_vertex_indices])

    gi = mesh.topology().global_indices(0)
    shared_local_indices = set(base_vertex_indices).intersection(
        set(mesh.topology().shared_entities(0).keys()))
    shared_global_indices = [gi[vi] for vi in shared_local_indices]

    unshared_global_indices = list(
        set(base_global_vertex_indices) - set(shared_global_indices))
    unshared_vertices_dist = distribution(len(unshared_global_indices))

    # Number unshared vertices on separate process
    idx = sum(unshared_vertices_dist[:MPI.rank(mpi_comm_world())])
    base_to_sub_global_indices = {}
    for gi in unshared_global_indices:
        base_to_sub_global_indices[gi] = idx
        idx += 1

    # Gather all shared process on process 0 and assign global index
    all_shared_global_indices = gather(shared_global_indices,
                                       on_process=0,
                                       flatten=True)
    all_shared_global_indices = np.unique(all_shared_global_indices)

    shared_base_to_sub_global_indices = {}
    idx = int(
        MPI.max(mpi_comm_world(),
                float(max(list(base_to_sub_global_indices.values()) + [-1e16]))) + 1)
    if MPI.rank(mpi_comm_world()) == 0:
        for gi in all_shared_global_indices:
            shared_base_to_sub_global_indices[int(gi)] = idx
            idx += 1

    # Broadcast global numbering of all shared vertices
    shared_base_to_sub_global_indices = dict(
        zip(broadcast(list(shared_base_to_sub_global_indices.keys()), 0),
            broadcast(list(shared_base_to_sub_global_indices.values()), 0)))

    # Join shared and unshared numbering in one dict
    base_to_sub_global_indices = dict(
        list(base_to_sub_global_indices.items()) +
        list(shared_base_to_sub_global_indices.items()))

    # Create mapping of local indices
    base_to_sub_local_indices = dict(
        zip(base_vertex_indices, range(len(base_vertex_indices))))

    # Define sub-cells
    sub_cells = [None] * len(base_cells)
    for i, c in enumerate(base_cells):
        sub_cells[i] = [base_to_sub_local_indices[j] for j in c]

    # Store vertices as sub_vertices[local_index] = (global_index, coordinates)
    sub_vertices = {}
    for base_local, sub_local in base_to_sub_local_indices.items():
        sub_vertices[sub_local] = (base_to_sub_global_indices[
            mesh.topology().global_indices(0)[base_local]],
                                   mesh.coordinates()[base_local])

    ## Done with base mesh

    # Distribute meshdata on (if any) empty processes
    sub_cells, sub_vertices = distribute_meshdata(sub_cells, sub_vertices)
    global_cell_distribution = distribution(len(sub_cells))
    #global_vertex_distribution = distribution(len(sub_vertices))

    global_num_cells = MPI.sum(mpi_comm_world(), len(sub_cells))
    global_num_vertices = sum(unshared_vertices_dist) + MPI.sum(
        mpi_comm_world(), len(all_shared_global_indices))

    mesh_editor.init_vertices(len(sub_vertices))
    #mesh_editor.init_cells(len(sub_cells))
    mesh_editor.init_cells_global(len(sub_cells), global_num_cells)
    global_index_start = sum(
        global_cell_distribution[:MPI.rank(mesh.mpi_comm())])

    for index, cell in enumerate(sub_cells):
        if LooseVersion(dolfin_version()) >= LooseVersion("1.6.0"):
            mesh_editor.add_cell(index, *cell)
        else:
            mesh_editor.add_cell(int(index), global_index_start + index,
                                 np.array(cell, dtype=np.uintp))

    for local_index, (global_index, coordinates) in sub_vertices.items():
        mesh_editor.add_vertex_global(int(local_index), int(global_index),
                                      coordinates)

    mesh_editor.close()

    submesh.topology().init(0, len(sub_vertices), global_num_vertices)
    submesh.topology().init(mesh.ufl_cell().topological_dimension(),
                            len(sub_cells), global_num_cells)

    # FIXME: Set up shared entities
    # What damage does this do?
    submesh.topology().shared_entities(0)[0] = []
    # The code below sets up shared vertices, but lacks shared facets.
    # It is considered incomplete, and therefore commented out
    '''
    #submesh.topology().shared_entities(0)[0] = []
    from dolfin import compile_extension_module
    cpp_code = """
    void set_shared_entities(Mesh& mesh, std::size_t idx, const Array<std::size_t>& other_processes)
    {
        std::set<unsigned int> set_other_processes;
        for (std::size_t i=0; i<other_processes.size(); i++)
        {
            set_other_processes.insert(other_processes[i]);
            //std::cout << idx << " --> " << other_processes[i] << std::endl;
        }
        //std::cout << idx << " --> " << set_other_processes[0] << std::endl;
        mesh.topology().shared_entities(0)[idx] = set_other_processes;
    }
    """

    set_shared_entities = compile_extension_module(cpp_code).set_shared_entities
    base_se = mesh.topology().shared_entities(0)
    se = submesh.topology().shared_entities(0)

    for li in shared_local_indices:
        arr = np.array(base_se[li], dtype=np.uintp)
        sub_li = base_to_sub_local_indices[li]
        set_shared_entities(submesh, base_to_sub_local_indices[li], arr)
    '''
    return submesh
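
A hedged usage sketch with the legacy DOLFIN API (the unit-square mesh and the marker value 1 are illustrative, not part of the function):

from dolfin import UnitSquareMesh, MeshFunction, cells

mesh = UnitSquareMesh(8, 8)
markers = MeshFunction("size_t", mesh, mesh.topology().dim(), 0)
for cell in cells(mesh):
    if cell.midpoint().x() < 0.5:
        markers[cell] = 1  # mark the left half of the domain

left_half = create_submesh(mesh, markers, 1)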
Example #6
def create_submesh(mesh, markers):
    mpi_comm = mesh.mpi_comm()
    if not has_pybind11():
        mpi_comm = mpi_comm.tompi4py()
    assert isinstance(markers, MeshFunctionBool)
    assert markers.dim() == mesh.topology().dim()
    marker_id = True
    
    # == 1. Extract marked cells == #
    # Dolfin does not support a distributed mesh that is empty on some processes.
    # cbcpost gets around this by moving a single cell from a non-empty processor to an empty one.
    # Note, however, that this cannot work if the number of marked cells is less than the number of processors.
    # In the interest of considering this case, we enable at least one cell (arbitrarily) on each processor.
    # We find this solution acceptable for our purposes, despite the increase of the reduced mesh size,
    # since we are never actually interested in solving a PDE on the reduced mesh, but rather only in
    # assembling tensors on it and extracting their values at some locations.
    backup_first_marker_id = None
    if marker_id not in markers.array():
        backup_first_marker_id = markers.array()[0]
        markers.array()[0] = marker_id
    assert marker_id in markers.array()
    
    # == 2. Create submesh == #
    submesh = Mesh(mesh.mpi_comm())
    mesh_editor = MeshEditor()
    mesh_editor.open(submesh,
                     mesh.ufl_cell().cellname(),
                     mesh.ufl_cell().topological_dimension(),
                     mesh.ufl_cell().geometric_dimension())
    # Extract cells from mesh with specified marker_id
    mesh_cell_indices = where(markers.array() == marker_id)[0]
    mesh_cells = mesh.cells()[mesh_cell_indices]
    mesh_global_cell_indices = sorted([mesh.topology().global_indices(mesh.topology().dim())[cell_index] for cell_index in mesh_cell_indices])
    # Get vertices of extracted cells
    mesh_vertex_indices = unique(mesh_cells.flatten())
    mesh_global_vertex_indices = sorted([mesh.topology().global_indices(0)[vertex_index] for vertex_index in mesh_vertex_indices])
    # Number vertices in a way which is independent from the number of processors. To do so ...
    # ... first of all collect all vertices from all processors
    allgathered_mesh_global_vertex_indices__non_empty_processors = list()
    allgathered_mesh_global_vertex_indices__empty_processors = list()
    for r in range(mpi_comm.size):
        backup_first_marker_id_r = mpi_comm.bcast(backup_first_marker_id, root=r)
        if backup_first_marker_id_r is None:
            allgathered_mesh_global_vertex_indices__non_empty_processors.extend(mpi_comm.bcast(mesh_global_vertex_indices, root=r))
        else:
            allgathered_mesh_global_vertex_indices__empty_processors.extend(mpi_comm.bcast(mesh_global_vertex_indices, root=r))
    allgathered_mesh_global_vertex_indices__non_empty_processors = sorted(unique(allgathered_mesh_global_vertex_indices__non_empty_processors))
    allgathered_mesh_global_vertex_indices__empty_processors = sorted(unique(allgathered_mesh_global_vertex_indices__empty_processors))
    # ... then create a dict that will contain the map from mesh global vertex index to submesh global vertex index.
    # ... Here make sure to number first "real" vertices (those coming from non empty processors), since the other ones
    # ... are just a side effect of the current partitioning!
    allgathered_mesh_to_submesh_vertex_global_indices = dict()
    _submesh_vertex_global_index = 0
    for mesh_vertex_global_index in allgathered_mesh_global_vertex_indices__non_empty_processors:
        assert mesh_vertex_global_index not in allgathered_mesh_to_submesh_vertex_global_indices
        allgathered_mesh_to_submesh_vertex_global_indices[mesh_vertex_global_index] = _submesh_vertex_global_index
        _submesh_vertex_global_index += 1
    for mesh_vertex_global_index in allgathered_mesh_global_vertex_indices__empty_processors:
        if mesh_vertex_global_index not in allgathered_mesh_to_submesh_vertex_global_indices:
            allgathered_mesh_to_submesh_vertex_global_indices[mesh_vertex_global_index] = _submesh_vertex_global_index
            _submesh_vertex_global_index += 1
    # Number cells in a way which is independent from the number of processors. To do so ...
    # ... first of all collect all cells from all processors
    allgathered_mesh_global_cell_indices__non_empty_processors = list()
    allgathered_mesh_global_cell_indices__empty_processors = list()
    for r in range(mpi_comm.size):
        backup_first_marker_id_r = mpi_comm.bcast(backup_first_marker_id, root=r)
        if backup_first_marker_id_r is None:
            allgathered_mesh_global_cell_indices__non_empty_processors.extend(mpi_comm.bcast(mesh_global_cell_indices, root=r))
        else:
            allgathered_mesh_global_cell_indices__empty_processors.extend(mpi_comm.bcast(mesh_global_cell_indices, root=r))
    allgathered_mesh_global_cell_indices__non_empty_processors = sorted(unique(allgathered_mesh_global_cell_indices__non_empty_processors))
    allgathered_mesh_global_cell_indices__empty_processors = sorted(unique(allgathered_mesh_global_cell_indices__empty_processors))
    # ... then create a dict that will contain the map from mesh global cell index to submesh global cell index.
    # ... Here make sure to number the "real" cells (those coming from non-empty processors) first, since the
    # ... other ones are just a side effect of the current partitioning!
    allgathered_mesh_to_submesh_cell_global_indices = dict()
    _submesh_cell_global_index = 0
    for mesh_cell_global_index in allgathered_mesh_global_cell_indices__non_empty_processors:
        assert mesh_cell_global_index not in allgathered_mesh_to_submesh_cell_global_indices
        allgathered_mesh_to_submesh_cell_global_indices[mesh_cell_global_index] = _submesh_cell_global_index
        _submesh_cell_global_index += 1
    for mesh_cell_global_index in allgathered_mesh_global_cell_indices__empty_processors:
        assert mesh_cell_global_index not in allgathered_mesh_to_submesh_cell_global_indices
        allgathered_mesh_to_submesh_cell_global_indices[mesh_cell_global_index] = _submesh_cell_global_index
        _submesh_cell_global_index += 1
    # Also create a mapping from mesh local vertex index to submesh local vertex index.
    mesh_to_submesh_vertex_local_indices = dict(zip(mesh_vertex_indices, list(range(len(mesh_vertex_indices)))))
    # Also create a mapping from mesh local cell index to submesh local cell index.
    mesh_to_submesh_cell_local_indices = dict(zip(mesh_cell_indices, list(range(len(mesh_cell_indices)))))
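    # (Illustrative example: mesh_vertex_indices == [3, 8, 11] yields the local map
    # {3: 0, 8: 1, 11: 2}; the cell map at the line above is built the same way.)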
    # Now, define submesh cells
    submesh_cells = list()
    for c in mesh_cells:
        submesh_cells.append([mesh_to_submesh_vertex_local_indices[j] for j in c])
    # Store vertices as submesh_vertices[local_index] = (global_index, coordinates)
    submesh_vertices = dict()
    for mesh_vertex_local_index, submesh_vertex_local_index in mesh_to_submesh_vertex_local_indices.items():
        submesh_vertices[submesh_vertex_local_index] = (
            allgathered_mesh_to_submesh_vertex_global_indices[mesh.topology().global_indices(0)[mesh_vertex_local_index]],
            mesh.coordinates()[mesh_vertex_local_index]
        )
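    # (Each entry reads e.g. submesh_vertices[0] == (12, array([0.5, 0.5])): the
    # processor-independent global index first, then the coordinates taken from the parent mesh.)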
    # Collect the global number of vertices and cells
    global_num_cells = mpi_comm.allreduce(len(submesh_cells), op=SUM)
    global_num_vertices = len(allgathered_mesh_to_submesh_vertex_global_indices)
    # Fill in mesh_editor
    mesh_editor.init_vertices_global(len(submesh_vertices), global_num_vertices)
    mesh_editor.init_cells_global(len(submesh_cells), global_num_cells)
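    # (init_vertices_global and init_cells_global take the local count first, then the global count.)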
    for local_index, cell_vertices in enumerate(submesh_cells):
        if has_pybind11():
            mesh_editor.add_cell(local_index, cell_vertices)
        else:
            mesh_editor.add_cell(local_index, *cell_vertices)
    for local_index, (global_index, coordinates) in submesh_vertices.items():
        mesh_editor.add_vertex_global(local_index, global_index, coordinates)
    mesh_editor.close()
    # Initialize topology
    submesh.topology().init(0, len(submesh_vertices), global_num_vertices)
    submesh.topology().init(mesh.ufl_cell().topological_dimension(), len(submesh_cells), global_num_cells)
    # Correct the global index of cells: add_cell only assigns local indices, so overwrite the
    # global cell indices with the processor-independent numbering computed above
    for local_index in range(len(submesh_cells)):
        submesh.topology().set_global_index(
            submesh.topology().dim(),
            local_index,
            allgathered_mesh_to_submesh_cell_global_indices[mesh_global_cell_indices[local_index]]
        )
    
    # == 3. Store (local) mesh to/from submesh map for cells, facets and vertices == #
    # Cells
    submesh.mesh_to_submesh_cell_local_indices = mesh_to_submesh_cell_local_indices
    submesh.submesh_to_mesh_cell_local_indices = mesh_cell_indices
    # Vertices
    submesh.mesh_to_submesh_vertex_local_indices = mesh_to_submesh_vertex_local_indices
    submesh.submesh_to_mesh_vertex_local_indices = mesh_vertex_indices
    # Facets
    mesh_vertices_to_mesh_facets = dict()
    mesh_facets_to_mesh_vertices = dict()
    for mesh_cell_index in mesh_cell_indices:
        mesh_cell = Cell(mesh, mesh_cell_index)
        for mesh_facet in facets(mesh_cell):
            mesh_facet_vertices = list()
            for mesh_facet_vertex in vertices(mesh_facet):
                mesh_facet_vertices.append(mesh_facet_vertex.index())
            mesh_facet_vertices = tuple(sorted(mesh_facet_vertices))
            if mesh_facet_vertices in mesh_vertices_to_mesh_facets:
                assert mesh_vertices_to_mesh_facets[mesh_facet_vertices] == mesh_facet.index()
            else:
                mesh_vertices_to_mesh_facets[mesh_facet_vertices] = mesh_facet.index()
            if mesh_facet.index() in mesh_facets_to_mesh_vertices:
                assert mesh_facets_to_mesh_vertices[mesh_facet.index()] == mesh_facet_vertices
            else:
                mesh_facets_to_mesh_vertices[mesh_facet.index()] = mesh_facet_vertices
    submesh_vertices_to_submesh_facets = dict()
    submesh_facets_to_submesh_vertices = dict()
    for submesh_facet in facets(submesh):
        submesh_facet_vertices = list()
        for submesh_facet_vertex in vertices(submesh_facet):
            submesh_facet_vertices.append(submesh_facet_vertex.index())
        submesh_facet_vertices = tuple(sorted(submesh_facet_vertices))
        assert submesh_facet_vertices not in submesh_vertices_to_submesh_facets
        submesh_vertices_to_submesh_facets[submesh_facet_vertices] = submesh_facet.index()
        assert submesh_facet.index() not in submesh_facets_to_submesh_vertices
        submesh_facets_to_submesh_vertices[submesh_facet.index()] = submesh_facet_vertices
    mesh_to_submesh_facets_local_indices = dict()
    for (mesh_facet_index, mesh_facet_vertices) in mesh_facets_to_mesh_vertices.items():
        submesh_facet_vertices = tuple(sorted([submesh.mesh_to_submesh_vertex_local_indices[mesh_vertex] for mesh_vertex in mesh_facet_vertices]))
        submesh_facet_index = submesh_vertices_to_submesh_facets[submesh_facet_vertices]
        mesh_to_submesh_facets_local_indices[mesh_facet_index] = submesh_facet_index
    submesh_to_mesh_facets_local_indices = dict()
    for (submesh_facet_index, submesh_facet_vertices) in submesh_facets_to_submesh_vertices.items():
        mesh_facet_vertices = tuple(sorted([submesh.submesh_to_mesh_vertex_local_indices[submesh_vertex] for submesh_vertex in submesh_facet_vertices]))
        mesh_facet_index = mesh_vertices_to_mesh_facets[mesh_facet_vertices]
        submesh_to_mesh_facets_local_indices[submesh_facet_index] = mesh_facet_index
    submesh.mesh_to_submesh_facet_local_indices = mesh_to_submesh_facets_local_indices
    submesh.submesh_to_mesh_facet_local_indices = list()
    assert min(submesh_to_mesh_facets_local_indices.keys()) == 0
    assert max(submesh_to_mesh_facets_local_indices.keys()) == len(submesh_to_mesh_facets_local_indices.keys()) - 1
    for submesh_facet_index in range(len(submesh_to_mesh_facets_local_indices)):
        submesh.submesh_to_mesh_facet_local_indices.append(submesh_to_mesh_facets_local_indices[submesh_facet_index])
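    # (Illustrative example: a triangle cell with local vertices (3, 8, 11) contributes facets
    # keyed by the sorted vertex tuples (3, 8), (3, 11) and (8, 11); matching those tuples through
    # the vertex maps identifies the same facet on the mesh and on the submesh.)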
    # == 3bis. Prepare (temporary) global indices of facets == #
    # Wrapper to DistributedMeshTools::number_entities
    if has_pybind11():
        cpp_code = """
            #include <pybind11/pybind11.h>
            #include <dolfin/mesh/DistributedMeshTools.h>
            #include <dolfin/mesh/Mesh.h>
            
            void initialize_global_indices(std::shared_ptr<dolfin::Mesh> mesh, std::size_t dim)
            {
                dolfin::DistributedMeshTools::number_entities(*mesh, dim);
            }
            
            PYBIND11_MODULE(SIGNATURE, m)
            {
                m.def("initialize_global_indices", &initialize_global_indices);
            }
        """
        initialize_global_indices = compile_cpp_code(cpp_code).initialize_global_indices
    else:
        cpp_code = """
            void initialize_global_indices(Mesh & mesh, std::size_t dim)
            {
                DistributedMeshTools::number_entities(mesh, dim);
            }
        """
        initialize_global_indices = compile_extension_module(cpp_code, additional_system_headers=["dolfin/mesh/DistributedMeshTools.h"]).initialize_global_indices
    initialize_global_indices(mesh, mesh.topology().dim() - 1)
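    # (DistributedMeshTools::number_entities assigns globally consistent indices to the facets of
    # the parent mesh across processors; the calls to mesh_facet.global_index() below rely on this.)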
    # Prepare global indices of facets
    mesh_facets_local_to_global_indices = dict()
    for mesh_cell_index in mesh_cell_indices:
        mesh_cell = Cell(mesh, mesh_cell_index)
        for mesh_facet in facets(mesh_cell):
            mesh_facets_local_to_global_indices[mesh_facet.index()] = mesh_facet.global_index()
    mesh_facets_global_indices_in_submesh = list()
    for mesh_facet_local_index in mesh_to_submesh_facets_local_indices.keys():
        mesh_facets_global_indices_in_submesh.append(mesh_facets_local_to_global_indices[mesh_facet_local_index])
    allgathered__mesh_facets_global_indices_in_submesh = list()
    for r in range(mpi_comm.size):
        allgathered__mesh_facets_global_indices_in_submesh.extend(mpi_comm.bcast(mesh_facets_global_indices_in_submesh, root=r))
    allgathered__mesh_facets_global_indices_in_submesh = sorted(set(allgathered__mesh_facets_global_indices_in_submesh))
    mesh_to_submesh_facets_global_indices = dict()
    for (submesh_facet_global_index, mesh_facet_global_index) in enumerate(allgathered__mesh_facets_global_indices_in_submesh):
        mesh_to_submesh_facets_global_indices[mesh_facet_global_index] = submesh_facet_global_index
    submesh_facets_local_to_global_indices = dict()
    for (submesh_facet_local_index, mesh_facet_local_index) in submesh_to_mesh_facets_local_indices.items():
        submesh_facets_local_to_global_indices[submesh_facet_local_index] = mesh_to_submesh_facets_global_indices[mesh_facets_local_to_global_indices[mesh_facet_local_index]]
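    # (Illustrative example: if the selected mesh facets have global indices {10, 14, 21} across
    # all processors, they are renumbered 0, 1, 2 in the submesh, independently of the partitioning.)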
    
    # == 4. Assign shared vertices == #
    shared_entities_dimensions = {
        "vertex": 0,
        "facet": submesh.topology().dim() - 1,
        "cell": submesh.topology().dim()
    }
    shared_entities_class = {
        "vertex": Vertex,
        "facet": Facet,
        "cell": Cell
    }
    shared_entities_iterator = {
        "vertex": vertices,
        "facet": facets,
        "cell": cells
    }
    shared_entities_submesh_global_index_getter = {
        "vertex": lambda entity: entity.global_index(),
        "facet": lambda entity: submesh_facets_local_to_global_indices[entity.index()],
        "cell": lambda entity: entity.global_index()
    }
    for entity_type in ["vertex", "facet", "cell"]: # do not use .keys() because the order is important
        dim = shared_entities_dimensions[entity_type]
        class_ = shared_entities_class[entity_type]
        iterator = shared_entities_iterator[entity_type]
        submesh_global_index_getter = shared_entities_submesh_global_index_getter[entity_type]
        # Get shared entities from mesh. A subset of these will end up being shared entities in the submesh
        # as well (thanks to the fact that we do not redistribute cells from one processor to another)
        if mpi_comm.size > 1: # some entities may not be initialized in serial, since they are not needed
            assert mesh.topology().have_shared_entities(dim), "Mesh shared entities have not been initialized for dimension " + str(dim)
        if mesh.topology().have_shared_entities(dim): # always true in parallel (when really needed)
            # However, it may happen that an entity which has been selected is not shared anymore because only one of
            # the sharing processes has it in the submesh. For instance, consider the case
            # of two cells across the interface (located on a facet f) between two processors. It may happen that
            # only one of the two cells is selected: the facet f and its vertices are not shared anymore!
            # For this reason, we create a new dict from global entity index to the processors sharing it. Thus ...
            # ... first of all get global indices corresponding to local entities
            if entity_type in ["vertex", "cell"]:
                assert submesh.topology().have_global_indices(dim), "Submesh global indices have not been initialized for dimension " + str(dim)
            submesh_local_entities_global_index = list()
            submesh_local_entities_global_to_local_index = dict()
            for entity in iterator(submesh):
                local_entity_index = entity.index()
                global_entity_index = submesh_global_index_getter(entity)
                submesh_local_entities_global_index.append(global_entity_index)
                submesh_local_entities_global_to_local_index[global_entity_index] = local_entity_index
            # ... then gather all global indices from all processors
            gathered__submesh_local_entities_global_index = list() # over processor id
            for r in range(mpi_comm.size):
                gathered__submesh_local_entities_global_index.append(mpi_comm.bcast(submesh_local_entities_global_index, root=r))
            # ... then create dict from global index to processors sharing it
            submesh_shared_entities__global = dict()
            for r in range(mpi_comm.size):
                for global_entity_index in gathered__submesh_local_entities_global_index[r]:
                    if global_entity_index not in submesh_shared_entities__global:
                        submesh_shared_entities__global[global_entity_index] = list()
                    submesh_shared_entities__global[global_entity_index].append(r)
            # ... and finally populate the shared entities dict, which is the same as the dict above except that
            # the current processor rank is removed and local indexing is used
            submesh_shared_entities = dict() # from local index to list of integers
            for (global_entity_index, processors) in submesh_shared_entities__global.items():
                if (mpi_comm.rank in processors  # only keep local entities ...
                        and len(processors) > 1):  # ... which are still shared after submesh extraction
                    other_processors_list = list(processors)
                    other_processors_list.remove(mpi_comm.rank)
                    other_processors = array(other_processors_list, dtype=uintp)
                    submesh_shared_entities[submesh_local_entities_global_to_local_index[global_entity_index]] = other_processors
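                # (Illustrative example: if global entity 7 was broadcast by ranks {0, 2} and we are
                # rank 0, its local index maps to array([2], dtype=uintp), i.e. the other sharing rank.)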

            # Need an extension module to populate shared_entities, because in Python each call to
            # shared_entities returns a temporary.
            if has_pybind11():
                cpp_code = """
                    #include <Eigen/Core>
                    #include <pybind11/pybind11.h>
                    #include <pybind11/eigen.h>
                    #include <dolfin/mesh/Mesh.h>
                    
                    using OtherProcesses = Eigen::Ref<const Eigen::Matrix<std::size_t, Eigen::Dynamic, 1>>;
                    
                    void set_shared_entities(std::shared_ptr<dolfin::Mesh> submesh, std::size_t idx, const OtherProcesses other_processes, std::size_t dim)
                    {
                        std::set<unsigned int> set_other_processes;
                        for (std::size_t i(0); i < other_processes.size(); i++)
                            set_other_processes.insert(other_processes[i]);
                        submesh->topology().shared_entities(dim)[idx] = set_other_processes;
                    }
                    
                    PYBIND11_MODULE(SIGNATURE, m)
                    {
                        m.def("set_shared_entities", &set_shared_entities);
                    }
                """
                set_shared_entities = compile_cpp_code(cpp_code).set_shared_entities
            else:
                cpp_code = """
                    void set_shared_entities(Mesh & submesh, std::size_t idx, const Array<std::size_t>& other_processes, std::size_t dim)
                    {
                        std::set<unsigned int> set_other_processes;
                        for (std::size_t i(0); i < other_processes.size(); i++)
                            set_other_processes.insert(other_processes[i]);
                        submesh.topology().shared_entities(dim)[idx] = set_other_processes;
                    }
                """
                set_shared_entities = compile_extension_module(cpp_code).set_shared_entities
            for (submesh_entity_local_index, other_processors) in submesh_shared_entities.items():
                set_shared_entities(submesh, submesh_entity_local_index, other_processors, dim)
                
            log(DEBUG, "Local indices of shared entities for dimension " + str(dim) + ": " + str(list(submesh.topology().shared_entities(0).keys())))
            log(DEBUG, "Global indices of shared entities for dimension " + str(dim) + ": " + str([class_(submesh, local_index).global_index() for local_index in submesh.topology().shared_entities(dim).keys()]))
    
    # == 5. Also initialize submesh facets global indices, now that shared facets have been computed == #
    initialize_global_indices(submesh, submesh.topology().dim() - 1) # note that DOLFIN might change the numbering when compared to the one computed at step 3bis
    
    # == 6. Restore backup_first_marker_id and return == #
    if backup_first_marker_id is not None:
        markers.array()[0] = backup_first_marker_id
    return submesh
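
# A minimal usage sketch, kept as comments. It assumes the enclosing function above is exposed
# as create_submesh(mesh, markers, marker_id) (hypothetical name and signature; the actual
# definition appears earlier in this listing) and a legacy DOLFIN 2019.x API:
#
#     from dolfin import MeshFunction, UnitSquareMesh
#
#     mesh = UnitSquareMesh(8, 8)
#     markers = MeshFunction("size_t", mesh, mesh.topology().dim(), 0)
#     markers.array()[:10] = 1  # mark a few cells with marker_id == 1
#     submesh = create_submesh(mesh, markers, 1)
#     # the mesh <-> submesh index maps stored at step 3 are then available, e.g.
#     # submesh.submesh_to_mesh_cell_local_indices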