def create_submesh(mesh, markers):
    """
    Extract the submesh of ``mesh`` formed by the cells marked ``True`` in ``markers``,
    in a way that also works on distributed (MPI-parallel) meshes.

    Parameters
    ----------
    mesh : dolfin.Mesh
        Parent (possibly distributed) mesh.
    markers : MeshFunctionBool
        Boolean cell function over ``mesh`` (same topological dimension as the mesh);
        cells marked ``True`` are copied into the submesh.

    Returns
    -------
    dolfin.Mesh
        The extracted submesh. Local mesh <-> submesh index maps for cells,
        vertices and facets are attached to the returned mesh as attributes.

    NOTE(review): this function performs matched MPI collectives (``bcast`` in rank
    order) in several places — every rank must call it, with consistent markers.
    """
    mpi_comm = mesh.mpi_comm()
    if not has_pybind11():
        # Pre-pybind11 DOLFIN wraps the communicator; unwrap to a plain mpi4py comm
        mpi_comm = mpi_comm.tompi4py()
    assert isinstance(markers, MeshFunctionBool)
    assert markers.dim() == mesh.topology().dim()
    marker_id = True
    # == 1. Extract marked cells == #
    # Dolfin does not support a distributed mesh that is empty on some processes.
    # cbcpost gets around this by moving a single cell from the a non-empty processor to an empty one.
    # Note that, however, this cannot work if the number of marked cell is less than the number of processors.
    # In the interest of considering this case, we enable at least one cell (arbitrarily) on each processor.
    # We find this solution acceptable for our purposes, despite the increase of the reduced mesh size,
    # since we are never actually interested in solving a PDE on the reduced mesh, but rather only in
    # assemblying tensors on it and extract their values at some locations.
    backup_first_marker_id = None
    if marker_id not in markers.array():
        # This rank has no marked cells: temporarily mark its first cell (restored in step 6)
        backup_first_marker_id = markers.array()[0]
        markers.array()[0] = marker_id
    assert marker_id in markers.array()
    # == 2. Create submesh == #
    submesh = Mesh(mesh.mpi_comm())
    mesh_editor = MeshEditor()
    mesh_editor.open(submesh, mesh.ufl_cell().cellname(), mesh.ufl_cell().topological_dimension(), mesh.ufl_cell().geometric_dimension())
    # Extract cells from mesh with specified marker_id
    mesh_cell_indices = where(markers.array() == marker_id)[0]
    mesh_cells = mesh.cells()[mesh_cell_indices]
    mesh_global_cell_indices = sorted([mesh.topology().global_indices(mesh.topology().dim())[cell_index] for cell_index in mesh_cell_indices])
    # Get vertices of extracted cells
    mesh_vertex_indices = unique(mesh_cells.flatten())
    mesh_global_vertex_indices = sorted([mesh.topology().global_indices(0)[vertex_index] for vertex_index in mesh_vertex_indices])
    # Number vertices in a way which is independent from the number of processors. To do so ...
    # ... first of all collect all vertices from all processors
    allgathered_mesh_global_vertex_indices__non_empty_processors = list()
    allgathered_mesh_global_vertex_indices__empty_processors = list()
    for r in range(mpi_comm.size):
        # Each rank broadcasts whether it was originally empty; collectives are matched across ranks
        backup_first_marker_id_r = mpi_comm.bcast(backup_first_marker_id, root=r)
        if backup_first_marker_id_r is None:
            allgathered_mesh_global_vertex_indices__non_empty_processors.extend(mpi_comm.bcast(mesh_global_vertex_indices, root=r))
        else:
            allgathered_mesh_global_vertex_indices__empty_processors.extend(mpi_comm.bcast(mesh_global_vertex_indices, root=r))
    allgathered_mesh_global_vertex_indices__non_empty_processors = sorted(unique(allgathered_mesh_global_vertex_indices__non_empty_processors))
    allgathered_mesh_global_vertex_indices__empty_processors = sorted(unique(allgathered_mesh_global_vertex_indices__empty_processors))
    # ... then create a dict that will contain the map from mesh global vertex index to submesh global vertex index.
    # ... Here make sure to number first "real" vertices (those coming from non empty processors), since the other ones
    # ... are just a side effect of the current partitioning!
    allgathered_mesh_to_submesh_vertex_global_indices = dict()
    _submesh_vertex_global_index = 0
    for mesh_vertex_global_index in allgathered_mesh_global_vertex_indices__non_empty_processors:
        assert mesh_vertex_global_index not in allgathered_mesh_to_submesh_vertex_global_indices
        allgathered_mesh_to_submesh_vertex_global_indices[mesh_vertex_global_index] = _submesh_vertex_global_index
        _submesh_vertex_global_index += 1
    for mesh_vertex_global_index in allgathered_mesh_global_vertex_indices__empty_processors:
        # A vertex of an "artificial" cell may coincide with a real one; only number it if new
        if mesh_vertex_global_index not in allgathered_mesh_to_submesh_vertex_global_indices:
            allgathered_mesh_to_submesh_vertex_global_indices[mesh_vertex_global_index] = _submesh_vertex_global_index
            _submesh_vertex_global_index += 1
    # Number cells in a way which is independent from the number of processors. To do so ...
    # ... first of all collect all cells from all processors
    allgathered_mesh_global_cell_indices__non_empty_processors = list()
    allgathered_mesh_global_cell_indices__empty_processors = list()
    for r in range(mpi_comm.size):
        backup_first_marker_id_r = mpi_comm.bcast(backup_first_marker_id, root=r)
        if backup_first_marker_id_r is None:
            allgathered_mesh_global_cell_indices__non_empty_processors.extend(mpi_comm.bcast(mesh_global_cell_indices, root=r))
        else:
            allgathered_mesh_global_cell_indices__empty_processors.extend(mpi_comm.bcast(mesh_global_cell_indices, root=r))
    allgathered_mesh_global_cell_indices__non_empty_processors = sorted(unique(allgathered_mesh_global_cell_indices__non_empty_processors))
    allgathered_mesh_global_cell_indices__empty_processors = sorted(unique(allgathered_mesh_global_cell_indices__empty_processors))
    # ... then create a dict that will contain the map from mesh global cell index to submesh global cell index.
    # ... Here make sure to number first "real" vertices (those coming from non empty processors), since the other ones
    # ... are just a side effect of the current partitioning!
    allgathered_mesh_to_submesh_cell_global_indices = dict()
    _submesh_cell_global_index = 0
    for mesh_cell_global_index in allgathered_mesh_global_cell_indices__non_empty_processors:
        assert mesh_cell_global_index not in allgathered_mesh_to_submesh_cell_global_indices
        allgathered_mesh_to_submesh_cell_global_indices[mesh_cell_global_index] = _submesh_cell_global_index
        _submesh_cell_global_index += 1
    for mesh_cell_global_index in allgathered_mesh_global_cell_indices__empty_processors:
        # Unlike vertices, global cell indices are owned by exactly one rank, so no duplicates here
        assert mesh_cell_global_index not in allgathered_mesh_to_submesh_cell_global_indices
        allgathered_mesh_to_submesh_cell_global_indices[mesh_cell_global_index] = _submesh_cell_global_index
        _submesh_cell_global_index += 1
    # Also create a mapping from mesh local vertex index to submesh local vertex index.
    mesh_to_submesh_vertex_local_indices = dict(zip(mesh_vertex_indices, list(range(len(mesh_vertex_indices)))))
    # Also create a mapping from mesh local cell index to submesh local cell index.
    mesh_to_submesh_cell_local_indices = dict(zip(mesh_cell_indices, list(range(len(mesh_cell_indices)))))
    # Now, define submesh cells
    submesh_cells = list()
    for i, c in enumerate(mesh_cells):
        submesh_cells.append([mesh_to_submesh_vertex_local_indices[j] for j in c])
    # Store vertices as submesh_vertices[local_index] = (global_index, coordinates)
    submesh_vertices = dict()
    for mesh_vertex_local_index, submesh_vertex_local_index in mesh_to_submesh_vertex_local_indices.items():
        submesh_vertices[submesh_vertex_local_index] = (
            allgathered_mesh_to_submesh_vertex_global_indices[mesh.topology().global_indices(0)[mesh_vertex_local_index]],
            mesh.coordinates()[mesh_vertex_local_index]
        )
    # Collect the global number of vertices and cells
    global_num_cells = mpi_comm.allreduce(len(submesh_cells), op=SUM)
    global_num_vertices = len(allgathered_mesh_to_submesh_vertex_global_indices)
    # Fill in mesh_editor
    mesh_editor.init_vertices_global(len(submesh_vertices), global_num_vertices)
    mesh_editor.init_cells_global(len(submesh_cells), global_num_cells)
    for local_index, cell_vertices in enumerate(submesh_cells):
        if has_pybind11():
            # pybind11 API takes the vertex list directly; the SWIG API unpacks it
            mesh_editor.add_cell(local_index, cell_vertices)
        else:
            mesh_editor.add_cell(local_index, *cell_vertices)
    for local_index, (global_index, coordinates) in submesh_vertices.items():
        mesh_editor.add_vertex_global(local_index, global_index, coordinates)
    mesh_editor.close()
    # Initialize topology
    submesh.topology().init(0, len(submesh_vertices), global_num_vertices)
    submesh.topology().init(mesh.ufl_cell().topological_dimension(), len(submesh_cells), global_num_cells)
    # Correct the global index of cells
    for local_index in range(len(submesh_cells)):
        submesh.topology().set_global_index(
            submesh.topology().dim(),
            local_index,
            allgathered_mesh_to_submesh_cell_global_indices[mesh_global_cell_indices[local_index]]
        )
    # == 3. Store (local) mesh to/from submesh map for cells, facets and vertices == #
    # Cells
    submesh.mesh_to_submesh_cell_local_indices = mesh_to_submesh_cell_local_indices
    submesh.submesh_to_mesh_cell_local_indices = mesh_cell_indices
    # Vertices
    submesh.mesh_to_submesh_vertex_local_indices = mesh_to_submesh_vertex_local_indices
    submesh.submesh_to_mesh_vertex_local_indices = mesh_vertex_indices
    # Facets
    # Facets are matched between mesh and submesh through their (sorted) vertex tuples,
    # since facet local indices are assigned independently by DOLFIN on the two meshes.
    mesh_vertices_to_mesh_facets = dict()
    mesh_facets_to_mesh_vertices = dict()
    for mesh_cell_index in mesh_cell_indices:
        mesh_cell = Cell(mesh, mesh_cell_index)
        for mesh_facet in facets(mesh_cell):
            mesh_facet_vertices = list()
            for mesh_facet_vertex in vertices(mesh_facet):
                mesh_facet_vertices.append(mesh_facet_vertex.index())
            mesh_facet_vertices = tuple(sorted(mesh_facet_vertices))
            if mesh_facet_vertices in mesh_vertices_to_mesh_facets:
                # Facet shared by two marked cells: it must map to the same facet index
                assert mesh_vertices_to_mesh_facets[mesh_facet_vertices] == mesh_facet.index()
            else:
                mesh_vertices_to_mesh_facets[mesh_facet_vertices] = mesh_facet.index()
            if mesh_facet.index() in mesh_facets_to_mesh_vertices:
                assert mesh_facets_to_mesh_vertices[mesh_facet.index()] == mesh_facet_vertices
            else:
                mesh_facets_to_mesh_vertices[mesh_facet.index()] = mesh_facet_vertices
    submesh_vertices_to_submesh_facets = dict()
    submesh_facets_to_submesh_vertices = dict()
    for submesh_facet in facets(submesh):
        submesh_facet_vertices = list()
        for submesh_facet_vertex in vertices(submesh_facet):
            submesh_facet_vertices.append(submesh_facet_vertex.index())
        submesh_facet_vertices = tuple(sorted(submesh_facet_vertices))
        assert submesh_facet_vertices not in submesh_vertices_to_submesh_facets
        submesh_vertices_to_submesh_facets[submesh_facet_vertices] = submesh_facet.index()
        assert submesh_facet.index() not in submesh_facets_to_submesh_vertices
        submesh_facets_to_submesh_vertices[submesh_facet.index()] = submesh_facet_vertices
    mesh_to_submesh_facets_local_indices = dict()
    for (mesh_facet_index, mesh_vertices) in mesh_facets_to_mesh_vertices.items():
        submesh_vertices = tuple(sorted([submesh.mesh_to_submesh_vertex_local_indices[mesh_vertex] for mesh_vertex in mesh_vertices]))
        submesh_facet_index = submesh_vertices_to_submesh_facets[submesh_vertices]
        mesh_to_submesh_facets_local_indices[mesh_facet_index] = submesh_facet_index
    submesh_to_mesh_facets_local_indices = dict()
    for (submesh_facet_index, submesh_vertices) in submesh_facets_to_submesh_vertices.items():
        mesh_vertices = tuple(sorted([submesh.submesh_to_mesh_vertex_local_indices[submesh_vertex] for submesh_vertex in submesh_vertices]))
        mesh_facet_index = mesh_vertices_to_mesh_facets[mesh_vertices]
        submesh_to_mesh_facets_local_indices[submesh_facet_index] = mesh_facet_index
    submesh.mesh_to_submesh_facet_local_indices = mesh_to_submesh_facets_local_indices
    submesh.submesh_to_mesh_facet_local_indices = list()
    # Submesh facet local indices must be a contiguous 0..N-1 range for the list below to be valid
    assert min(submesh_to_mesh_facets_local_indices.keys()) == 0
    assert max(submesh_to_mesh_facets_local_indices.keys()) == len(submesh_to_mesh_facets_local_indices.keys()) - 1
    for submesh_facet_index in range(len(submesh_to_mesh_facets_local_indices)):
        submesh.submesh_to_mesh_facet_local_indices.append(submesh_to_mesh_facets_local_indices[submesh_facet_index])
    # == 3bis. Prepare (temporary) global indices of facets == #
    # Wrapper to DistributedMeshTools::number_entities
    if has_pybind11():
        cpp_code = """
            #include <pybind11/pybind11.h>
            #include <dolfin/mesh/DistributedMeshTools.h>
            #include <dolfin/mesh/Mesh.h>

            void initialize_global_indices(std::shared_ptr<dolfin::Mesh> mesh, std::size_t dim)
            {
                dolfin::DistributedMeshTools::number_entities(*mesh, dim);
            }

            PYBIND11_MODULE(SIGNATURE, m)
            {
                m.def("initialize_global_indices", &initialize_global_indices);
            }
        """
        initialize_global_indices = compile_cpp_code(cpp_code).initialize_global_indices
    else:
        cpp_code = """
            void initialize_global_indices(Mesh & mesh, std::size_t dim)
            {
                DistributedMeshTools::number_entities(mesh, dim);
            }
        """
        initialize_global_indices = compile_extension_module(cpp_code, additional_system_headers=["dolfin/mesh/DistributedMeshTools.h"]).initialize_global_indices
    initialize_global_indices(mesh, mesh.topology().dim() - 1)
    # Prepare global indices of facets
    mesh_facets_local_to_global_indices = dict()
    for mesh_cell_index in mesh_cell_indices:
        mesh_cell = Cell(mesh, mesh_cell_index)
        for mesh_facet in facets(mesh_cell):
            mesh_facets_local_to_global_indices[mesh_facet.index()] = mesh_facet.global_index()
    mesh_facets_global_indices_in_submesh = list()
    for mesh_facet_local_index in mesh_to_submesh_facets_local_indices.keys():
        mesh_facets_global_indices_in_submesh.append(mesh_facets_local_to_global_indices[mesh_facet_local_index])
    allgathered__mesh_facets_global_indices_in_submesh = list()
    for r in range(mpi_comm.size):
        allgathered__mesh_facets_global_indices_in_submesh.extend(mpi_comm.bcast(mesh_facets_global_indices_in_submesh, root=r))
    allgathered__mesh_facets_global_indices_in_submesh = sorted(set(allgathered__mesh_facets_global_indices_in_submesh))
    mesh_to_submesh_facets_global_indices = dict()
    for (submesh_facet_global_index, mesh_facet_global_index) in enumerate(allgathered__mesh_facets_global_indices_in_submesh):
        mesh_to_submesh_facets_global_indices[mesh_facet_global_index] = submesh_facet_global_index
    submesh_facets_local_to_global_indices = dict()
    for (submesh_facet_local_index, mesh_facet_local_index) in submesh_to_mesh_facets_local_indices.items():
        submesh_facets_local_to_global_indices[submesh_facet_local_index] = mesh_to_submesh_facets_global_indices[mesh_facets_local_to_global_indices[mesh_facet_local_index]]
    # == 4. Assign shared vertices == #
    shared_entities_dimensions = {
        "vertex": 0,
        "facet": submesh.topology().dim() - 1,
        "cell": submesh.topology().dim()
    }
    shared_entities_class = {
        "vertex": Vertex,
        "facet": Facet,
        "cell": Cell
    }
    shared_entities_iterator = {
        "vertex": vertices,
        "facet": facets,
        "cell": cells
    }
    shared_entities_submesh_global_index_getter = {
        "vertex": lambda entity: entity.global_index(),
        "facet": lambda entity: submesh_facets_local_to_global_indices[entity.index()],
        "cell": lambda entity: entity.global_index()
    }
    for entity_type in ["vertex", "facet", "cell"]: # do not use .keys() because the order is important
        dim = shared_entities_dimensions[entity_type]
        class_ = shared_entities_class[entity_type]
        iterator = shared_entities_iterator[entity_type]
        submesh_global_index_getter = shared_entities_submesh_global_index_getter[entity_type]
        # Get shared entities from mesh. A subset of these will end being shared entities also the submesh
        # (thanks to the fact that we do not redistribute cells from one processor to another)
        if mpi_comm.size > 1: # some entities may not be initialized in serial, since they are not needed
            assert mesh.topology().have_shared_entities(dim), "Mesh shared entities have not been initialized for dimension " + str(dim)
        if mesh.topology().have_shared_entities(dim): # always true in parallel (when really needed)
            # However, it may happen that an entity which has been selected is not shared anymore because only one of
            # the sharing processes has it in the submesh. For instance, consider the case
            # of two cells across the interface (located on a facet f) between two processors. It may happen that
            # only one of the two cells is selected: the facet f and its vertices are not shared anymore!
            # For this reason, we create a new dict from global entity index to processors sharing them. Thus ...
            # ... first of all get global indices corresponding to local entities
            if entity_type in ["vertex", "cell"]:
                assert submesh.topology().have_global_indices(dim), "Submesh global indices have not been initialized for dimension " + str(dim)
            submesh_local_entities_global_index = list()
            submesh_local_entities_global_to_local_index = dict()
            for entity in iterator(submesh):
                local_entity_index = entity.index()
                global_entity_index = submesh_global_index_getter(entity)
                submesh_local_entities_global_index.append(global_entity_index)
                submesh_local_entities_global_to_local_index[global_entity_index] = local_entity_index
            # ... then gather all global indices from all processors
            gathered__submesh_local_entities_global_index = list() # over processor id
            for r in range(mpi_comm.size):
                gathered__submesh_local_entities_global_index.append(mpi_comm.bcast(submesh_local_entities_global_index, root=r))
            # ... then create dict from global index to processors sharing it
            submesh_shared_entities__global = dict()
            for r in range(mpi_comm.size):
                for global_entity_index in gathered__submesh_local_entities_global_index[r]:
                    if global_entity_index not in submesh_shared_entities__global:
                        submesh_shared_entities__global[global_entity_index] = list()
                    submesh_shared_entities__global[global_entity_index].append(r)
            # ... and finally popuplate shared entities dict, which is the same as the dict above except that
            # the current processor rank is removed and a local indexing is used
            submesh_shared_entities = dict() # from local index to list of integers
            for (global_entity_index, processors) in submesh_shared_entities__global.items():
                if (
                    mpi_comm.rank in processors # only local entities
                        and
                    len(processors) > 1 # it was still shared after submesh extraction
                ):
                    other_processors_list = list(processors)
                    other_processors_list.remove(mpi_comm.rank)
                    other_processors = array(other_processors_list, dtype=uintp)
                    submesh_shared_entities[submesh_local_entities_global_to_local_index[global_entity_index]] = other_processors
            # Need an extension module to populate shared_entities because in python each call to shared_entities
            # returns a temporary.
            if has_pybind11():
                cpp_code = """
                    #include <Eigen/Core>
                    #include <pybind11/pybind11.h>
                    #include <pybind11/eigen.h>
                    #include <dolfin/mesh/Mesh.h>

                    using OtherProcesses = Eigen::Ref<const Eigen::Matrix<std::size_t, Eigen::Dynamic, 1>>;

                    void set_shared_entities(std::shared_ptr<dolfin::Mesh> submesh, std::size_t idx, const OtherProcesses other_processes, std::size_t dim)
                    {
                        std::set<unsigned int> set_other_processes;
                        for (std::size_t i(0); i < other_processes.size(); i++)
                            set_other_processes.insert(other_processes[i]);
                        submesh->topology().shared_entities(dim)[idx] = set_other_processes;
                    }

                    PYBIND11_MODULE(SIGNATURE, m)
                    {
                        m.def("set_shared_entities", &set_shared_entities);
                    }
                """
                set_shared_entities = compile_cpp_code(cpp_code).set_shared_entities
            else:
                cpp_code = """
                    void set_shared_entities(Mesh & submesh, std::size_t idx, const Array<std::size_t>& other_processes, std::size_t dim)
                    {
                        std::set<unsigned int> set_other_processes;
                        for (std::size_t i(0); i < other_processes.size(); i++)
                            set_other_processes.insert(other_processes[i]);
                        submesh.topology().shared_entities(dim)[idx] = set_other_processes;
                    }
                """
                set_shared_entities = compile_extension_module(cpp_code).set_shared_entities
            for (submesh_entity_local_index, other_processors) in submesh_shared_entities.items():
                set_shared_entities(submesh, submesh_entity_local_index, other_processors, dim)
            # NOTE(review): the first log reads shared_entities(0) while reporting "dimension dim";
            # the second uses shared_entities(dim) — looks like the 0 should be dim; confirm intent.
            log(DEBUG, "Local indices of shared entities for dimension " + str(dim) + ": " + str(list(submesh.topology().shared_entities(0).keys())))
            log(DEBUG, "Global indices of shared entities for dimension " + str(dim) + ": " + str([class_(submesh, local_index).global_index() for local_index in submesh.topology().shared_entities(dim).keys()]))
    # == 5. Also initialize submesh facets global indices, now that shared facets have been computed == #
    initialize_global_indices(submesh, submesh.topology().dim() - 1) # note that DOLFIN might change the numbering when compared to the one at 3bis
    # == 6. Restore backup_first_marker_id and return == #
    if backup_first_marker_id is not None:
        markers.array()[0] = backup_first_marker_id
    return submesh
def create_submesh(mesh, markers, marker):
    """
    Create a SubMesh-equivalent in parallel from the cells of ``mesh`` whose
    ``markers`` value equals ``marker``.

    Parameters
    ----------
    mesh : dolfin.Mesh
        Parent (possibly distributed) mesh.
    markers : dolfin.MeshFunction
        Cell function over ``mesh``.
    marker : int
        Marker value selecting the cells to extract.

    Returns
    -------
    dolfin.Mesh
        The extracted submesh (empty if no cell matches ``marker`` on any process).

    Fixes applied (Python 3 compatibility):
    - ``dict.values() + list`` and ``dict.items() + dict.items()`` concatenations
      raise TypeError on Python 3; replaced with ``max(..., default=...)`` and
      dict unpacking.
    - ``dict.keys()``/``dict.values()`` views are materialized to lists before
      being broadcast.
    """
    # Build mesh
    submesh = Mesh()
    mesh_editor = MeshEditor()
    mesh_editor.open(submesh, mesh.ufl_cell().cellname(),
                     mesh.ufl_cell().topological_dimension(),
                     mesh.ufl_cell().geometric_dimension())
    # Return empty mesh if no matching markers
    if MPI.sum(mpi_comm_world(), int(marker in markers.array())) == 0:
        cbc_warning(
            "Unable to find matching markers in meshfunction. Submesh is empty."
        )
        mesh_editor.close()
        return submesh
    base_cell_indices = np.where(markers.array() == marker)[0]
    base_cells = mesh.cells()[base_cell_indices]
    base_vertex_indices = np.unique(base_cells.flatten())
    base_global_vertex_indices = sorted(
        [mesh.topology().global_indices(0)[vi] for vi in base_vertex_indices])
    gi = mesh.topology().global_indices(0)
    # Vertices shared with other processes need a globally-consistent numbering,
    # assigned on process 0 below; unshared vertices are numbered per-process.
    shared_local_indices = set(base_vertex_indices).intersection(
        set(mesh.topology().shared_entities(0).keys()))
    shared_global_indices = [gi[vi] for vi in shared_local_indices]
    unshared_global_indices = list(
        set(base_global_vertex_indices) - set(shared_global_indices))
    unshared_vertices_dist = distribution(len(unshared_global_indices))
    # Number unshared vertices on separate process
    idx = sum(unshared_vertices_dist[:MPI.rank(mpi_comm_world())])
    base_to_sub_global_indices = {}
    # NOTE: do not reuse the name "gi" for the loop variable, it would clobber
    # the global_indices array captured above.
    for unshared_gi in unshared_global_indices:
        base_to_sub_global_indices[unshared_gi] = idx
        idx += 1
    # Gather all shared process on process 0 and assign global index
    all_shared_global_indices = gather(shared_global_indices,
                                       on_process=0, flatten=True)
    all_shared_global_indices = np.unique(all_shared_global_indices)
    shared_base_to_sub_global_indices = {}
    # Start shared numbering right after the largest unshared index on any
    # process (-1e16 sentinel handles processes with no unshared vertices).
    idx = int(
        MPI.max(mpi_comm_world(),
                float(max(base_to_sub_global_indices.values(),
                          default=-1e16))) + 1)
    if MPI.rank(mpi_comm_world()) == 0:
        for shared_gi in all_shared_global_indices:
            shared_base_to_sub_global_indices[int(shared_gi)] = idx
            idx += 1
    # Broadcast global numbering of all shared vertices.
    # keys() and values() iterate in the same order for an unmutated dict,
    # so zipping the two broadcasts reconstructs the mapping.
    shared_base_to_sub_global_indices = dict(
        zip(broadcast(list(shared_base_to_sub_global_indices.keys()), 0),
            broadcast(list(shared_base_to_sub_global_indices.values()), 0)))
    # Join shared and unshared numbering in one dict (shared entries win on
    # collision, matching the original dict(a.items() + b.items()) semantics)
    base_to_sub_global_indices = {**base_to_sub_global_indices,
                                  **shared_base_to_sub_global_indices}
    # Create mapping of local indices
    base_to_sub_local_indices = dict(
        zip(base_vertex_indices, range(len(base_vertex_indices))))
    # Define sub-cells
    sub_cells = [None] * len(base_cells)
    for i, c in enumerate(base_cells):
        sub_cells[i] = [base_to_sub_local_indices[j] for j in c]
    # Store vertices as sub_vertices[local_index] = (global_index, coordinates)
    sub_vertices = {}
    for base_local, sub_local in base_to_sub_local_indices.items():
        sub_vertices[sub_local] = (base_to_sub_global_indices[
            mesh.topology().global_indices(0)[base_local]],
            mesh.coordinates()[base_local])
    ## Done with base mesh
    # Distribute meshdata on (if any) empty processes
    sub_cells, sub_vertices = distribute_meshdata(sub_cells, sub_vertices)
    global_cell_distribution = distribution(len(sub_cells))
    global_num_cells = MPI.sum(mpi_comm_world(), len(sub_cells))
    global_num_vertices = sum(unshared_vertices_dist) + MPI.sum(
        mpi_comm_world(), len(all_shared_global_indices))
    mesh_editor.init_vertices(len(sub_vertices))
    mesh_editor.init_cells_global(len(sub_cells), global_num_cells)
    global_index_start = sum(
        global_cell_distribution[:MPI.rank(mesh.mpi_comm())])
    # Hoist the loop-invariant version check out of the cell loop
    modern_editor_api = LooseVersion(dolfin_version()) >= LooseVersion("1.6.0")
    for index, cell in enumerate(sub_cells):
        if modern_editor_api:
            mesh_editor.add_cell(index, *cell)
        else:
            mesh_editor.add_cell(int(index), global_index_start + index,
                                 np.array(cell, dtype=np.uintp))
    for local_index, (global_index, coordinates) in sub_vertices.items():
        mesh_editor.add_vertex_global(int(local_index), int(global_index),
                                      coordinates)
    mesh_editor.close()
    submesh.topology().init(0, len(sub_vertices), global_num_vertices)
    submesh.topology().init(mesh.ufl_cell().topological_dimension(),
                            len(sub_cells), global_num_cells)
    # FIXME: Set up shared entities. The returned submesh has no shared-entity
    # information (an incomplete shared-vertex implementation was removed);
    # the empty entry below only marks dimension 0 as "initialized".
    # What damage does this do?
    submesh.topology().shared_entities(0)[0] = []
    return submesh
def create_slice(basemesh, point, normal, closest_region=False, crinkle_clip=False):
    """Create a slicemesh from a basemesh.

    :param basemesh: Mesh to slice
    :param point: Point in slicing plane
    :param normal: Normal to slicing plane
    :param closest_region: Set to True to extract disjoint region closest to specified point
    :param crinkle_clip: Set to True to return mesh of same topological dimension as basemesh

    .. note::

        Only 3D-meshes currently supported for slicing.

    .. warning::

        Slice-instances are intended for visualization only, and may produce erronous
        results if used for computations.

    Fix applied: ``xrange`` (a NameError on Python 3) replaced with ``range``.
    """
    assert basemesh.geometry().dim() == 3, "Can only slice 3D-meshes."

    P = np.array([point[0], point[1], point[2]], dtype=np.double)

    # Create unit normal
    n = np.array([normal[0], normal[1], normal[2]])
    n = n / np.linalg.norm(n)
    #self.n = Constant((n[0], n[1], n[2]))

    # Calculate the distribution of vertices around the plane
    # (sign of np.dot(p-P, n) determines which side of the plane p is on)
    vsplit = np.dot(basemesh.coordinates() - P, n)

    # Count each cells number of vertices on the "positive" side of the plane
    # Only cells with vertices on both sides of the plane intersect the plane
    # (renamed from "operator" to avoid shadowing the stdlib module name)
    side_test = np.less
    npos = np.sum(vsplit[basemesh.cells()] < 0, 1)
    intersection_cells = basemesh.cells()[(npos > 0) & (npos < 4)]

    if len(intersection_cells) == 0:
        # Try to put "zeros" on other side of plane
        # FIXME: handle cells with vertices exactly intersecting the plane in a more robust manner.
        side_test = np.greater
        npos = np.sum(vsplit[basemesh.cells()] > 0, 1)
        intersection_cells = basemesh.cells()[(npos > 0) & (npos < 4)]

    if crinkle_clip:
        cf = CellFunction("size_t", basemesh)
        cf.set_all(0)
        cf.array()[(npos > 0) & (npos < 4)] = 1
        mesh = create_submesh(basemesh, cf, 1)
    else:
        def add_cell(cells, cell):
            # Split cell into triangles (fan triangulation of the polygon)
            for i in range(len(cell) - 2):  # was xrange: NameError on Python 3
                cells.append(cell[i:i + 3])

        cells = []
        index = 0
        indexes = {}
        for c in intersection_cells:
            a = side_test(vsplit[c], 0)
            # np.where(mask) is the idiomatic form of np.where(mask == True)
            positives = c[np.where(a)[0]]
            negatives = c[np.where(~a)[0]]
            cell = []
            for pp_ind in positives:
                pp = basemesh.coordinates()[pp_ind]
                for pn_ind in negatives:
                    pn = basemesh.coordinates()[pn_ind]
                    if (pp_ind, pn_ind) not in indexes:
                        # Calculate intersection point with the plane
                        d = np.dot(P - pp, n) / np.dot(pp - pn, n)
                        ip = pp + (pp - pn) * d
                        indexes[(pp_ind, pn_ind)] = (index, ip)
                        index += 1
                    cell.append(indexes[(pp_ind, pn_ind)][0])
            add_cell(cells, cell)
        MPI.barrier(mpi_comm_world())

        # Assign global indices
        # TODO: Assign global indices properly
        dist = distribution(index)
        global_idx = sum(dist[:MPI.rank(mpi_comm_world())])
        vertices = {}
        for idx, p in indexes.values():
            vertices[idx] = (global_idx, p)
            global_idx += 1

        global_num_cells = MPI.sum(mpi_comm_world(), len(cells))
        global_num_vertices = MPI.sum(mpi_comm_world(), len(vertices))

        mesh = Mesh()

        # Return empty mesh if no intersections were found
        if global_num_cells == 0:
            mesh_editor = MeshEditor()
            mesh_editor.open(mesh, "triangle", 2, 3)
            mesh_editor.init_vertices(0)
            mesh_editor.init_cells(0)
            mesh_editor.close()
        else:
            # Distribute mesh if empty on any processors
            cells, vertices = distribute_meshdata(cells, vertices)

            # Build mesh
            mesh_editor = MeshEditor()
            mesh_editor.open(mesh, "triangle", 2, 3)
            mesh_editor.init_vertices(len(vertices))
            mesh_editor.init_cells(len(cells))
            for index, cell in enumerate(cells):
                mesh_editor.add_cell(index, cell[0], cell[1], cell[2])
            for local_index, (global_index, coordinates) in vertices.items():
                mesh_editor.add_vertex_global(int(local_index), int(global_index), coordinates)
            mesh_editor.close()
            mesh.topology().init(0, len(vertices), global_num_vertices)
            mesh.topology().init(2, len(cells), global_num_cells)

    if closest_region and mesh.size_global(0) > 0:
        assert MPI.size(mpi_comm_world()) == 1, "Extract closest region does not work in parallel"
        regions = compute_connectivity(mesh)
        i, d = mesh.bounding_box_tree().compute_closest_entity(Point(P))
        # NOTE(review): exact float equality across an MPI reduction is fragile;
        # harmless in serial (asserted above), but confirm if parallel support is added.
        if d == MPI.min(mesh.mpi_comm(), d):
            v = regions[int(i)]
        else:
            v = 0
        v = MPI.max(mesh.mpi_comm(), v)
        mesh = create_submesh(mesh, regions, v)

    return mesh