Example #1
import os

import numpy as np
from dolfin import File, Mesh, MeshFunction
from dolfin_utils.meshconvert import meshconvert


def convert_and_create_facet_mesh_function(ifilename, ofilename):
    # First convert the gmsh mesh
    meshconvert.convert2xml(ifilename, ofilename)

    # Now load the created mesh and initialise the required connectivity information
    mesh = Mesh(ofilename)
    mesh.order()

    File(ofilename) << mesh

    D = mesh.topology().dim()
    mesh.init(D - 1, 0)

    # read the data from the gmsh file once again
    dim_count, vertices_used, tags = process_gmsh_elements(ifilename, D - 1)
    # Get the facet-node connectivity information (reshape as a row of node indices per facet)
    facets_as_nodes = mesh.topology()(D - 1, 0)().reshape(mesh.num_facets(), D)

    # Create and initialise the mesh function
    facet_mark_function = MeshFunction('uint', mesh, D - 1)
    facet_mark_function.set_all(0)

    # set the relevant values of the mesh function
    # A list (not a range object) so that entries can be popped below
    facets_to_check = list(range(mesh.num_facets()))
    for i in range(len(tags)):
        # Each facet element contributes D node indices, so stride by D
        nodes = np.sort(np.array(vertices_used[D * i:(D * i + D)]))
        value = tags[i][0]

        if value != 0:
            found = False
            for j in range(len(facets_to_check)):
                index = facets_to_check[j]
                if np.array_equal(facets_as_nodes[index, :], nodes):
                    found = True
                    facets_to_check.pop(j)
                    # set the value of the mesh function
                    facet_mark_function[index] = value
                    break

            if not found:
                raise Exception("The facet (%d) was not found to mark: %s" %
                                (i, nodes))

    # save the mesh function to file
    fname = os.path.splitext(ofilename)[0]
    mesh_function_file = File("%s_%s.xml" % (fname, "facet_regions"))

    mesh_function_file << facet_mark_function
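A minimal usage sketch for the converter above; the file names are hypothetical and assume a Gmsh v2.0 mesh:

# Hypothetical input/output names; 'channel.msh' must be a Gmsh v2.0 file.
convert_and_create_facet_mesh_function("channel.msh", "channel.xml")
# The markers end up in channel_facet_regions.xml and can be reloaded with, e.g.:
# facet_regions = MeshFunction('uint', Mesh("channel.xml"), "channel_facet_regions.xml")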
Example #3
File: utils.py  Project: pezzus/fimh2021
def import_from_gmsh(fname):
    "Convert from gmsh to dolfin"

    # read with meshio
    msh = meshio.read(fname)

    # create a DOLFIN mesh (assuming 2d)
    gdim, tdim = 2, 2
    mm = Mesh()
    editor = MeshEditor()
    editor.open(mm, "triangle", gdim, tdim)

    npt = msh.points.shape[0]
    nc = msh.get_cells_type("triangle").shape[0]

    editor.init_vertices_global(npt, npt)
    editor.init_cells_global(nc, nc)

    for i, p in enumerate(msh.points):
        editor.add_vertex(i, p[:2])

    for i, c in enumerate(msh.get_cells_type("triangle")):
        editor.add_cell(i, c)

    editor.close()

    # domains
    md = mm.domains()
    md.init(tdim)
    markers = {}

    if 'gmsh:physical' not in msh.cell_data_dict:
        # no markers at all
        return mm, markers

    phy = msh.cell_data_dict['gmsh:physical']
    if 'triangle' in phy:
        for eid, val in enumerate(phy['triangle']):
            md.set_marker((eid, val), 2)

    if 'line' in phy:
        mm.init(0, 1)
        p2e = mm.topology()(0, 1)

        for l, k in zip(msh.get_cells_type("line"), phy['line']):
            e = set(p2e(l[0])).intersection(p2e(l[1])).pop()
            md.set_marker((e, k), 1)

    if 'vertex' in phy:
        for eid, val in zip(msh.get_cells_type("vertex"), phy['vertex']):
            md.set_marker((eid[0], val), 0)

    # names
    markers = tuple(
        {n: v.item()
         for n, (v, d) in msh.field_data.items() if d == dim}
        for dim in range(tdim + 1))

    return mm, markers
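A usage sketch, assuming a hypothetical 2d Gmsh file domain.msh; markers is a tuple of name-to-tag dictionaries for vertices, lines and triangles:

# Hypothetical input file
mm, markers = import_from_gmsh("domain.msh")
print(mm.num_cells(), markers)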
Example #4
def load_h5_mesh(h5_file, scale_factor=None):
    '''Unpack to mesh, volumes and surfaces'''
    mesh = Mesh()
    h5 = HDF5File(mesh.mpi_comm(), h5_file, 'r')
    h5.read(mesh, 'mesh', False)
    # Convert units
    if scale_factor is not None:
        assert scale_factor > 0, "Scale factor must be a positive real number!"
        mesh.coordinates()[:] *= scale_factor

    surfaces = MeshFunction('size_t', mesh, mesh.topology().dim() - 1)
    h5.read(surfaces, 'surfaces')

    volumes = MeshFunction('size_t', mesh, mesh.topology().dim())
    h5.read(volumes, 'volumes')

    return mesh, volumes, surfaces
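A short usage sketch; the file name is hypothetical and assumes an HDF5 file containing matching 'mesh', 'surfaces' and 'volumes' datasets:

# Hypothetical file; rescale coordinates from millimetres to metres.
mesh, volumes, surfaces = load_h5_mesh('geometry.h5', scale_factor=1e-3)
print(mesh.num_cells(), volumes.array().max(), surfaces.array().max())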
Example #5
def DolfinReader(directory, file):
    msh = Mesh()
    with XDMFFile("{}/{}.xdmf".format(directory, file)) as infile:
        infile.read(msh)
        dim = msh.topology().dim()
    mvc = MeshValueCollection("size_t", msh, dim=dim - 1)
    with XDMFFile("{}/{}_facets.xdmf".format(directory, file)) as infile:
        infile.read(mvc, "boundaries")
    boundaries = MeshFunctionSizet(msh, mvc)
    mvc = MeshValueCollection("size_t", msh, dim=dim)
    with XDMFFile("{}/{}_physical_region.xdmf".format(directory,
                                                      file)) as infile:
        infile.read(mvc, "subdomains")
    subdomains = MeshFunctionSizet(msh, mvc)
    return msh, boundaries, subdomains
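A usage sketch with hypothetical file names; the function expects the triple <file>.xdmf, <file>_facets.xdmf and <file>_physical_region.xdmf in the given directory:

# Hypothetical files: meshes/domain.xdmf, meshes/domain_facets.xdmf,
# meshes/domain_physical_region.xdmf (Measure imported from dolfin)
msh, boundaries, subdomains = DolfinReader("meshes", "domain")
dx = Measure("dx", domain=msh, subdomain_data=subdomains)
ds = Measure("ds", domain=msh, subdomain_data=boundaries)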
Example #6

bc = DirichletBC(V, Constant((0, 0, 0)), clamped_boundary)


# Define strain and stress
def epsilon(u):
    return 0.5 * (grad(u) + grad(u).T)


def sigma(u):
    return lambda_ * div(u) * Identity(d) + 2 * mu * epsilon(u)


# Mark facets of the mesh for the Neumann boundary condition
boundaries = MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
boundaries.set_all(0)


class NeumannBoundary(SubDomain):
    def inside(self, x, on_boundary):
        return on_boundary and near(x[0], 6, tol)


NeumannBoundary().mark(boundaries, 1)

# Define outer surface measure aware of Dirichlet and Neumann boundaries
ds = Measure('ds', domain=mesh, subdomain_data=boundaries)

# Define variational problem
u = TrialFunction(V)
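The marked measure enters the weak form through ds(1). A sketch of the rest of the variational problem, with the body force f and traction T as assumed, illustrative values:

v = TestFunction(V)
f = Constant((0, 0, -0.01))  # assumed body force
T = Constant((0, 0, 0))      # assumed traction on the Neumann boundary
a = inner(sigma(u), epsilon(v)) * dx
L = dot(f, v) * dx + dot(T, v) * ds(1)
u = Function(V)
solve(a == L, u, bc)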
Example #7
def gmsh2xml(ifilename, handler):
    """Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,
    parser implemented as a state machine:

        0 = read 'MeshFormat'
        1 = read  mesh format data
        2 = read 'EndMeshFormat'
        3 = read 'Nodes'
        4 = read  number of vertices
        5 = read  vertices
        6 = read 'EndNodes'
        7 = read 'Elements'
        8 = read  number of cells
        9 = read  cells
        10 = done

    Afterwards, extract physical region numbers if they are defined in
    the mesh file as a mesh function.

    """

    print("Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format")

    # The dimension of the gmsh element types supported here as well as the dolfin cell types for each dimension
    gmsh_dim = {15: 0, 1: 1, 2: 2, 4: 3}
    cell_type_for_dim = {1: "interval", 2: "triangle", 3: "tetrahedron" }
    # the gmsh element types supported for conversion
    supported_gmsh_element_types = [1, 2, 4, 15]

    # Open files
    ifile = open(ifilename, "r")

    # Scan file for cell type
    cell_type = None
    highest_dim = 0
    line = ifile.readline()
    while line:

        # Remove newline
        line = line.rstrip("\n\r")

        # Read dimension
        if line.find("$Elements") == 0:

            line = ifile.readline()
            num_elements = int(line)
            if num_elements == 0:
                _error("No elements found in gmsh file.")
            line = ifile.readline()

            # Now iterate through elements to find largest dimension.  Gmsh
            # format might include elements of lower dimensions in the element list.
            # We also need to count number of elements of correct dimensions.
            # Also determine which vertices are not used.
            dim_count = {0: 0, 1: 0, 2: 0, 3: 0}
            vertices_used_for_dim = {0: [], 1: [], 2: [], 3: []}
            # Array used to store gmsh tags for 1D (type 1/line), 2D (type 2/triangular) elements and 3D (type 4/tet) elements
            tags_for_dim = {0: [], 1: [], 2: [], 3: []}

            while line.find("$EndElements") == -1:
                element = line.split()
                elem_type = int(element[1])
                num_tags = int(element[2])
                if elem_type in supported_gmsh_element_types:
                    dim = gmsh_dim[elem_type]
                    if highest_dim < dim:
                        highest_dim = dim
                    node_num_list = [int(node) for node in element[3 + num_tags:]]
                    vertices_used_for_dim[dim].extend(node_num_list)
                    if num_tags > 0:
                        tags_for_dim[dim].append(tuple(int(tag) for tag in element[3:3+num_tags]))
                    dim_count[dim] += 1
                else:
                    #TODO: output a warning here. "gmsh element type %d not supported" % elem_type
                    pass
                line = ifile.readline()
        else:
            # Read next line
            line = ifile.readline()

    # Check that we got the cell type and set num_cells_counted
    if highest_dim == 0:
        _error("Unable to find cells of supported type.")

    num_cells_counted = dim_count[highest_dim]
    vertex_set = set(vertices_used_for_dim[highest_dim])
    vertices_used_for_dim[highest_dim] = None

    vertex_dict = {}
    for n,v in enumerate(vertex_set):
        vertex_dict[v] = n

    # Step to beginning of file
    ifile.seek(0)

    # Set mesh type
    handler.set_mesh_type(cell_type_for_dim[highest_dim], highest_dim)

    # Initialise node list (gmsh does not export all vertices in order)
    nodelist = {}

    # Current state
    state = 0

    # Write data
    num_vertices_read = 0
    num_cells_read = 0

    # Only import the dolfin objects if facet markings exist
    process_facets = False
    if len(tags_for_dim[highest_dim-1]) > 0:
        # first construct the mesh
        try:
            from dolfin import MeshEditor, Mesh
        except ImportError:
            _error("DOLFIN must be installed to handle Gmsh boundary regions")
        mesh = Mesh()
        mesh_editor = MeshEditor()
        mesh_editor.open(mesh, highest_dim, highest_dim)
        process_facets = True
    else:
        # TODO: Output a warning or an error here
        mesh = None  # referenced below when processing facet markers

    while state != 10:

        # Read next line
        line = ifile.readline()
        if not line: break

        # Skip comments
        if line[0] == '#':
            continue

        # Remove newline
        line = line.rstrip("\n\r")

        if state == 0:
            if line == "$MeshFormat":
                state = 1
        elif state == 1:
            (version, file_type, data_size) = line.split()
            state = 2
        elif state == 2:
            if line == "$EndMeshFormat":
                state = 3
        elif state == 3:
            if line == "$Nodes":
                state = 4
        elif state == 4:
            num_vertices = len(vertex_dict)
            handler.start_vertices(num_vertices)
            if process_facets:
                mesh_editor.init_vertices_global(num_vertices, num_vertices)
            state = 5
        elif state == 5:
            (node_no, x, y, z) = line.split()
            node_no = int(node_no)
            x,y,z = [float(xx) for xx in (x,y,z)]
            if node_no in vertex_dict:
                node_no = vertex_dict[node_no]
            else:
                continue
            nodelist[int(node_no)] = num_vertices_read
            handler.add_vertex(num_vertices_read, [x, y, z])
            if process_facets:
                if highest_dim == 1:
                    coords = numpy.array([x])
                elif highest_dim == 2:
                    coords = numpy.array([x, y])
                elif highest_dim == 3:
                    coords = numpy.array([x, y, z])
                mesh_editor.add_vertex(num_vertices_read, coords)

            num_vertices_read += 1

            if num_vertices == num_vertices_read:
                handler.end_vertices()
                state = 6
        elif state == 6:
            if line == "$EndNodes":
                state = 7
        elif state == 7:
            if line == "$Elements":
                state = 8
        elif state == 8:
            handler.start_cells(num_cells_counted)
            if process_facets:
                mesh_editor.init_cells_global(num_cells_counted, num_cells_counted)

            state = 9
        elif state == 9:
            element = line.split()
            elem_type = int(element[1])
            num_tags  = int(element[2])
            if elem_type in supported_gmsh_element_types:
                dim = gmsh_dim[elem_type]
            else:
                dim = 0
            if dim == highest_dim:
                node_num_list = [vertex_dict[int(node)] for node in element[3 + num_tags:]]
                for node in node_num_list:
                    if not node in nodelist:
                        _error("Vertex %d of %s %d not previously defined." %
                              (node, cell_type_for_dim[dim], num_cells_read))
                cell_nodes = [nodelist[n] for n in node_num_list]
                handler.add_cell(num_cells_read, cell_nodes)

                if process_facets:
                    cell_nodes = numpy.array([nodelist[n] for n in node_num_list], dtype=numpy.uintp)
                    mesh_editor.add_cell(num_cells_read, cell_nodes)

                num_cells_read += 1

            if num_cells_counted == num_cells_read:
                handler.end_cells()
                if process_facets:
                    mesh_editor.close()
                state = 10
        elif state == 10:
            break

    # Write mesh function based on the Physical Regions defined by
    # gmsh, but only if they are not all zero. All zero physical
    # regions indicate that no physical regions were defined.
    if highest_dim not in [1, 2, 3]:
        _error("Gmsh tags not supported for dimension %i. Probably a bug" % highest_dim)

    tags = tags_for_dim[highest_dim]
    physical_regions = tuple(tag[0] for tag in tags)
    if not all(tag == 0 for tag in physical_regions):
        handler.start_meshfunction("physical_region", highest_dim, num_cells_counted)
        for i, physical_region in enumerate(physical_regions):
            handler.add_entity_meshfunction(i, physical_region)
        handler.end_meshfunction()

    # Now process the facet markers
    tags = tags_for_dim[highest_dim-1]
    if (len(tags) > 0) and (mesh is not None):
        physical_regions = tuple(tag[0] for tag in tags)
        if not all(tag == 0 for tag in physical_regions):
            mesh.init(highest_dim - 1, 0)

            # Get the facet-node connectivity information (reshape as a row of node indices per facet)
            if highest_dim == 1:
                # for 1d meshes the mesh topology returns the vertex-to-vertex map,
                # which isn't what we want here, as facets are vertices
                facets_as_nodes = numpy.array([[i] for i in range(mesh.num_facets())])
            else:
                facets_as_nodes = mesh.topology()(highest_dim - 1, 0)().reshape(mesh.num_facets(), highest_dim)

            # Build the reverse map
            nodes_as_facets = {}
            for facet in range(mesh.num_facets()):
                nodes_as_facets[tuple(facets_as_nodes[facet, :])] = facet

            data = [0] * mesh.num_facets()
            for i, physical_region in enumerate(physical_regions):
                nodes = [n-1 for n in vertices_used_for_dim[highest_dim-1][highest_dim*i:(highest_dim*i+highest_dim)]]
                nodes.sort()

                if physical_region != 0:
                    try:
                        index = nodes_as_facets[tuple(nodes)]
                        data[index] = physical_region
                    except KeyError:
                        raise Exception("The facet (%d) was not found to mark: %s" % (i, nodes))

            # Create and initialise the mesh function
            handler.start_meshfunction("facet_region", highest_dim - 1, mesh.num_facets())
            for index, physical_region in enumerate(data):
                handler.add_entity_meshfunction(index, physical_region)
            handler.end_meshfunction()

    # Check that we got all data
    if state == 10:
        print("Conversion done")
    else:
        _error("Missing data, unable to convert.\nDid you use version 2.0 of the gmsh file format?")

    # Close files
    ifile.close()
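A call sketch; it assumes the XmlHandler class from legacy dolfin_utils as the DataHandler implementation, with hypothetical file names:

from dolfin_utils.meshconvert import meshconvert

handler = meshconvert.XmlHandler('mesh.xml')  # assumed handler implementation
gmsh2xml('mesh.msh', handler)
handler.close()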
Example #8
def create_submesh(mesh, markers, marker):
    "This function allows for a SubMesh-equivalent to be created in parallel"
    # Build mesh
    submesh = Mesh()
    mesh_editor = MeshEditor()
    mesh_editor.open(submesh,
                     mesh.ufl_cell().cellname(),
                     mesh.ufl_cell().topological_dimension(),
                     mesh.ufl_cell().geometric_dimension())

    # Return empty mesh if no matching markers
    if MPI.sum(mpi_comm_world(), int(marker in markers.array())) == 0:
        cbc_warning(
            "Unable to find matching markers in meshfunction. Submesh is empty."
        )
        mesh_editor.close()
        return submesh

    base_cell_indices = np.where(markers.array() == marker)[0]
    base_cells = mesh.cells()[base_cell_indices]
    base_vertex_indices = np.unique(base_cells.flatten())

    base_global_vertex_indices = sorted(
        [mesh.topology().global_indices(0)[vi] for vi in base_vertex_indices])

    gi = mesh.topology().global_indices(0)
    shared_local_indices = set(base_vertex_indices).intersection(
        set(mesh.topology().shared_entities(0).keys()))
    shared_global_indices = [gi[vi] for vi in shared_local_indices]

    unshared_global_indices = list(
        set(base_global_vertex_indices) - set(shared_global_indices))
    unshared_vertices_dist = distribution(len(unshared_global_indices))

    # Number unshared vertices on separate process
    idx = sum(unshared_vertices_dist[:MPI.rank(mpi_comm_world())])
    base_to_sub_global_indices = {}
    for gi in unshared_global_indices:
        base_to_sub_global_indices[gi] = idx
        idx += 1

    # Gather all shared process on process 0 and assign global index
    all_shared_global_indices = gather(shared_global_indices,
                                       on_process=0,
                                       flatten=True)
    all_shared_global_indices = np.unique(all_shared_global_indices)

    shared_base_to_sub_global_indices = {}
    idx = int(
        MPI.max(mpi_comm_world(),
                float(max(list(base_to_sub_global_indices.values()) + [-1e16]))) + 1)
    if MPI.rank(mpi_comm_world()) == 0:
        for gi in all_shared_global_indices:
            shared_base_to_sub_global_indices[int(gi)] = idx
            idx += 1

    # Broadcast global numbering of all shared vertices
    shared_base_to_sub_global_indices = dict(
        zip(broadcast(list(shared_base_to_sub_global_indices.keys()), 0),
            broadcast(list(shared_base_to_sub_global_indices.values()), 0)))

    # Join shared and unshared numbering in one dict
    base_to_sub_global_indices = dict(
        list(base_to_sub_global_indices.items()) +
        list(shared_base_to_sub_global_indices.items()))

    # Create mapping of local indices
    base_to_sub_local_indices = dict(
        zip(base_vertex_indices, range(len(base_vertex_indices))))

    # Define sub-cells
    sub_cells = [None] * len(base_cells)
    for i, c in enumerate(base_cells):
        sub_cells[i] = [base_to_sub_local_indices[j] for j in c]

    # Store vertices as sub_vertices[local_index] = (global_index, coordinates)
    sub_vertices = {}
    for base_local, sub_local in base_to_sub_local_indices.items():
        sub_vertices[sub_local] = (base_to_sub_global_indices[
            mesh.topology().global_indices(0)[base_local]],
                                   mesh.coordinates()[base_local])

    ## Done with base mesh

    # Distribute meshdata on (if any) empty processes
    sub_cells, sub_vertices = distribute_meshdata(sub_cells, sub_vertices)
    global_cell_distribution = distribution(len(sub_cells))
    #global_vertex_distribution = distribution(len(sub_vertices))

    global_num_cells = MPI.sum(mpi_comm_world(), len(sub_cells))
    global_num_vertices = sum(unshared_vertices_dist) + MPI.sum(
        mpi_comm_world(), len(all_shared_global_indices))

    mesh_editor.init_vertices(len(sub_vertices))
    #mesh_editor.init_cells(len(sub_cells))
    mesh_editor.init_cells_global(len(sub_cells), global_num_cells)
    global_index_start = sum(
        global_cell_distribution[:MPI.rank(mesh.mpi_comm())])

    for index, cell in enumerate(sub_cells):
        if LooseVersion(dolfin_version()) >= LooseVersion("1.6.0"):
            mesh_editor.add_cell(index, *cell)
        else:
            mesh_editor.add_cell(int(index), global_index_start + index,
                                 np.array(cell, dtype=np.uintp))

    for local_index, (global_index, coordinates) in sub_vertices.items():
        #print coordinates
        mesh_editor.add_vertex_global(int(local_index), int(global_index),
                                      coordinates)

    mesh_editor.close()

    submesh.topology().init(0, len(sub_vertices), global_num_vertices)
    submesh.topology().init(mesh.ufl_cell().topological_dimension(),
                            len(sub_cells), global_num_cells)

    # FIXME: Set up shared entities
    # What damage does this do?
    submesh.topology().shared_entities(0)[0] = []
    # The code below sets up shared vertices, but lacks shared facets.
    # It is considered incomplete, and therefore commented out
    '''
    #submesh.topology().shared_entities(0)[0] = []
    from dolfin import compile_extension_module
    cpp_code = """
    void set_shared_entities(Mesh& mesh, std::size_t idx, const Array<std::size_t>& other_processes)
    {
        std::set<unsigned int> set_other_processes;
        for (std::size_t i=0; i<other_processes.size(); i++)
        {
            set_other_processes.insert(other_processes[i]);
            //std::cout << idx << " --> " << other_processes[i] << std::endl;
        }
        //std::cout << idx << " --> " << set_other_processes[0] << std::endl;
        mesh.topology().shared_entities(0)[idx] = set_other_processes;
    }
    """

    set_shared_entities = compile_extension_module(cpp_code).set_shared_entities
    base_se = mesh.topology().shared_entities(0)
    se = submesh.topology().shared_entities(0)

    for li in shared_local_indices:
        arr = np.array(base_se[li], dtype=np.uintp)
        sub_li = base_to_sub_local_indices[li]
        set_shared_entities(submesh, base_to_sub_local_indices[li], arr)
    '''
    return submesh
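A call sketch for the parallel submesh extraction; the marker setup below is illustrative (the helpers distribution, gather, broadcast and distribute_meshdata are assumed to come from cbcpost, as in the original module):

# Illustrative: extract the subdomain whose cells are marked 1.
markers = MeshFunction("size_t", mesh, mesh.topology().dim(), 0)
# ... mark the cells of interest with 1 ...
submesh = create_submesh(mesh, markers, 1)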
Example #9
    if discretization == "DG":
        Q_element = FiniteElement("DG", mesh.ufl_cell(), 1)
        W_element = BlockElement(V_element, Q_element)
    elif discretization == "EG":
        Q_element = FiniteElement("CG", mesh.ufl_cell(), 1)
        D_element = FiniteElement("DG", mesh.ufl_cell(), 0)
        EG_element = Q_element + D_element
        W_element = BlockElement(V_element, EG_element)
    else:
        raise RuntimeError("Invalid discretization")
    W = BlockFunctionSpace(mesh, W_element)

    PM = FunctionSpace(mesh, "DG", 0)
    TM = TensorFunctionSpace(mesh, "DG", 0)

    I = Identity(mesh.topology().dim())

    dx = Measure("dx", domain=mesh, subdomain_data=subdomains)
    ds = Measure("ds", domain=mesh, subdomain_data=boundaries)
    dS = Measure("dS", domain=mesh, subdomain_data=boundaries)

    # Test and trial functions
    vq = BlockTestFunction(W)
    (v, q) = block_split(vq)
    up = BlockTrialFunction(W)
    (u, p) = block_split(up)

    w = BlockFunction(W)
    w0 = BlockFunction(W)
    (u0, p0) = block_split(w0)
Example #10
    def test_convert_triangle(self):  # Disabled because it fails, see FIXME below

        # test no. 1
        from dolfin import Mesh, MPI

        fname = os.path.join(os.path.dirname(__file__), "data", "triangle")
        dfname = fname + ".xml"

        # Read triangle file and convert to a dolfin xml mesh file
        meshconvert.triangle2xml(fname, dfname)

        # Read in dolfin mesh and check number of cells and vertices
        mesh = Mesh(dfname)
        self.assertEqual(mesh.num_vertices(), 96)
        self.assertEqual(mesh.num_cells(), 159)

        # Clean up
        os.unlink(dfname)

        # test no. 2
        from dolfin import MPI, Mesh, MeshFunction, \
                           edges, Edge, faces, Face, \
                           SubsetIterator, facets

        fname = os.path.join(os.path.dirname(__file__), "data",
                             "test_Triangle_3")
        dfname = fname + ".xml"
        dfname0 = fname + ".attr0.xml"

        # Read triangle file and convert to a dolfin xml mesh file
        meshconvert.triangle2xml(fname, dfname)

        # Read in dolfin mesh and check number of cells and vertices
        mesh = Mesh(dfname)
        mesh.init()
        mfun = MeshFunction('double', mesh, dfname0)
        self.assertEqual(mesh.num_vertices(), 58)
        self.assertEqual(mesh.num_cells(), 58)

        # Create a size_t MeshFunction and assign the values based on the
        # converted Meshfunction
        cf = MeshFunction("size_t", mesh, mesh.topology().dim())
        cf.array()[mfun.array() == 10.0] = 0
        cf.array()[mfun.array() == -10.0] = 1

        # Measure the total area of the cells marked 0 and 1
        from functools import reduce  # needed on Python 3
        add = lambda x, y: x + y
        area0 = reduce(add, (Face(mesh, cell.index()).area() \
                             for cell in SubsetIterator(cf, 0)), 0.0)
        area1 = reduce(add, (Face(mesh, cell.index()).area() \
                             for cell in SubsetIterator(cf, 1)), 0.0)
        total_area = reduce(add, (face.area() for face in faces(mesh)), 0.0)

        # Check that all cells in the two domains are either above or below y=0
        self.assertTrue(
            all(cell.midpoint().y() < 0 for cell in SubsetIterator(cf, 0)))
        self.assertTrue(
            all(cell.midpoint().y() > 0 for cell in SubsetIterator(cf, 1)))

        # Check that the areas add up
        self.assertAlmostEqual(area0 + area1, total_area)

        # Measure the edge length of the two edge domains
        #edge_markers = mesh.domains().facet_domains()
        edge_markers = mesh.domains().markers(mesh.topology().dim() - 1)
        self.assertTrue(edge_markers is not None)
        #length0 = reduce(add, (Edge(mesh, e.index()).length() \
        #                    for e in SubsetIterator(edge_markers, 0)), 0.0)
        length0, length1 = 0.0, 0.0
        for item in list(edge_markers.items()):
            if item[1] == 0:
                e = Edge(mesh, int(item[0]))
                length0 += Edge(mesh, int(item[0])).length()
            elif item[1] == 1:
                length1 += Edge(mesh, int(item[0])).length()

        # Total length of all edges and total length of boundary edges
        total_length = reduce(add, (e.length() for e in edges(mesh)), 0.0)
        boundary_length = reduce(add, (Edge(mesh, f.index()).length() \
                          for f in facets(mesh) if f.exterior()), 0.0)

        # Check that the edges add up
        self.assertAlmostEqual(length0 + length1, total_length)
        self.assertAlmostEqual(length1, boundary_length)

        # Clean up
        os.unlink(dfname)
        os.unlink(dfname0)
Example #11
    h5.read(tile, 'mesh', False)

    # Shift and scale the coordinates
    x = tile.coordinates()
    xmin = x.min(axis=0)
    dx = x.max(axis=0) - xmin

    if args.shift_origin: x[:] -= xmin

    x[:] *= args.scale_x

    # Did it work?
    print(dx, 'vs', x.max(axis=0) - x.min(axis=0), xmin, x.min(axis=0))

    data = {}
    cell_dim = tile.topology().dim()
    facet_dim = cell_dim - 1

    if args.facet_tags:
        data = load_data(tile, (h5, args.facet_tags), facet_dim, data)

    if args.cell_tags:
        data = load_data(tile, (h5, args.cell_tags), cell_dim, data)

    t = Timer('tile')
    mesh, mesh_data = TileMesh(tile, shape, mesh_data=data)
    info('\nTiling took %g s; nvertices %d, nfacets %d, ncells %d' %
         (t.stop(), mesh.num_vertices(), mesh.init(2), mesh.num_cells()))

    x_ = mesh.coordinates()
    print('Final mesh size', x_.min(axis=0), x_.max(axis=0) - x_.min(axis=0))
Example #12
File: assembly.py  Project: pdelab/pyeafe
def eafe_assemble(
    mesh: Mesh,
    diffusion: Coefficient,
    convection: Optional[Coefficient] = None,
    reaction: Optional[Coefficient] = None,
) -> dolfin.Matrix:
    """
    Assembly of stiffness matrix for a generic linear elliptic equation:
    for `u` a continuous piecewise-linear finite element function,
      -div(diffusion * grad(u) + convection * u) + reaction * u = source

    Edge-integral quantities are approximated by a midpoint quadrature rule.

    :param mesh: Mesh defining finite element space
    :param diffusion: Coefficient for the diffusion term
    :param convection: [Optional] Coefficient for the convection term
    :param reaction: [Optional] Coefficient for the reaction term

    :return: dolfin.Matrix pertaining to EAFE stiffness matrix
    """

    if not issubclass(mesh.__class__, Mesh):
        raise TypeError(
            "Invalid mesh parameter: must inherit from dolfin.Mesh")

    logging.getLogger("FFC").setLevel(logging.WARNING)
    quadrature_degree = parameters["form_compiler"]["quadrature_degree"]
    parameters["form_compiler"]["quadrature_degree"] = 2

    spatial_dim: int = mesh.topology().dim()
    cell_vertex_count: int = spatial_dim + 1

    try:
        edge_advection = define_edge_advection(spatial_dim, diffusion,
                                               convection)
        lumped_reaction = define_mass_lumping(cell_vertex_count, reaction)
    except Exception as e:
        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree
        raise e

    V = FunctionSpace(mesh, "Lagrange", 1)
    u = TrialFunction(V)
    v = TestFunction(V)
    a = inner(grad(u), grad(v)) * dx
    A: dolfin.Matrix = assemble(a)
    mat: PETSc.Mat = as_backend_type(A).mat()

    A.zero()
    dof_map = V.dofmap()
    dof_coord = V.tabulate_dof_coordinates()

    for cell in cells(mesh):
        local_tensor = assemble_local(a, cell)
        local_to_global_map = dof_map.cell_dofs(cell.index())
        cell_vertex = np.empty([cell_vertex_count, spatial_dim])

        for local_dof in range(0, cell_vertex_count):
            dof_id = local_to_global_map[local_dof]
            for coord in range(0, spatial_dim):
                cell_vertex[local_dof, coord] = dof_coord[dof_id, coord]

        for vertex_id in range(0, cell_vertex_count):
            vertex = cell_vertex[vertex_id]
            local_tensor[vertex_id, vertex_id] = lumped_reaction(vertex, cell)

            for edge_id in range(0, cell_vertex_count):
                if edge_id == vertex_id:
                    continue

                edge = cell_vertex[edge_id] - vertex
                local_tensor[vertex_id,
                             edge_id] *= edge_advection(vertex, edge, cell)
                local_tensor[vertex_id, vertex_id] -= local_tensor[vertex_id,
                                                                   edge_id]

        local_to_global_list = local_to_global_map.tolist()
        mat.setValuesLocal(
            local_to_global_list,
            local_to_global_list,
            local_tensor,
            PETSc.InsertMode.ADD,
        )

    A.apply("insert")
    mat.assemble()

    parameters["form_compiler"]["quadrature_degree"] = quadrature_degree
    return A
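A usage sketch with assumed coefficients; the unit-square mesh and constant values are illustrative only:

from dolfin import UnitSquareMesh, Constant

# Pure diffusion with a mild reaction term (illustrative values)
mesh = UnitSquareMesh(32, 32)
A = eafe_assemble(mesh, diffusion=Constant(1.0), reaction=Constant(0.1))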
Example #13
def mesh_around_1d(mesh, size=1, scale=10, padding=0.05):
    '''
    From a 1d mesh embedded in Xd (X > 1), given in XML format, produce an Xd
    mesh in which the 1d structure is embedded. Mesh size close to the
    structure is `size` (given as a multiple of hmin()), elsewhere
    scale * size. Padding controls the size of the bounding box.
    '''
    dot = mesh.find('.')
    root, ext = mesh[:dot], mesh[dot:]
    assert ext == '.xml' or ext == '.xml.gz', ext

    mesh = Mesh(mesh)
    gdim = mesh.geometry().dim()
    assert gdim > 1 and mesh.topology().dim() == 1

    x = mesh.coordinates()
    mesh.init(1, 0)

    # Compute fall back mesh size:
    assert size > 0
    size = mesh.hmin() * size

    # Don't allow zero padding - collision of lines with the boundary segfaults
    # too often, so we prevent it
    assert padding > 0
    # Finally scale better be positive
    assert scale > 0

    point = (lambda xi: tuple(xi) + (0, ))\
            if gdim == 2 else (lambda xi: tuple(xi))

    geo = '.'.join([root, 'geo'])
    with open(geo, 'w') as outfile:
        # Setup
        outfile.write('SetFactory("OpenCASCADE");\n')
        outfile.write('size = %g;\n' % size)
        outfile.write('SIZE = %g;\n' % (size * scale))

        # Points
        fmt = 'Point(%d) = {%.16f, %.16f, %.16f, size};\n'
        for i, xi in enumerate(x, 1):
            outfile.write(fmt % ((i, ) + point(xi)))
        # Lines
        fmt = 'Line(%d) = {%d, %d};\n'
        for i, cell in enumerate(cells(mesh), 1):
            outfile.write(fmt % ((i, ) + tuple(cell.entities(0) + 1)))

        # BBox
        xmin, xmax = x.min(0), x.max(0)
        padding = (xmax - xmin) * padding / 2.
        xmin -= padding
        xmax += padding
        dx = xmax - xmin

        if gdim == 2 or dx[-1] < 1E-14:  # All points are on a plane
            rect = 'Rectangle(1) = {%g, %g, %g, %g, %g};\n' % (
                xmin[0], xmin[1], 0 if gdim == 2 else xmin[2], dx[0], dx[1])
            outfile.write(rect)
            bbox = 'Surface'
        else:
            box = 'Box(1) = {%g, %g, %g, %g, %g, %g};\n' % (
                xmin[0], xmin[1], xmin[2], dx[0], dx[1], dx[2])
            outfile.write(box)
            bbox = 'Volume'

        # Crack
        for line in range(1, mesh.num_cells() + 1):
            outfile.write('Line{%d} In %s{1};\n' % (line, bbox))

        # Add Physical volume/surface
        outfile.write('Physical %s(1) = {1};\n' % bbox)

        # Add Physical surface/line
        lines = ', '.join('%d' % v for v in range(1, mesh.num_cells() + 1))
        outfile.write('Physical Line(1) = {%s};\n' % lines)
    return geo, gdim
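A sketch of driving the function, assuming a hypothetical 1d-in-3d mesh vasculature.xml; the resulting .geo file is then meshed outside Python with the gmsh CLI:

geo, gdim = mesh_around_1d('vasculature.xml', size=1, scale=10, padding=0.05)
# then, on the command line, e.g.:  gmsh -3 vasculature.geo  (-2 for planar geometries)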
Example #14
def test(path, type='mf'):
    '''Evolve the tile in (n, n) pattern checking volume/surface properties'''

    comm = mpi_comm_world()
    h5 = HDF5File(comm, path, 'r')
    tile = Mesh()
    h5.read(tile, 'mesh', False)

    init_container = lambda type, dim: (
        MeshFunction('size_t', tile, dim, 0)
        if type == 'mf' else MeshValueCollection('size_t', tile, dim))

    for n in (2, 4):
        data = {}
        checks = {}
        for dim, name in zip((2, 3), ('surfaces', 'volumes')):
            # Get the collection
            collection = init_container(type, dim)
            h5.read(collection, name)

            if type == 'mvc': collection = as_meshf(collection)

            # Data to evolve
            tile.init(dim, 0)
            e2v = tile.topology()(dim, 0)
            # Only want to evolve tag 1 (interfaces) for the facets.
            data[(dim, 1)] = np.array(
                [e2v(e.index()) for e in SubsetIterator(collection, 1)],
                dtype='uintp')

            if dim == 2:
                check = lambda m, f: assemble(
                    FacetArea(m) * ds(domain=m,
                                      subdomain_data=f,
                                      subdomain_id=1) + avg(FacetArea(m)) *
                    dS(domain=m, subdomain_data=f, subdomain_id=1))
            else:
                check = lambda m, f: assemble(
                    CellVolume(m) * dx(
                        domain=m, subdomain_data=f, subdomain_id=1))

            checks[
                dim] = lambda m, f, t=tile, c=collection, n=n, check=check: abs(
                    check(m, f) - n**2 * check(t, c)) / (n**2 * check(t, c))

        t = Timer('x')
        mesh, mesh_data = TileMesh(tile, (n, n), mesh_data=data)
        info('\tTiling took %g s. Ncells %d, nvertices %d, \n' %
             (t.stop(), mesh.num_vertices(), mesh.num_cells()))

        foos = mf_from_data(mesh, mesh_data)
        # Mesh Functions
        from_mf = np.array([checks[dim](mesh, foos[dim]) for dim in (2, 3)])

        mvcs = mvc_from_data(mesh, mesh_data)
        foos = as_meshf(mvcs)
        # Mesh ValueCollections
        from_mvc = np.array([checks[dim](mesh, foos[dim]) for dim in (2, 3)])

        assert np.linalg.norm(from_mf - from_mvc) < 1E-13
        # I ignore shared facets so there is bound to be some error in facets
        # Volume should match well
        print(from_mf)
Example #15
class CavityProblemSetup():
    def __init__(self, parameters, mesh_name, facet_name):
        """
        Create the required function spaces, functions and boundary conditions
        for a channel flow problem
        """
        self.mesh = Mesh()
        with XDMFFile(mesh_name) as infile:
            infile.read(self.mesh)

        mvc = MeshValueCollection("size_t", self.mesh,
                                  self.mesh.topology().dim() - 1)
        with XDMFFile(facet_name) as infile:
            infile.read(mvc, "name_to_read")
        mf = self.mf = cpp.mesh.MeshFunctionSizet(self.mesh, mvc)
        self.bc_dict = {"bottom": 1, "right": 2, "top": 3, "left": 4}

        T_init = Constant(parameters["initial temperature [°C]"])
        self.t_amb = Constant(parameters["ambient temperature [°C]"])
        self.t_feeder = Constant(parameters["temperature feeder [°C]"])
        self.k_top = Constant(parameters["thermal conductivity top [W/(m K)]"])
        self.k_lft = Constant(
            parameters["thermal conductivity left [W/(m K)]"])
        self.k_btm = Constant(
            parameters["thermal conductivity bottom [W/(m K)]"])
        self.k_rgt = Constant(
            parameters["thermal conductivity right [W/(m K)]"])
        self.U_m = Constant(parameters["mean velocity lid [m/s]"])
        g = parameters["gravity [m/s²]"]
        self.g = Constant((0.0, -g))
        self.dt = Constant(parameters["dt [s]"])
        self.D = Constant(parameters["Diffusivity [-]"])

        self.V = V = VectorFunctionSpace(self.mesh, 'P', 2)
        self.Q = Q = FunctionSpace(self.mesh, 'P', 1)
        self.T = T = FunctionSpace(self.mesh, 'P', 1)

        self.ds_ = Measure("ds", domain=self.mesh, subdomain_data=mf)
        self.vu, self.vp, self.vt = (TestFunction(V), TestFunction(Q),
                                     TestFunction(T))
        self.u_, self.p_, self.t_ = Function(V), Function(Q), Function(T)
        self.mu, self.rho = Function(T), Function(T)
        self.u_1, self.p_1, self.t_1, self.rho_1 = (Function(V), Function(Q),
                                                    Function(T), Function(T))
        self.u, self.p, self.t = (TrialFunction(V), TrialFunction(Q),
                                  TrialFunction(T))

        # boundary conditions
        self.no_slip = Constant((0., 0))
        self.topflow = Expression(("-x[0] * (x[0] - 1.0) * 6.0 * m", "0.0"),
                                  m=self.U_m,
                                  degree=2)
        bc0 = DirichletBC(V, self.topflow, mf, self.bc_dict["top"])
        bc1 = DirichletBC(V, self.no_slip, mf, self.bc_dict["left"])
        bc2 = DirichletBC(V, self.no_slip, mf, self.bc_dict["bottom"])
        bc3 = DirichletBC(V, self.no_slip, mf, self.bc_dict["right"])
        # bc4 = df.DirichletBC(Q, df.Constant(0), top)
        # bc3 = df.DirichletBC(T, df.Constant(800), top)
        self.bcu = [bc0, bc1, bc2, bc3]
        self.bcp = [DirichletBC(Q, Constant(0), mf, self.bc_dict["top"])]
        self.bcp = []
        self.bct = []
        # self.bct = [DirichletBC(T, Constant(self.t_feeder), mf,
        #                         self.bc_dict["top"])]

        self.robin_boundary_terms = (
            self.k_btm * (self.t - self.t_amb) * self.vt * self.ds_(1) +
            self.k_rgt * (self.t - self.t_amb) * self.vt * self.ds_(2)
            # + self.k_top*(self.t - self.t_feeder)*self.vt*self.ds_(3)
            + self.k_lft * (self.t - self.t_amb) * self.vt * self.ds_(4))
        print("k, T", self.k_btm.values(), self.t_feeder.values())
        print("k, T", self.k_rgt.values(), self.t_amb.values())
        # print("k, T", self.k_top.values(), self.t_amb.values())
        print("k, T", self.k_lft.values(), self.t_amb.values())

        # set initial values
        # TODO: find a better solution
        x, y = T.tabulate_dof_coordinates().T
        self.u_1.vector().vec().array[:] = 1e-6
        # self.u_k.vector().vec().array[:] = 1e-6
        self.p_.vector().vec().array[:] = -self.rho.vector().vec().array * g * y
        self.p_1.vector().vec().array[:] = -self.rho.vector().vec().array * g * y
        self.t_1.vector().vec().array[:] = float(T_init)
        self.t_.assign(self.t_1)
        return

    def stokes(self):
        P2 = VectorElement("CG", self.mesh.ufl_cell(), 2)
        P1 = FiniteElement("CG", self.mesh.ufl_cell(), 1)
        TH = P2 * P1
        VQ = FunctionSpace(self.mesh, TH)
        mf = self.mf
        self.no_slip = Constant((0., 0))
        self.topflow = Expression(("-x[0] * (x[0] - 1.0) * 6.0 * m", "0.0"),
                                  m=self.U_m,
                                  degree=2)
        bc0 = DirichletBC(VQ.sub(0), self.topflow, mf, self.bc_dict["top"])
        bc1 = DirichletBC(VQ.sub(0), self.no_slip, mf, self.bc_dict["left"])
        bc2 = DirichletBC(VQ.sub(0), self.no_slip, mf, self.bc_dict["bottom"])
        bc3 = DirichletBC(VQ.sub(0), self.no_slip, mf, self.bc_dict["right"])
        # bc4 = DirichletBC(VQ.sub(1), Constant(0), mf, self.bc_dict["top"])
        bcs = [bc0, bc1, bc2, bc3]

        vup = TestFunction(VQ)
        up = TrialFunction(VQ)
        # the solution will be in here:
        up_ = Function(VQ)

        u, p = split(up)  # Trial
        vu, vp = split(vup)  # Test
        u_, p_ = split(up_)  # Function holding the solution
        F = self.mu*inner(grad(vu), grad(u))*dx - inner(div(vu), p)*dx \
            - inner(vp, div(u))*dx + dot(self.g*self.rho, vu)*dx
        solve(lhs(F) == rhs(F), up_, bcs=bcs)
        self.u_.assign(project(u_, self.V))
        self.p_.assign(project(p_, self.Q))
        return

    def initial_condition_from_file(self, path_u, path_p):
        f_in = XDMFFile(path_u)
        f_in.read_checkpoint(self.u_, "f", 0)
        f_in = XDMFFile(path_p)
        f_in.read_checkpoint(self.p_, "f", 0)
        return

    def get_rho(self):
        return self.rho.vector().vec().array

    def set_rho(self, rho):
        self.rho.vector().vec().array[:] = rho

    def get_mu(self):
        return self.mu.vector().vec().array

    def set_mu(self, mu):
        self.mu.vector().vec().array[:] = mu

    def get_t(self):
        return self.t_.vector().vec().array

    def set_t(self, t):
        self.t_.vector().vec().array[:] = t

    def get_dt(self):
        return self.dt.values()

    def set_dt(self, dt):
        self.dt.assign(dt)

    def get_D(self):
        return self.D.values()

    def set_D(self, D):
        self.D.assign(D)

    def get_t_amb(self):
        return self.t_amb.values()

    def set_t_amb(self, t_amb):
        self.t_amb.assign(t_amb)

    def plot(self):
        cmap = mpl.cm.inferno
        cmap_r = mpl.cm.inferno_r
        u, p, t, m, r = self.u_, self.p_, self.t_, self.mu, self.rho
        mesh = self.mesh
        w0 = u.compute_vertex_values(mesh)
        w0.shape = (2, -1)
        magnitude = np.linalg.norm(w0, axis=0)
        x, y = np.split(mesh.coordinates(), 2, 1)
        u, v = np.split(w0, 2, 0)
        x, y, u, v = x.ravel(), y.ravel(), u.ravel(), v.ravel()
        tri = mesh.cells()
        pressure = p.compute_vertex_values(mesh)
        temperature = t.compute_vertex_values(mesh)
        viscosity = m.compute_vertex_values(mesh)
        density = r.compute_vertex_values(mesh)

        fig = plt.figure(figsize=(12, 8))
        ax1 = plt.subplot(121)
        ax2 = plt.subplot(243, sharex=ax1, sharey=ax1)
        ax3 = plt.subplot(244, sharex=ax1, sharey=ax1)
        ax4 = plt.subplot(247, sharex=ax1, sharey=ax1)
        ax5 = plt.subplot(248, sharex=ax1, sharey=ax1)
        ax1.plot(x, y, "k.", ms=.5)
        not0 = magnitude > 1e-6
        if np.sum(not0) > 0:
            ax1.quiver(x[not0], y[not0], u[not0], v[not0], magnitude[not0])
        c2 = ax2.tricontourf(x, y, tri, pressure, levels=40, cmap=cmap)
        c3 = ax3.tricontourf(x, y, tri, temperature, levels=40, cmap=cmap)
        c4 = ax4.tricontourf(
            x,
            y,
            tri,
            viscosity,
            levels=40,
            # vmin=self.mu(800, .1), vmax=self.mu(600, .1),
            cmap=cmap_r)
        c5 = ax5.tricontourf(
            x,
            y,
            tri,
            density,
            levels=40,
            # vmin=self.rho(800.), vmax=self.rho(600.),
            cmap=cmap_r)
        plt.colorbar(c2, ax=ax2)
        plt.colorbar(c3, ax=ax3, ticks=[temperature.min(), temperature.max()])
        plt.colorbar(c4, ax=ax4, ticks=[viscosity.min(), viscosity.max()])
        plt.colorbar(c5, ax=ax5, ticks=[density.min(), density.max()])
        ax1.set_aspect("equal")
        ax2.set_aspect("equal")
        ax3.set_aspect("equal")
        ax4.set_aspect("equal")
        ax5.set_aspect("equal")
        ax1.set_title("velocity\n{:.4f} ... {:.5f} m/s".format(
            magnitude.min(), magnitude.max()))
        ax2.set_title("pressure")
        ax3.set_title("temperature")
        ax4.set_title("viscosity")
        ax5.set_title("density")
        ax1.set_xlim([-.1, 1.1])
        ax1.set_ylim([-.1, 1.1])
        # plt.tight_layout()
        return fig, (ax1, ax2)
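A construction sketch; the parameter values and XDMF file names below are placeholders that match the keys read in __init__:

parameters = {
    "initial temperature [°C]": 660.0,
    "ambient temperature [°C]": 25.0,
    "temperature feeder [°C]": 660.0,
    "thermal conductivity top [W/(m K)]": 100.0,
    "thermal conductivity left [W/(m K)]": 100.0,
    "thermal conductivity bottom [W/(m K)]": 100.0,
    "thermal conductivity right [W/(m K)]": 100.0,
    "mean velocity lid [m/s]": 1.0,
    "gravity [m/s²]": 9.81,
    "dt [s]": 0.1,
    "Diffusivity [-]": 1e-3,
}
problem = CavityProblemSetup(parameters, "cavity.xdmf", "cavity_facets.xdmf")
problem.stokes()  # initialise the velocity and pressure fields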
Example #16
def gmsh_to_dolfin_mesh(ifilename, handler):
    """Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,
    parser implemented as a state machine:

        0 = read 'MeshFormat'
        1 = read  mesh format data
        2 = read 'EndMeshFormat'
        3 = read 'Nodes'
        4 = read  number of vertices
        5 = read  vertices
        6 = read 'EndNodes'
        7 = read 'Elements'
        8 = read  number of cells
        9 = read  cells
        10 = done

    Afterwards, extract physical region numbers if they are defined in
    the mesh file as a mesh function.

    """

    print "Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format"

    # The dimension of the gmsh element types supported here as well as the dolfin cell types for each dimension
    gmsh_dim = {1: 1, 2: 2, 4: 3}
    gmsh_cell_type = {1: "interval", 2: "triangle", 3: "tetrahedron"}
    # the gmsh element types supported for conversion
    supported_gmsh_element_types = [1, 2, 4]

    # Open files
    ifile = open(ifilename, "r")

    # Scan file for cell type
    cell_type = None
    highest_dim = 0
    line = ifile.readline()
    while line:

        # Remove newline
        if line[-1] == "\n":
            line = line[:-1]

        # Read dimension
        if line.find("$Elements") == 0:

            line = ifile.readline()
            num_elements = int(line)
            num_cells_counted = 0
            if num_elements == 0:
                _error("No elements found in gmsh file.")
            line = ifile.readline()

            # Now iterate through elements to find largest dimension.  Gmsh
            # format might include elements of lower dimensions in the element list.
            # We also need to count number of elements of correct dimensions.
            # Also determine which vertices are not used.
            dim_count = {1: 0, 2: 0, 3: 0}
            vertices_used = {1: [], 2: [], 3: []}
            # Array used to store gmsh tags for 1D (type 1/line), 2D (type 2/triangular) elements and 3D (type 4/tet) elements
            tags_for_dim = {1: [], 2: [], 3: []}

            while line.find("$EndElements") == -1:
                element = line.split()
                elem_type = int(element[1])
                num_tags = int(element[2])

                if elem_type in supported_gmsh_element_types:
                    dim = gmsh_dim[elem_type]
                    if highest_dim < dim:
                        highest_dim = dim

                    node_num_list = [
                        int(node) for node in element[3 + num_tags:]
                    ]
                    vertices_used[dim].extend(node_num_list)
                    if num_tags > 0:
                        tags_for_dim[dim].append(
                            tuple(int(tag) for tag in element[3:3 + num_tags]))
                    dim_count[dim] += 1
                else:
                    #TODO: output a warning here. "gmsh element type %d not supported" % elem_type
                    pass

                line = ifile.readline()
        else:
            # Read next line
            line = ifile.readline()

    # Check that we got the cell type and set num_cells_counted
    if highest_dim == 0:
        _error("Unable to find cells of supported type.")

    num_cells_counted = dim_count[highest_dim]
    vertex_set = set(vertices_used[highest_dim])
    vertices_used[highest_dim] = None

    vertex_dict = {}
    for n, v in enumerate(vertex_set):
        vertex_dict[v] = n

    # Step to beginning of file
    ifile.seek(0)

    # Set mesh type
    handler.set_mesh_type(gmsh_cell_type[highest_dim], highest_dim)

    # Initialise node list (gmsh does not export all vertices in order)
    nodelist = {}

    # Current state
    state = 0

    # Write data
    num_vertices_read = 0
    num_cells_read = 0

    # Now handle the facet markings
    if len(tags_for_dim[highest_dim - 1]) > 0:
        # first construct the mesh
        from dolfin import MeshEditor, Mesh
        mesh = Mesh()
        me = MeshEditor()
        me.open(mesh, highest_dim, highest_dim)
    else:
        me = None

    while state != 10:

        # Read next line
        line = ifile.readline()
        if not line: break

        # Skip comments
        if line[0] == '#':
            continue

        # Remove newline
        if line[-1] == "\n":
            line = line[:-1]

        if state == 0:
            if line == "$MeshFormat":
                state = 1
        elif state == 1:
            (version, file_type, data_size) = line.split()
            state = 2
        elif state == 2:
            if line == "$EndMeshFormat":
                state = 3
        elif state == 3:
            if line == "$Nodes":
                state = 4
        elif state == 4:
            num_vertices = len(vertex_dict)
            handler.start_vertices(num_vertices)
            if me is not None:
                me.init_vertices(num_vertices)
            state = 5
        elif state == 5:
            (node_no, x, y, z) = line.split()
            node_no = int(node_no)
            x, y, z = [float(xx) for xx in (x, y, z)]
            if node_no in vertex_dict:
                node_no = vertex_dict[node_no]
            else:
                continue
            nodelist[int(node_no)] = num_vertices_read
            handler.add_vertex(num_vertices_read, [x, y, z])
            if me is not None:
                if highest_dim == 1:
                    me.add_vertex(num_vertices_read, x)
                elif highest_dim == 2:
                    me.add_vertex(num_vertices_read, x, y)
                elif highest_dim == 3:
                    me.add_vertex(num_vertices_read, x, y, z)

            num_vertices_read += 1

            if num_vertices == num_vertices_read:
                handler.end_vertices()
                state = 6
        elif state == 6:
            if line == "$EndNodes":
                state = 7
        elif state == 7:
            if line == "$Elements":
                state = 8
        elif state == 8:
            handler.start_cells(num_cells_counted)
            if me is not None:
                me.init_cells(num_cells_counted)

            state = 9
        elif state == 9:
            element = line.split()
            elem_type = int(element[1])
            num_tags = int(element[2])
            if elem_type in supported_gmsh_element_types:
                dim = gmsh_dim[elem_type]
            else:
                dim = 0
            if dim == highest_dim:
                node_num_list = [
                    vertex_dict[int(node)] for node in element[3 + num_tags:]
                ]
                for node in node_num_list:
                    if not node in nodelist:
                        _error("Vertex %d of %s %d not previously defined." %
                               (node, gmsh_cell_type[dim], num_cells_read))
                cell_nodes = [nodelist[n] for n in node_num_list]
                handler.add_cell(num_cells_read, cell_nodes)

                if me is not None:
                    me.add_cell(num_cells_read, *cell_nodes)

                num_cells_read += 1

            if num_cells_counted == num_cells_read:
                handler.end_cells()
                if me is not None:
                    me.close()
                state = 10
        elif state == 10:
            break

    # Write mesh function based on the Physical Regions defined by
    # gmsh, but only if they are not all zero. All zero physical
    # regions indicate that no physical regions were defined.
    if highest_dim not in [1, 2, 3]:
        _error("Gmsh tags not supported for dimension %i. Probably a bug" %
               highest_dim)

    tags = tags_for_dim[highest_dim]
    physical_regions = tuple(tag[0] for tag in tags)
    if not all(tag == 0 for tag in physical_regions):
        handler.start_meshfunction("physical_region", dim, num_cells_counted)
        for i, physical_region in enumerate(physical_regions):
            handler.add_entity_meshfunction(i, physical_region)
        handler.end_meshfunction()

    # Now process the facet markers
    tags = tags_for_dim[highest_dim - 1]
    if len(tags) > 0:

        print(tags)
        print(vertices_used[highest_dim - 1])

        physical_regions = tuple(tag[0] for tag in tags)
        if not all(tag == 0 for tag in physical_regions):
            mesh.init(highest_dim - 1, 0)

            # Get the facet-node connectivity information (reshape as a row of node indices per facet)
            facets_as_nodes = mesh.topology()(highest_dim - 1, 0)().reshape(
                mesh.num_facets(), highest_dim)

            #            from dolfin import MeshFunction
            #            # Create and initialise the mesh function
            #            facet_mark_function = MeshFunction ( 'uint', mesh, highest_dim-1 )
            #            facet_mark_function.set_all( 0 )
            handler.start_meshfunction("facet_region", highest_dim - 1,
                                       mesh.num_facets())

            facets_to_check = list(range(mesh.num_facets()))

            data = [0] * len(facets_to_check)

            for i, physical_region in enumerate(physical_regions):
                nodes = [
                    n - 1
                    for n in vertices_used[highest_dim -
                                           1][2 * i:(2 * i + highest_dim)]
                ]
                nodes.sort()

                if physical_region != 0:
                    found = False
                    for j in range(len(facets_to_check)):
                        index = facets_to_check[j]
                        if all(facets_as_nodes[index, k] == nodes[k]
                               for k in range(len(nodes))):
                            found = True
                            facets_to_check.pop(j)
                            # set the value of the mesh function
                            #                            facet_mark_function[index] = physical_region
                            data[index] = physical_region
                            break

                    if not found:
                        raise Exception(
                            "The facet (%d) was not found to mark: %s" %
                            (i, nodes))


            for index, physical_region in enumerate(data):
                handler.add_entity_meshfunction(index, physical_region)
            handler.end_meshfunction()

    # Check that we got all data
    if state == 10:
        print "Conversion done"
    else:
        _error(
            "Missing data, unable to convert.\n"
            "Did you use version 2.0 of the gmsh file format?"
        )

    # Close files
    ifile.close()
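The facet-marking loop above rescans facets_to_check for every tagged facet, which is quadratic in the number of facets. The gmsh2xml variant in Example #22 below avoids this with a reverse map from node tuples to facet indices. A minimal sketch of that idea, assuming facets_as_nodes is the (num_facets, highest_dim) connectivity array built above:

def build_facet_lookup(facets_as_nodes):
    # Map each facet's sorted vertex tuple to its facet index;
    # sorting makes the key independent of vertex ordering.
    lookup = {}
    for facet_index, facet_nodes in enumerate(facets_as_nodes):
        lookup[tuple(sorted(facet_nodes))] = facet_index
    return lookup

# Usage: one dict lookup per tagged facet instead of a linear scan.
# nodes_as_facets = build_facet_lookup(facets_as_nodes)
# index = nodes_as_facets[tuple(sorted(nodes))]  # KeyError if the facet is missing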
Example #17
def create_submesh(mesh, markers):
    mpi_comm = mesh.mpi_comm()
    if not has_pybind11():
        mpi_comm = mpi_comm.tompi4py()
    assert isinstance(markers, MeshFunctionBool)
    assert markers.dim() == mesh.topology().dim()
    marker_id = True
    
    # == 1. Extract marked cells == #
    # Dolfin does not support a distributed mesh that is empty on some processes.
    # cbcpost gets around this by moving a single cell from a non-empty processor to an empty one.
    # Note, however, that this cannot work if the number of marked cells is less than the number of processors.
    # To handle this case, we enable at least one cell (arbitrarily) on each processor.
    # We find this solution acceptable for our purposes, despite the increase in the reduced mesh size,
    # since we are never actually interested in solving a PDE on the reduced mesh, but rather only in
    # assembling tensors on it and extracting their values at some locations.
    backup_first_marker_id = None
    if marker_id not in markers.array():
        backup_first_marker_id = markers.array()[0]
        markers.array()[0] = marker_id
    assert marker_id in markers.array()
    
    # == 2. Create submesh == #
    submesh = Mesh(mesh.mpi_comm())
    mesh_editor = MeshEditor()
    mesh_editor.open(submesh,
                     mesh.ufl_cell().cellname(),
                     mesh.ufl_cell().topological_dimension(),
                     mesh.ufl_cell().geometric_dimension())
    # Extract cells from mesh with specified marker_id
    mesh_cell_indices = where(markers.array() == marker_id)[0]
    mesh_cells = mesh.cells()[mesh_cell_indices]
    mesh_global_cell_indices = sorted([mesh.topology().global_indices(mesh.topology().dim())[cell_index] for cell_index in mesh_cell_indices])
    # Get vertices of extracted cells
    mesh_vertex_indices = unique(mesh_cells.flatten())
    mesh_global_vertex_indices = sorted([mesh.topology().global_indices(0)[vertex_index] for vertex_index in mesh_vertex_indices])
    # Number vertices in a way which is independent from the number of processors. To do so ...
    # ... first of all collect all vertices from all processors
    allgathered_mesh_global_vertex_indices__non_empty_processors = list()
    allgathered_mesh_global_vertex_indices__empty_processors = list()
    for r in range(mpi_comm.size):
        backup_first_marker_id_r = mpi_comm.bcast(backup_first_marker_id, root=r)
        if backup_first_marker_id_r is None:
            allgathered_mesh_global_vertex_indices__non_empty_processors.extend(mpi_comm.bcast(mesh_global_vertex_indices, root=r))
        else:
            allgathered_mesh_global_vertex_indices__empty_processors.extend(mpi_comm.bcast(mesh_global_vertex_indices, root=r))
    allgathered_mesh_global_vertex_indices__non_empty_processors = sorted(unique(allgathered_mesh_global_vertex_indices__non_empty_processors))
    allgathered_mesh_global_vertex_indices__empty_processors = sorted(unique(allgathered_mesh_global_vertex_indices__empty_processors))
    # ... then create a dict that will contain the map from mesh global vertex index to submesh global vertex index.
    # ... Here make sure to number first "real" vertices (those coming from non empty processors), since the other ones
    # ... are just a side effect of the current partitioning!
    allgathered_mesh_to_submesh_vertex_global_indices = dict()
    _submesh_vertex_global_index = 0
    for mesh_vertex_global_index in allgathered_mesh_global_vertex_indices__non_empty_processors:
        assert mesh_vertex_global_index not in allgathered_mesh_to_submesh_vertex_global_indices
        allgathered_mesh_to_submesh_vertex_global_indices[mesh_vertex_global_index] = _submesh_vertex_global_index
        _submesh_vertex_global_index += 1
    for mesh_vertex_global_index in allgathered_mesh_global_vertex_indices__empty_processors:
        if mesh_vertex_global_index not in allgathered_mesh_to_submesh_vertex_global_indices:
            allgathered_mesh_to_submesh_vertex_global_indices[mesh_vertex_global_index] = _submesh_vertex_global_index
            _submesh_vertex_global_index += 1
    # Number cells in a way which is independent from the number of processors. To do so ...
    # ... first of all collect all cells from all processors
    allgathered_mesh_global_cell_indices__non_empty_processors = list()
    allgathered_mesh_global_cell_indices__empty_processors = list()
    for r in range(mpi_comm.size):
        backup_first_marker_id_r = mpi_comm.bcast(backup_first_marker_id, root=r)
        if backup_first_marker_id_r is None:
            allgathered_mesh_global_cell_indices__non_empty_processors.extend(mpi_comm.bcast(mesh_global_cell_indices, root=r))
        else:
            allgathered_mesh_global_cell_indices__empty_processors.extend(mpi_comm.bcast(mesh_global_cell_indices, root=r))
    allgathered_mesh_global_cell_indices__non_empty_processors = sorted(unique(allgathered_mesh_global_cell_indices__non_empty_processors))
    allgathered_mesh_global_cell_indices__empty_processors = sorted(unique(allgathered_mesh_global_cell_indices__empty_processors))
    # ... then create a dict that will contain the map from mesh global cell index to submesh global cell index.
    # ... Here make sure to number first "real" cells (those coming from non empty processors), since the other ones
    # ... are just a side effect of the current partitioning!
    allgathered_mesh_to_submesh_cell_global_indices = dict()
    _submesh_cell_global_index = 0
    for mesh_cell_global_index in allgathered_mesh_global_cell_indices__non_empty_processors:
        assert mesh_cell_global_index not in allgathered_mesh_to_submesh_cell_global_indices
        allgathered_mesh_to_submesh_cell_global_indices[mesh_cell_global_index] = _submesh_cell_global_index
        _submesh_cell_global_index += 1
    for mesh_cell_global_index in allgathered_mesh_global_cell_indices__empty_processors:
        assert mesh_cell_global_index not in allgathered_mesh_to_submesh_cell_global_indices
        allgathered_mesh_to_submesh_cell_global_indices[mesh_cell_global_index] = _submesh_cell_global_index
        _submesh_cell_global_index += 1
    # Also create a mapping from mesh local vertex index to submesh local vertex index.
    mesh_to_submesh_vertex_local_indices = dict(zip(mesh_vertex_indices, list(range(len(mesh_vertex_indices)))))
    # Also create a mapping from mesh local cell index to submesh local cell index.
    mesh_to_submesh_cell_local_indices = dict(zip(mesh_cell_indices, list(range(len(mesh_cell_indices)))))
    # Now, define submesh cells
    submesh_cells = list()
    for i, c in enumerate(mesh_cells):
        submesh_cells.append([mesh_to_submesh_vertex_local_indices[j] for j in c])
    # Store vertices as submesh_vertices[local_index] = (global_index, coordinates)
    submesh_vertices = dict()
    for mesh_vertex_local_index, submesh_vertex_local_index in mesh_to_submesh_vertex_local_indices.items():
        submesh_vertices[submesh_vertex_local_index] = (
            allgathered_mesh_to_submesh_vertex_global_indices[mesh.topology().global_indices(0)[mesh_vertex_local_index]],
            mesh.coordinates()[mesh_vertex_local_index]
        )
    # Collect the global number of vertices and cells
    global_num_cells = mpi_comm.allreduce(len(submesh_cells), op=SUM)
    global_num_vertices = len(allgathered_mesh_to_submesh_vertex_global_indices)
    # Fill in mesh_editor
    mesh_editor.init_vertices_global(len(submesh_vertices), global_num_vertices)
    mesh_editor.init_cells_global(len(submesh_cells), global_num_cells)
    for local_index, cell_vertices in enumerate(submesh_cells):
        if has_pybind11():
            mesh_editor.add_cell(local_index, cell_vertices)
        else:
            mesh_editor.add_cell(local_index, *cell_vertices)
    for local_index, (global_index, coordinates) in submesh_vertices.items():
        mesh_editor.add_vertex_global(local_index, global_index, coordinates)
    mesh_editor.close()
    # Initialize topology
    submesh.topology().init(0, len(submesh_vertices), global_num_vertices)
    submesh.topology().init(mesh.ufl_cell().topological_dimension(), len(submesh_cells), global_num_cells)
    # Correct the global index of cells
    for local_index in range(len(submesh_cells)):
        submesh.topology().set_global_index(
            submesh.topology().dim(),
            local_index,
            allgathered_mesh_to_submesh_cell_global_indices[mesh_global_cell_indices[local_index]]
        )
    
    # == 3. Store (local) mesh to/from submesh map for cells, facets and vertices == #
    # Cells
    submesh.mesh_to_submesh_cell_local_indices = mesh_to_submesh_cell_local_indices
    submesh.submesh_to_mesh_cell_local_indices = mesh_cell_indices
    # Vertices
    submesh.mesh_to_submesh_vertex_local_indices = mesh_to_submesh_vertex_local_indices
    submesh.submesh_to_mesh_vertex_local_indices = mesh_vertex_indices
    # Facets
    mesh_vertices_to_mesh_facets = dict()
    mesh_facets_to_mesh_vertices = dict()
    for mesh_cell_index in mesh_cell_indices:
        mesh_cell = Cell(mesh, mesh_cell_index)
        for mesh_facet in facets(mesh_cell):
            mesh_facet_vertices = list()
            for mesh_facet_vertex in vertices(mesh_facet):
                mesh_facet_vertices.append(mesh_facet_vertex.index())
            mesh_facet_vertices = tuple(sorted(mesh_facet_vertices))
            if mesh_facet_vertices in mesh_vertices_to_mesh_facets:
                assert mesh_vertices_to_mesh_facets[mesh_facet_vertices] == mesh_facet.index()
            else:
                mesh_vertices_to_mesh_facets[mesh_facet_vertices] = mesh_facet.index()
            if mesh_facet.index() in mesh_facets_to_mesh_vertices:
                assert mesh_facets_to_mesh_vertices[mesh_facet.index()] == mesh_facet_vertices
            else:
                mesh_facets_to_mesh_vertices[mesh_facet.index()] = mesh_facet_vertices
    submesh_vertices_to_submesh_facets = dict()
    submesh_facets_to_submesh_vertices = dict()
    for submesh_facet in facets(submesh):
        submesh_facet_vertices = list()
        for submesh_facet_vertex in vertices(submesh_facet):
            submesh_facet_vertices.append(submesh_facet_vertex.index())
        submesh_facet_vertices = tuple(sorted(submesh_facet_vertices))
        assert submesh_facet_vertices not in submesh_vertices_to_submesh_facets
        submesh_vertices_to_submesh_facets[submesh_facet_vertices] = submesh_facet.index()
        assert submesh_facet.index() not in submesh_facets_to_submesh_vertices
        submesh_facets_to_submesh_vertices[submesh_facet.index()] = submesh_facet_vertices
    mesh_to_submesh_facets_local_indices = dict()
    for (mesh_facet_index, mesh_vertices) in mesh_facets_to_mesh_vertices.items():
        submesh_vertices = tuple(sorted([submesh.mesh_to_submesh_vertex_local_indices[mesh_vertex] for mesh_vertex in mesh_vertices]))
        submesh_facet_index = submesh_vertices_to_submesh_facets[submesh_vertices]
        mesh_to_submesh_facets_local_indices[mesh_facet_index] = submesh_facet_index
    submesh_to_mesh_facets_local_indices = dict()
    for (submesh_facet_index, submesh_vertices) in submesh_facets_to_submesh_vertices.items():
        mesh_vertices = tuple(sorted([submesh.submesh_to_mesh_vertex_local_indices[submesh_vertex] for submesh_vertex in submesh_vertices]))
        mesh_facet_index = mesh_vertices_to_mesh_facets[mesh_vertices]
        submesh_to_mesh_facets_local_indices[submesh_facet_index] = mesh_facet_index
    submesh.mesh_to_submesh_facet_local_indices = mesh_to_submesh_facets_local_indices
    submesh.submesh_to_mesh_facet_local_indices = list()
    assert min(submesh_to_mesh_facets_local_indices.keys()) == 0
    assert max(submesh_to_mesh_facets_local_indices.keys()) == len(submesh_to_mesh_facets_local_indices.keys()) - 1
    for submesh_facet_index in range(len(submesh_to_mesh_facets_local_indices)):
        submesh.submesh_to_mesh_facet_local_indices.append(submesh_to_mesh_facets_local_indices[submesh_facet_index])
    # == 3bis. Prepare (temporary) global indices of facets == #
    # Wrapper to DistributedMeshTools::number_entities
    if has_pybind11():
        cpp_code = """
            #include <pybind11/pybind11.h>
            #include <dolfin/mesh/DistributedMeshTools.h>
            #include <dolfin/mesh/Mesh.h>
            
            void initialize_global_indices(std::shared_ptr<dolfin::Mesh> mesh, std::size_t dim)
            {
                dolfin::DistributedMeshTools::number_entities(*mesh, dim);
            }
            
            PYBIND11_MODULE(SIGNATURE, m)
            {
                m.def("initialize_global_indices", &initialize_global_indices);
            }
        """
        initialize_global_indices = compile_cpp_code(cpp_code).initialize_global_indices
    else:
        cpp_code = """
            void initialize_global_indices(Mesh & mesh, std::size_t dim)
            {
                DistributedMeshTools::number_entities(mesh, dim);
            }
        """
        initialize_global_indices = compile_extension_module(cpp_code, additional_system_headers=["dolfin/mesh/DistributedMeshTools.h"]).initialize_global_indices
    initialize_global_indices(mesh, mesh.topology().dim() - 1)
    # Prepare global indices of facets
    mesh_facets_local_to_global_indices = dict()
    for mesh_cell_index in mesh_cell_indices:
        mesh_cell = Cell(mesh, mesh_cell_index)
        for mesh_facet in facets(mesh_cell):
            mesh_facets_local_to_global_indices[mesh_facet.index()] = mesh_facet.global_index()
    mesh_facets_global_indices_in_submesh = list()
    for mesh_facet_local_index in mesh_to_submesh_facets_local_indices.keys():
        mesh_facets_global_indices_in_submesh.append(mesh_facets_local_to_global_indices[mesh_facet_local_index])
    allgathered__mesh_facets_global_indices_in_submesh = list()
    for r in range(mpi_comm.size):
        allgathered__mesh_facets_global_indices_in_submesh.extend(mpi_comm.bcast(mesh_facets_global_indices_in_submesh, root=r))
    allgathered__mesh_facets_global_indices_in_submesh = sorted(set(allgathered__mesh_facets_global_indices_in_submesh))
    mesh_to_submesh_facets_global_indices = dict()
    for (submesh_facet_global_index, mesh_facet_global_index) in enumerate(allgathered__mesh_facets_global_indices_in_submesh):
        mesh_to_submesh_facets_global_indices[mesh_facet_global_index] = submesh_facet_global_index
    submesh_facets_local_to_global_indices = dict()
    for (submesh_facet_local_index, mesh_facet_local_index) in submesh_to_mesh_facets_local_indices.items():
        submesh_facets_local_to_global_indices[submesh_facet_local_index] = mesh_to_submesh_facets_global_indices[mesh_facets_local_to_global_indices[mesh_facet_local_index]]
    
    # == 4. Assign shared vertices == #
    shared_entities_dimensions = {
        "vertex": 0,
        "facet": submesh.topology().dim() - 1,
        "cell": submesh.topology().dim()
    }
    shared_entities_class = {
        "vertex": Vertex,
        "facet": Facet,
        "cell": Cell
    }
    shared_entities_iterator = {
        "vertex": vertices,
        "facet": facets,
        "cell": cells
    }
    shared_entities_submesh_global_index_getter = {
        "vertex": lambda entity: entity.global_index(),
        "facet": lambda entity: submesh_facets_local_to_global_indices[entity.index()],
        "cell": lambda entity: entity.global_index()
    }
    for entity_type in ["vertex", "facet", "cell"]: # do not use .keys() because the order is important
        dim = shared_entities_dimensions[entity_type]
        class_ = shared_entities_class[entity_type]
        iterator = shared_entities_iterator[entity_type]
        submesh_global_index_getter = shared_entities_submesh_global_index_getter[entity_type]
        # Get shared entities from mesh. A subset of these will end up being shared entities in the submesh as well
        # (thanks to the fact that we do not redistribute cells from one processor to another)
        if mpi_comm.size > 1: # some entities may not be initialized in serial, since they are not needed
            assert mesh.topology().have_shared_entities(dim), "Mesh shared entities have not been initialized for dimension " + str(dim)
        if mesh.topology().have_shared_entities(dim): # always true in parallel (when really needed)
            # However, it may happen that an entity which has been selected is not shared anymore because only one of
            # the sharing processes has it in the submesh. For instance, consider the case
            # of two cells across the interface (located on a facet f) between two processors. It may happen that
            # only one of the two cells is selected: the facet f and its vertices are not shared anymore!
            # For this reason, we create a new dict from global entity index to processors sharing them. Thus ...
            # ... first of all get global indices corresponding to local entities
            if entity_type in ["vertex", "cell"]:
                assert submesh.topology().have_global_indices(dim), "Submesh global indices have not been initialized for dimension " + str(dim)
            submesh_local_entities_global_index = list()
            submesh_local_entities_global_to_local_index = dict()
            for entity in iterator(submesh):
                local_entity_index = entity.index()
                global_entity_index = submesh_global_index_getter(entity)
                submesh_local_entities_global_index.append(global_entity_index)
                submesh_local_entities_global_to_local_index[global_entity_index] = local_entity_index
            # ... then gather all global indices from all processors
            gathered__submesh_local_entities_global_index = list() # over processor id
            for r in range(mpi_comm.size):
                gathered__submesh_local_entities_global_index.append(mpi_comm.bcast(submesh_local_entities_global_index, root=r))
            # ... then create dict from global index to processors sharing it
            submesh_shared_entities__global = dict()
            for r in range(mpi_comm.size):
                for global_entity_index in gathered__submesh_local_entities_global_index[r]:
                    if global_entity_index not in submesh_shared_entities__global:
                        submesh_shared_entities__global[global_entity_index] = list()
                    submesh_shared_entities__global[global_entity_index].append(r)
            # ... and finally populate the shared entities dict, which is the same as the dict above except that
            # the current processor rank is removed and a local indexing is used
            submesh_shared_entities = dict() # from local index to list of integers
            for (global_entity_index, processors) in submesh_shared_entities__global.items():
                if (
                    mpi_comm.rank in processors  # only local entities
                        and
                    len(processors) > 1 # it was still shared after submesh extraction
                ):
                    other_processors_list = list(processors)
                    other_processors_list.remove(mpi_comm.rank)
                    other_processors = array(other_processors_list, dtype=uintp)
                    submesh_shared_entities[submesh_local_entities_global_to_local_index[global_entity_index]] = other_processors

            # Need an extension module to populate shared_entities because in python each call to shared_entities
            # returns a temporary.
            if has_pybind11():
                cpp_code = """
                    #include <Eigen/Core>
                    #include <pybind11/pybind11.h>
                    #include <pybind11/eigen.h>
                    #include <dolfin/mesh/Mesh.h>
                    
                    using OtherProcesses = Eigen::Ref<const Eigen::Matrix<std::size_t, Eigen::Dynamic, 1>>;
                    
                    void set_shared_entities(std::shared_ptr<dolfin::Mesh> submesh, std::size_t idx, const OtherProcesses other_processes, std::size_t dim)
                    {
                        std::set<unsigned int> set_other_processes;
                        for (std::size_t i(0); i < other_processes.size(); i++)
                            set_other_processes.insert(other_processes[i]);
                        submesh->topology().shared_entities(dim)[idx] = set_other_processes;
                    }
                    
                    PYBIND11_MODULE(SIGNATURE, m)
                    {
                        m.def("set_shared_entities", &set_shared_entities);
                    }
                """
                set_shared_entities = compile_cpp_code(cpp_code).set_shared_entities
            else:
                cpp_code = """
                    void set_shared_entities(Mesh & submesh, std::size_t idx, const Array<std::size_t>& other_processes, std::size_t dim)
                    {
                        std::set<unsigned int> set_other_processes;
                        for (std::size_t i(0); i < other_processes.size(); i++)
                            set_other_processes.insert(other_processes[i]);
                        submesh.topology().shared_entities(dim)[idx] = set_other_processes;
                    }
                """
                set_shared_entities = compile_extension_module(cpp_code).set_shared_entities
            for (submesh_entity_local_index, other_processors) in submesh_shared_entities.items():
                set_shared_entities(submesh, submesh_entity_local_index, other_processors, dim)
                
            log(DEBUG, "Local indices of shared entities for dimension " + str(dim) + ": " + str(list(submesh.topology().shared_entities(0).keys())))
            log(DEBUG, "Global indices of shared entities for dimension " + str(dim) + ": " + str([class_(submesh, local_index).global_index() for local_index in submesh.topology().shared_entities(dim).keys()]))
    
    # == 5. Also initialize submesh facets global indices, now that shared facets have been computed == #
    initialize_global_indices(submesh, submesh.topology().dim() - 1) # note that DOLFIN might change the numbering when compared to the one at 3bis
    
    # == 6. Restore backup_first_marker_id and return == #
    if backup_first_marker_id is not None:
        markers.array()[0] = backup_first_marker_id
    return submesh
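A hedged usage sketch for create_submesh, assuming legacy FEniCS (dolfin) where MeshFunction("bool", ...) yields a MeshFunctionBool; the mesh and the marking criterion are hypothetical:

from dolfin import UnitSquareMesh, MeshFunction, cells

mesh = UnitSquareMesh(8, 8)
markers = MeshFunction("bool", mesh, mesh.topology().dim())
markers.set_all(False)
for cell in cells(mesh):
    # hypothetical criterion: keep cells left of x = 0.5
    markers[cell] = cell.midpoint().x() < 0.5
submesh = create_submesh(mesh, markers)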
Example #18
def load_geometry_from_h5(
    h5name,
    h5group="",
    fendo=None,
    fepi=None,
    include_sheets=True,
    comm=mpi_comm_world,
):
    """Load geometry and other mesh data from
    a h5file to an object.
    If the file contains muliple fiber fields
    you can spefify the angles, and if the file
    contais sheets and cross-sheets this can also
    be included

    :param str h5name: Name of the h5file
    :param str h5group: The group within the file
    :param int fendo: Helix fiber angle (endocardium) (if available)
    :param int fepi: Helix fiber angle (epicardium) (if available)
    :param bool include_sheets: Include sheets and cross-sheets
    :returns: An object with geometry data
    :rtype: object

    """
    # Set default groups
    ggroup = "{}/geometry".format(h5group)
    mgroup = "{}/mesh".format(ggroup)
    lgroup = "{}/local basis functions".format(h5group)
    fgroup = "{}/microstructure".format(h5group)

    if not os.path.isfile(h5name):
        raise IOError("File {} does not exist".format(h5name))

    # Check that the given file contains
    # the geometry in the given h5group
    if not check_h5group(h5name, mgroup, delete=False, comm=comm):
        msg = ("Error!\nGroup: '{}' does not exist in file:"
               "\n{}").format(mgroup, h5name)

        with h5py.File(h5name, "r") as h:
            keys = h.keys()
        msg += "\nPossible values for the h5group are {}".format(keys)
        raise IOError(msg)

    # Dummy class to store attributes in
    class Geometry(object):
        pass

    geo = Geometry()

    with HDF5File(comm, h5name, "r") as h5file:

        # Load mesh
        mesh = Mesh(comm)
        h5file.read(mesh, mgroup, False)
        geo.mesh = mesh

        # Get mesh functions
        meshfunctions = ["vfun", "efun", "ffun", "cfun"]\
            if mesh.topology().dim() == 3 else ["vfun", "ffun", "cfun"]

        for dim, attr in enumerate(meshfunctions):
            dgroup = "{}/mesh/meshfunction_{}".format(ggroup, dim)
            mf = MeshFunction("size_t", mesh, dim, mesh.domains())

            if h5file.has_dataset(dgroup):
                h5file.read(mf, dgroup)
                setattr(geo, attr, mf)

        load_local_basis(h5file, lgroup, mesh, geo)
        load_microstructure(h5file, fgroup, mesh, geo, include_sheets)

        # Load the boundary markers
        markers = load_markers(h5file, mesh, ggroup, dgroup)
        geo.markers = markers

        origmeshgroup = "{}/original_geometry".format(h5group)
        if h5file.has_dataset(origmeshgroup):
            original_mesh = Mesh(comm)
            h5file.read(original_mesh, origmeshgroup, True)
            setattr(geo, "original_geometry", original_mesh)

    for attr in meshfunctions:
        if not hasattr(geo, attr):
            setattr(geo, attr, None)

    for attr in (["f0", "s0", "n0", "r0", "c0", "l0"]):
        if not hasattr(geo, attr):
            setattr(geo, attr, None)

    return geo
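A hedged usage sketch; the file name is hypothetical, and the helpers the loader calls (check_h5group, load_local_basis, load_microstructure, load_markers) are assumed to come from the same module:

geo = load_geometry_from_h5("heart_mesh.h5", h5group="")
geo.mesh     # dolfin Mesh
geo.ffun     # facet MeshFunction, or None if absent from the file
geo.markers  # boundary markers returned by load_markers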
Example #19
def create_slice(basemesh, point, normal, closest_region=False, crinkle_clip=False):
    """Create a slicemesh from a basemesh.

    :param basemesh: Mesh to slice
    :param point: Point in slicing plane
    :param normal: Normal to slicing plane
    :param closest_region: Set to True to extract disjoint region closest to specified point
    :param crinkle_clip: Set to True to return mesh of same topological dimension as basemesh

    .. note::

        Only 3D-meshes currently supported for slicing.

    .. warning::

        Slice-instances are intended for visualization only, and may produce erroneous
        results if used for computations.

    """
    assert basemesh.geometry().dim() == 3, "Can only slice 3D-meshes."

    P = np.array([point[0], point[1], point[2]], dtype=np.double)

    # Create unit normal
    n = np.array([normal[0], normal[1], normal[2]])
    n = n / np.linalg.norm(n)

    # Calculate the distribution of vertices around the plane
    # (sign of np.dot(p-P, n) determines which side of the plane p is on)
    vsplit = np.dot(basemesh.coordinates()-P, n)

    # Count each cells number of vertices on the "positive" side of the plane
    # Only cells with vertices on both sides of the plane intersect the plane
    operator = np.less
    npos = np.sum(operator(vsplit[basemesh.cells()], 0), 1)

    intersection_cells = basemesh.cells()[(npos > 0) & (npos < 4)]

    if len(intersection_cells) == 0:
        # Try to put "zeros" on other side of plane
        # FIXME: handle cells with vertices exactly intersecting the plane in a more robust manner.
        operator = np.greater
        npos = np.sum(operator(vsplit[basemesh.cells()], 0), 1)
        intersection_cells = basemesh.cells()[(npos > 0) & (npos < 4)]

    if crinkle_clip:
        cf = CellFunction("size_t", basemesh)
        cf.set_all(0)
        cf.array()[(npos>0) & (npos<4)] = 1
        mesh = create_submesh(basemesh, cf, 1)
    else:
        def add_cell(cells, cell):
            # Split cell into triangles
            for i in range(len(cell)-2):
                cells.append(cell[i:i+3])

        cells = []
        index = 0
        indexes = {}
        for c in intersection_cells:
            a = operator(vsplit[c], 0)
            positives = c[np.where(a)[0]]
            negatives = c[np.where(~a)[0]]

            cell = []
            for pp_ind in positives:
                pp = basemesh.coordinates()[pp_ind]

                for pn_ind in negatives:
                    pn = basemesh.coordinates()[pn_ind]
                    if (pp_ind, pn_ind) not in indexes:
                        # Calculate intersection point with the plane
                        d = np.dot(P-pp, n)/np.dot(pp-pn, n)
                        ip = pp+(pp-pn)*d

                        indexes[(pp_ind, pn_ind)] = (index, ip)
                        index += 1

                    cell.append(indexes[(pp_ind, pn_ind)][0])


            add_cell(cells, cell)
        MPI.barrier(mpi_comm_world())

        # Assign global indices
        # TODO: Assign global indices properly
        dist = distribution(index)
        global_idx = sum(dist[:MPI.rank(mpi_comm_world())])
        vertices = {}
        for idx, p in indexes.values():
            vertices[idx] = (global_idx, p)
            global_idx += 1


        global_num_cells = MPI.sum(mpi_comm_world(), len(cells))
        global_num_vertices = MPI.sum(mpi_comm_world(), len(vertices))

        mesh = Mesh()

        # Return empty mesh if no intersections were found
        if global_num_cells == 0:
            mesh_editor = MeshEditor()
            mesh_editor.open(mesh, "triangle", 2, 3)

            mesh_editor.init_vertices(0)
            mesh_editor.init_cells(0)

            mesh_editor.close()
        else:

            # Distribute mesh if empty on any processors
            cells, vertices = distribute_meshdata(cells, vertices)

            # Build mesh
            mesh_editor = MeshEditor()
            mesh_editor.open(mesh, "triangle", 2, 3)

            mesh_editor.init_vertices(len(vertices))
            mesh_editor.init_cells(len(cells))

            for index, cell in enumerate(cells):
                mesh_editor.add_cell(index, cell[0], cell[1], cell[2])

            for local_index, (global_index, coordinates) in vertices.items():
                mesh_editor.add_vertex_global(int(local_index), int(global_index), coordinates)

            mesh_editor.close()
            mesh.topology().init(0, len(vertices), global_num_vertices)
            mesh.topology().init(2, len(cells), global_num_cells)


    if closest_region and mesh.size_global(0) > 0:
        assert MPI.size(mpi_comm_world())==1, "Extract closest region does not work in parallel"
        regions = compute_connectivity(mesh)
        i,d = mesh.bounding_box_tree().compute_closest_entity(Point(P))

        if d == MPI.min(mesh.mpi_comm(), d):
            v = regions[int(i)]
        else:
            v = 0

        v = MPI.max(mesh.mpi_comm(), v)
        mesh = create_submesh(mesh, regions, v)

    return mesh
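A hedged usage sketch, slicing a unit cube through its centre with a z-normal plane; the mesh is hypothetical, and the helpers the function relies on (distribution, distribute_meshdata, compute_connectivity) are assumed to be importable from the same module:

from dolfin import UnitCubeMesh

basemesh = UnitCubeMesh(8, 8, 8)
slicemesh = create_slice(basemesh, point=(0.5, 0.5, 0.5), normal=(0.0, 0.0, 1.0))
# slicemesh is a triangle mesh embedded in 3D (topological dimension 2, geometric dimension 3)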
Example #20
    def test_convert_triangle(self): # Disabled because it fails, see FIXME below
        # test no. 1
        from dolfin import Mesh, MPI
        if MPI.num_processes() != 1:
            return
        fname = os.path.join("data", "triangle")
        dfname = fname+".xml"

        # Read triangle file and convert to a dolfin xml mesh file
        meshconvert.triangle2xml(fname, dfname)

        # Read in dolfin mesh and check number of cells and vertices
        mesh = Mesh(dfname)
        self.assertEqual(mesh.num_vertices(), 96)
        self.assertEqual(mesh.num_cells(), 159)

        # Clean up
        os.unlink(dfname)


        # test no. 2
        from dolfin import MPI, Mesh, MeshFunction, \
                           edges, Edge, faces, Face, \
                           SubsetIterator, facets, CellFunction
        if MPI.num_processes() != 1:
            return
        fname = os.path.join("data", "test_Triangle_3")
        dfname = fname+".xml"
        dfname0 = fname+".attr0.xml"

        # Read triangle file and convert to a dolfin xml mesh file
        meshconvert.triangle2xml(fname, dfname)

        # Read in dolfin mesh and check number of cells and vertices
        mesh = Mesh(dfname)
        mesh.init()
        mfun = MeshFunction('double', mesh, dfname0)
        self.assertEqual(mesh.num_vertices(), 58)
        self.assertEqual(mesh.num_cells(), 58)

        # Create a size_t CellFunction and assign the values based on the
        # converted Meshfunction
        cf = CellFunction("size_t", mesh)
        cf.array()[mfun.array()==10.0] = 0
        cf.array()[mfun.array()==-10.0] = 1

        # Measure the total area of cells with markers 0 and 1
        add = lambda x, y : x+y
        area0 = reduce(add, (Face(mesh, cell.index()).area() \
                             for cell in SubsetIterator(cf, 0)), 0.0)
        area1 = reduce(add, (Face(mesh, cell.index()).area() \
                             for cell in SubsetIterator(cf, 1)), 0.0)
        total_area = reduce(add, (face.area() for face in faces(mesh)), 0.0)

        # Check that all cells in the two domains are either above or below y=0
        self.assertTrue(all(cell.midpoint().y()<0 for cell in SubsetIterator(cf, 0)))
        self.assertTrue(all(cell.midpoint().y()>0 for cell in SubsetIterator(cf, 1)))

        # Check that the areas add up
        self.assertAlmostEqual(area0+area1, total_area)

        # Measure the edge length of the two edge domains
        #edge_markers = mesh.domains().facet_domains()
        edge_markers = mesh.domains().markers(mesh.topology().dim()-1)
        self.assertTrue(edge_markers is not None)
        length0, length1 = 0.0, 0.0
        for item in edge_markers.items():
            if item[1] == 0:
                length0 += Edge(mesh, int(item[0])).length()
            elif item[1] == 1:
                length1 += Edge(mesh, int(item[0])).length()

        # Total length of all edges and total length of boundary edges
        total_length = reduce(add, (e.length() for e in edges(mesh)), 0.0)
        boundary_length = reduce(add, (Edge(mesh, f.index()).length() \
                          for f in facets(mesh) if f.exterior()), 0.0)

        # Check that the edges add up
        self.assertAlmostEqual(length0 + length1, total_length)
        self.assertAlmostEqual(length1, boundary_length)

        # Clean up
        os.unlink(dfname)
        os.unlink(dfname0)
Example #21
    h5_file = convert(args.input)

    # VTK visualize tags
    if args.save_pvd or args.mesh_size:
        h5 = HDF5File(mpi_comm_world(), h5_file, 'r')
        mesh = Mesh()
        h5.read(mesh, 'mesh', False)

        info('Mesh has %d cells' % mesh.num_cells())
        info('Mesh has %d vertices' % mesh.num_vertices())
        info('Box size %s' %
             (mesh.coordinates().max(axis=0) - mesh.coordinates().min(axis=0)))

        hmin, hmax = mesh.hmin(), mesh.hmax()
        info('Mesh has sizes %g %g' % (hmin, hmax))

        root = os.path.splitext(args.input)[0]
        tdim = mesh.topology().dim()

        data_sets = ('curves', 'surfaces', 'volumes')
        dims = (1, tdim - 1, tdim)
        for ds, dim in zip(data_sets, dims):
            if h5.has_dataset(ds):
                f = MeshFunction('size_t', mesh, dim, 0)
                h5.read(f, ds)

                if args.save_pvd:
                    File('%s_%s.pvd' % (root, ds)) << f

    cleanup(exts=args.cleanup)
Example #22
File: meshconvert.py Project: alogg/dolfin
def gmsh2xml(ifilename, handler):
    """Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,
    parser implemented as a state machine:

        0 = read 'MeshFormat'
        1 = read  mesh format data
        2 = read 'EndMeshFormat'
        3 = read 'Nodes'
        4 = read  number of vertices
        5 = read  vertices
        6 = read 'EndNodes'
        7 = read 'Elements'
        8 = read  number of cells
        9 = read  cells
        10 = done

    Afterwards, extract physical region numbers if they are defined in
    the mesh file as a mesh function.

    """

    print "Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format"

    # The dimension of the gmsh element types supported here as well as the dolfin cell types for each dimension
    gmsh_dim = {15: 0, 1: 1, 2: 2, 4: 3}
    cell_type_for_dim = {1: "interval", 2: "triangle", 3: "tetrahedron" }
    # the gmsh element types supported for conversion
    supported_gmsh_element_types = [1, 2, 4, 15]

    # Open files
    ifile = open(ifilename, "r")

    # Scan file for cell type
    cell_type = None
    highest_dim = 0
    line = ifile.readline()
    while line:

        # Remove newline
        if line[-1] == "\n":
            line = line[:-1]

        # Read dimension
        if line.find("$Elements") == 0:

            line = ifile.readline()
            num_elements = int(line)
            if num_elements == 0:
                _error("No elements found in gmsh file.")
            line = ifile.readline()

            # Now iterate through elements to find largest dimension.  Gmsh
            # format might include elements of lower dimensions in the element list.
            # We also need to count number of elements of correct dimensions.
            # Also determine which vertices are not used.
            dim_count = {0: 0, 1: 0, 2: 0, 3: 0}
            vertices_used_for_dim = {0: [], 1: [], 2: [], 3: []}
            # Array used to store gmsh tags for 1D (type 1/line), 2D (type 2/triangular) elements and 3D (type 4/tet) elements
            tags_for_dim = {0: [], 1: [], 2: [], 3: []}

            while line.find("$EndElements") == -1:
                element = line.split()
                elem_type = int(element[1])
                num_tags = int(element[2])
                if elem_type in supported_gmsh_element_types:
                    dim = gmsh_dim[elem_type]
                    if highest_dim < dim:
                        highest_dim = dim
                    node_num_list = [int(node) for node in element[3 + num_tags:]]
                    vertices_used_for_dim[dim].extend(node_num_list)
                    if num_tags > 0:
                        tags_for_dim[dim].append(tuple(int(tag) for tag in element[3:3+num_tags]))
                    dim_count[dim] += 1
                else:
                    #TODO: output a warning here. "gmsh element type %d not supported" % elem_type
                    pass
                line = ifile.readline()
        else:
            # Read next line
            line = ifile.readline()

    # Check that we got the cell type and set num_cells_counted
    if highest_dim == 0:
        _error("Unable to find cells of supported type.")

    num_cells_counted = dim_count[highest_dim]
    vertex_set = set(vertices_used_for_dim[highest_dim])
    vertices_used_for_dim[highest_dim] = None

    vertex_dict = {}
    for n,v in enumerate(vertex_set):
        vertex_dict[v] = n

    # Step to beginning of file
    ifile.seek(0)

    # Set mesh type
    handler.set_mesh_type(cell_type_for_dim[highest_dim], highest_dim)

    # Initialise node list (gmsh does not export all vertices in order)
    nodelist = {}

    # Current state
    state = 0

    # Write data
    num_vertices_read = 0
    num_cells_read = 0

    # Only import the dolfin objects if facet markings exist
    process_facets = False
    if len(tags_for_dim[highest_dim-1]) > 0:
        # first construct the mesh
        try:
            from dolfin import MeshEditor, Mesh
        except ImportError:
            _error("DOLFIN must be installed to handle Gmsh boundary regions")
        mesh = Mesh()
        mesh_editor = MeshEditor()
        mesh_editor.open(mesh, highest_dim, highest_dim)
        process_facets = True
    else:
        # TODO: Output a warning or an error here
        mesh = None

    while state != 10:

        # Read next line
        line = ifile.readline()
        if not line: break

        # Skip comments
        if line[0] == '#':
            continue

        # Remove newline
        if line[-1] == "\n":
            line = line[:-1]

        if state == 0:
            if line == "$MeshFormat":
                state = 1
        elif state == 1:
            (version, file_type, data_size) = line.split()
            state = 2
        elif state == 2:
            if line == "$EndMeshFormat":
                state = 3
        elif state == 3:
            if line == "$Nodes":
                state = 4
        elif state == 4:
            num_vertices = len(vertex_dict)
            handler.start_vertices(num_vertices)
            if process_facets:
                mesh_editor.init_vertices(num_vertices)
            state = 5
        elif state == 5:
            (node_no, x, y, z) = line.split()
            node_no = int(node_no)
            x,y,z = [float(xx) for xx in (x,y,z)]
            if node_no in vertex_dict:
                node_no = vertex_dict[node_no]
            else:
                continue
            nodelist[int(node_no)] = num_vertices_read
            handler.add_vertex(num_vertices_read, [x, y, z])
            if process_facets:
                if highest_dim == 1:
                    coords = numpy.array([x])
                elif highest_dim == 2:
                    coords = numpy.array([x, y])
                elif highest_dim == 3:
                    coords = numpy.array([x, y, z])
                mesh_editor.add_vertex(num_vertices_read, coords)

            num_vertices_read += 1

            if num_vertices == num_vertices_read:
                handler.end_vertices()
                state = 6
        elif state == 6:
            if line == "$EndNodes":
                state = 7
        elif state == 7:
            if line == "$Elements":
                state = 8
        elif state == 8:
            handler.start_cells(num_cells_counted)
            if process_facets:
                mesh_editor.init_cells(num_cells_counted)

            state = 9
        elif state == 9:
            element = line.split()
            elem_type = int(element[1])
            num_tags = int(element[2])
            if elem_type in supported_gmsh_element_types:
                dim = gmsh_dim[elem_type]
            else:
                dim = 0
            if dim == highest_dim:
                node_num_list = [vertex_dict[int(node)] for node in element[3 + num_tags:]]
                for node in node_num_list:
                    if node not in nodelist:
                        _error("Vertex %d of %s %d not previously defined." %
                              (node, cell_type_for_dim[dim], num_cells_read))
                cell_nodes = [nodelist[n] for n in node_num_list]
                handler.add_cell(num_cells_read, cell_nodes)

                if process_facets:
                    cell_nodes = numpy.array([nodelist[n] for n in node_num_list], dtype=numpy.uintp)
                    mesh_editor.add_cell(num_cells_read, cell_nodes)

                num_cells_read += 1

            if num_cells_counted == num_cells_read:
                handler.end_cells()
                if process_facets:
                    mesh_editor.close()
                state = 10
        elif state == 10:
            break

    # Write mesh function based on the Physical Regions defined by
    # gmsh, but only if they are not all zero. All zero physical
    # regions indicate that no physical regions were defined.
    if highest_dim not in [1,2,3]:
        _error("Gmsh tags not supported for dimension %i. Probably a bug" % highest_dim)

    tags = tags_for_dim[highest_dim]
    physical_regions = tuple(tag[0] for tag in tags)
    if not all(tag == 0 for tag in physical_regions):
        handler.start_meshfunction("physical_region", dim, num_cells_counted)
        for i, physical_region in enumerate(physical_regions):
            handler.add_entity_meshfunction(i, physical_region)
        handler.end_meshfunction()

    # Now process the facet markers
    tags = tags_for_dim[highest_dim-1]
    if (len(tags) > 0) and (mesh is not None):
        physical_regions = tuple(tag[0] for tag in tags)
        if not all(tag == 0 for tag in physical_regions):
            mesh.init(highest_dim-1,0)

            # Get the facet-node connectivity information (reshape as a row of node indices per facet)
            if highest_dim == 1:
                # for 1d meshes the mesh topology returns the vertex to vertex map,
                # which isn't what we want as facets are vertices
                facets_as_nodes = numpy.array([[i] for i in range(mesh.num_facets())])
            else:
                facets_as_nodes = mesh.topology()(highest_dim-1, 0)().reshape(mesh.num_facets(), highest_dim)

            # Build the reverse map
            nodes_as_facets = {}
            for facet in range(mesh.num_facets()):
                nodes_as_facets[tuple(facets_as_nodes[facet, :])] = facet

            data = [0] * mesh.num_facets()
            for i, physical_region in enumerate(physical_regions):
                nodes = [n-1 for n in vertices_used_for_dim[highest_dim-1][highest_dim*i:(highest_dim*i+highest_dim)]]
                nodes.sort()

                if physical_region != 0:
                    try:
                        index = nodes_as_facets[tuple(nodes)]
                        data[index] = physical_region
                    except KeyError:
                        # dict lookup raises KeyError (not IndexError) for an unknown facet
                        raise Exception("The facet (%d) was not found to mark: %s" % (i, nodes))

            # Create and initialise the mesh function
            handler.start_meshfunction("facet_region", highest_dim-1, mesh.num_facets())
            for index, physical_region in enumerate(data):
                handler.add_entity_meshfunction(index, physical_region)
            handler.end_meshfunction()

    # Check that we got all data
    if state == 10:
        print "Conversion done"
    else:
        _error("Missing data, unable to convert.\n"
               "Did you use version 2.0 of the gmsh file format?")

    # Close files
    ifile.close()
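For reference, the MSH 2.0 element lines parsed in state 9 have the layout "elm-number elm-type number-of-tags <tags...> <node-list...>". A stand-alone illustration of the slicing logic, on a hypothetical triangle element with two tags:

line = "42 2 2 7 1 5 9 14"
element = line.split()
elem_type = int(element[1])                             # 2 -> triangle
num_tags = int(element[2])                              # two tags follow
tags = [int(tag) for tag in element[3:3 + num_tags]]    # [7, 1]: physical group, geometric entity
nodes = [int(node) for node in element[3 + num_tags:]]  # [5, 9, 14]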
Example #23
    args = parser.parse_args()

    # Some sanity
    root, ext = os.path.splitext(args.mesh)
    assert args.m > 2 and args.n > 2
    assert ext == '.h5'

    # Typically we have files named foo_ncellsX_ncellsY.h5
    assert tuple(map(int, root.split('_')[-2:])) == (args.m, args.n)

    # Load the original mesh
    h5 = HDF5File(get_comm_world(), args.mesh, 'r')
    mesh = Mesh()
    h5.read(mesh, 'mesh', False)

    surfaces = MeshFunction('size_t', mesh, mesh.topology().dim()-1, 0)
    h5.read(surfaces, 'surfaces')

    volumes = MeshFunction('size_t', mesh, mesh.topology().dim(), 0)
    h5.read(volumes, 'volumes')
    
    h5.close()

    # Remove layer
    tt = Timer('cleanup')
    ncells_x, ncells_y = args.m, args.n
    surfaces, volumes = deactivate_cells(surfaces, volumes, ncells_x, ncells_y)

    info('Removing boundary took %g s' % tt.stop())

    # Write 
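The snippet ends before the write step. A minimal sketch of what it could look like, assuming the legacy dolfin HDF5File API and a hypothetical output name derived from root:

out = HDF5File(get_comm_world(), root + '_cleaned.h5', 'w')
out.write(mesh, 'mesh')
out.write(surfaces, 'surfaces')
out.write(volumes, 'volumes')
out.close()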
Example #24
    try:
        msh_file, h5_file = args.io[:2]
    except ValueError:
        msh_file = args.io[0]

        root, ext = os.path.splitext(msh_file)
        h5_file = '.'.join([root, 'h5'])

    assert convert(msh_file, h5_file, args.save_mvc)

    if args.save_pvd:
        h5 = HDF5File(mpi_comm_world(), h5_file, 'r')
        mesh = Mesh()
        h5.read(mesh, 'mesh', False)

        surfaces = MeshFunction('size_t', mesh, mesh.topology().dim()-1, 0)
        volumes = MeshFunction('size_t', mesh, mesh.topology().dim(), 0)

        if not args.save_mvc:
            h5.read(surfaces, 'surfaces')
            h5.read(volumes, 'volumes')
        # The data is mesh value collections
        else:
            from tiling_cpp import fill_mf_from_mvc

            surfaces_mvc = MeshValueCollection('size_t', mesh, mesh.topology().dim()-1)
            h5.read(surfaces_mvc, 'surfaces')
            fill_mf_from_mvc(surfaces_mvc, surfaces)

            volumes_mvc = MeshValueCollection('size_t', mesh, mesh.topology().dim())
            h5.read(volumes_mvc, 'volumes')
Example #25
def coil_in_box():
    mesh = Mesh("../meshes/2d/coil-in-box.xml")

    f = Constant(0.0)

    # Define mesh and boundaries.
    class LeftBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and x[0] < GMSH_EPS

    left_boundary = LeftBoundary()

    class RightBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and x[0] > 2.5 - GMSH_EPS

    right_boundary = RightBoundary()

    class LowerBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and x[1] < GMSH_EPS

    lower_boundary = LowerBoundary()

    class UpperBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and x[1] > 0.4 - GMSH_EPS

    upper_boundary = UpperBoundary()

    class CoilBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return (
                on_boundary
                and x[0] > GMSH_EPS
                and x[0] < 2.5 - GMSH_EPS
                and x[1] > GMSH_EPS
                and x[1] < 0.4 - GMSH_EPS
            )

    coil_boundary = CoilBoundary()

    # heater_temp = 380.0
    # room_temp = 293.0
    # bcs = [(coil_boundary, heater_temp),
    #        (left_boundary, room_temp),
    #        (right_boundary, room_temp),
    #        (upper_boundary, room_temp),
    #        (lower_boundary, room_temp)
    #        ]

    boundaries = MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
    boundaries.set_all(0)
    left_boundary.mark(boundaries, 1)
    right_boundary.mark(boundaries, 2)
    upper_boundary.mark(boundaries, 3)
    lower_boundary.mark(boundaries, 4)
    coil_boundary.mark(boundaries, 5)

    boundary_indices = {"left": 1, "right": 2, "top": 3, "bottom": 4, "coil": 5}
    theta0 = Constant(293.0)
    return mesh, f, boundaries, boundary_indices, theta0
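A hedged usage sketch of the returned markers, mirroring the commented-out boundary conditions above; the function space is hypothetical:

from dolfin import FunctionSpace, DirichletBC

mesh, f, boundaries, boundary_indices, theta0 = coil_in_box()
V = FunctionSpace(mesh, "CG", 1)
# hypothetical: room temperature (theta0) on the left wall
bc = DirichletBC(V, theta0, boundaries, boundary_indices["left"])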