Example #1
def test_save_2D_facet_function(tempdir, encoding, data_type):
    if invalid_config(encoding):
        pytest.skip("XDMF unsupported in current configuration")

    dtype_str, dtype = data_type

    mesh = UnitSquareMesh(MPI.comm_world, 32, 32)
    mf = MeshFunction(dtype_str, mesh, mesh.topology.dim - 1, 0)
    mf.rename("facets")

    if (MPI.size(mesh.mpi_comm()) == 1):
        for facet in Facets(mesh):
            mf[facet] = dtype(facet.index())
    else:
        for facet in Facets(mesh):
            mf[facet] = dtype(facet.global_index())
    filename = os.path.join(tempdir, "mf_facet_2D_%s.xdmf" % dtype_str)

    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as xdmf:
        xdmf.write(mf)

    with XDMFFile(mesh.mpi_comm(), filename) as xdmf:
        read_function = getattr(xdmf, "read_mf_" + dtype_str)
        mf_in = read_function(mesh, "facets")

    diff = 0
    for facet in Facets(mesh):
        diff += (mf_in[facet] - mf[facet])
    assert diff == 0
Example #2
def pytest_generate_tests(metafunc):
    if 'dim' in metafunc.fixturenames:
        metafunc.parametrize("dim", [2, 3])

    # Set random seed
    new_seed = MPI.sum(mpi_comm_world(), randint(0, 1e6)) / MPI.size(
        mpi_comm_world())
    seed(new_seed)

    # TODO: Make options to select all or a subset of schemes for this factory;
    #       copy from or look at the regression conftest.
    if 'scheme_factory' in metafunc.fixturenames:
        metafunc.parametrize("scheme_factory", create_scheme_factories())

    if 'D' in metafunc.fixturenames:
        metafunc.parametrize("D", [2, 3])

    if 'start_time' in metafunc.fixturenames:
        start_times = [0.0]
        if metafunc.config.option.all:
            start_times += list(0.8 * random(3))
        metafunc.parametrize("start_time", start_times)

    if 'end_time' in metafunc.fixturenames:
        end_times = [2.0]
        if metafunc.config.option.all:
            end_times += list(1.2 + 0.8 * random(3))
        metafunc.parametrize("end_time", end_times)

    if 'dt' in metafunc.fixturenames:
        dts = [0.1]
        if metafunc.config.option.all:
            dts += [0.05 + 0.05 * random(), 0.2 + 0.2 * random()]
        metafunc.parametrize("dt", dts)
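
# A minimal sketch of a test that the hooks above would parametrize; the fixture
# names `dim`, `start_time`, `end_time` and `dt` are exactly those declared in
# pytest_generate_tests, everything else here is hypothetical.
def test_time_interval_is_well_formed(dim, start_time, end_time, dt):
    assert dim in (2, 3)
    assert end_time > start_time
    assert 0.0 < dt < (end_time - start_time)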
Example #3
    def test_convert_diffpack(self):
        from dolfin import Mesh, MPI, MeshFunction, mpi_comm_world
        if MPI.size(mpi_comm_world()) != 1:
            return
        fname = os.path.join("data", "diffpack_tet")
        dfname = fname + ".xml"

        # Read DIFFPACK grid file and convert to a DOLFIN XML mesh file
        meshconvert.diffpack2xml(fname + ".grid", dfname)

        # Read in dolfin mesh and check number of cells and vertices
        mesh = Mesh(dfname)
        self.assertEqual(mesh.num_vertices(), 27)
        self.assertEqual(mesh.num_cells(), 48)
        self.assertEqual(len(mesh.domains().markers(3)), 48)
        self.assertEqual(len(mesh.domains().markers(2)), 16)

        mf_basename = dfname.replace(".xml", "_marker_%d.xml")
        for marker, num in [(3, 9), (6, 9), (7, 3), (8, 1)]:

            mf_name = mf_basename % marker
            mf = MeshFunction("size_t", mesh, mf_name)
            self.assertEqual(sum(mf.array() == marker), num)
            os.unlink(mf_name)

        # Clean up
        os.unlink(dfname)
Example #4
    def test_compute_entity_collisions_tree_3d(self):

        references = [[set([18, 19, 20, 21, 22, 23, 42, 43, 44, 45, 46, 47]),
                       set([0, 1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29])],
                      [set([7, 8, 30, 31, 32]),
                       set([15, 16, 17, 39, 41])]]

        points = [Point(0.52, 0.51, 0.3), Point(0.9, -0.9, 0.3)]

        for i, point in enumerate(points):

            mesh_A = UnitCubeMesh(2, 2, 2)
            mesh_B = UnitCubeMesh(2, 2, 2)

            mesh_B.translate(point)

            tree_A = BoundingBoxTree()
            tree_A.build(mesh_A)

            tree_B = BoundingBoxTree()
            tree_B.build(mesh_B)

            entities_A, entities_B = tree_A.compute_entity_collisions(tree_B)

            if MPI.size(mesh_A.mpi_comm()) == 1:
                self.assertEqual(set(entities_A), references[i][0])
                self.assertEqual(set(entities_B), references[i][1])
Example #5
    def test_compute_entity_collisions_tree_2d(self):

        references = [[set([20, 21, 22, 23, 28, 29, 30, 31]),
                       set([0, 1, 2, 3, 8, 9, 10, 11])],
                      [set([6]),
                       set([25])]]

        points = [Point(0.52, 0.51), Point(0.9, -0.9)]

        for i, point in enumerate(points):

            mesh_A = UnitSquareMesh(4, 4)
            mesh_B = UnitSquareMesh(4, 4)

            mesh_B.translate(point)

            tree_A = BoundingBoxTree()
            tree_A.build(mesh_A)

            tree_B = BoundingBoxTree()
            tree_B.build(mesh_B)

            entities_A, entities_B = tree_A.compute_entity_collisions(tree_B)

            if MPI.size(mesh_A.mpi_comm()) == 1:
                self.assertEqual(set(entities_A), references[i][0])
                self.assertEqual(set(entities_B), references[i][1])
Example #6
    def test_compute_first_entity_collision_1d(self):

        reference = [4]

        p = Point(0.3)
        mesh = UnitIntervalMesh(16)
        tree = BoundingBoxTree()
        tree.build(mesh)
        first = tree.compute_first_entity_collision(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertIn(first, reference)

        tree = mesh.bounding_box_tree()
        first = tree.compute_first_entity_collision(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertIn(first, reference)
Example #7
    def test_compute_collisions_tree_1d(self):

        references = [[set([8, 9, 10, 11, 12, 13, 14, 15]),
                       set([0, 1, 2, 3, 4, 5, 6, 7])],
                      [set([14, 15]),
                       set([0, 1])]]

        points = [Point(0.52), Point(0.9)]

        for i, point in enumerate(points):

            mesh_A = UnitIntervalMesh(16)
            mesh_B = UnitIntervalMesh(16)

            mesh_B.translate(point)

            tree_A = BoundingBoxTree()
            tree_A.build(mesh_A)

            tree_B = BoundingBoxTree()
            tree_B.build(mesh_B)

            entities_A, entities_B = tree_A.compute_collisions(tree_B)

            if MPI.size(mesh_A.mpi_comm()) == 1:
                self.assertEqual(set(entities_A), references[i][0])
                self.assertEqual(set(entities_B), references[i][1])
Example #8
def test_save_3D_facet_function(tempdir, encoding, data_type):
    dtype_str, dtype = data_type
    mesh = UnitCubeMesh(MPI.comm_world, 4, 4, 4)
    mf = MeshFunction(dtype_str, mesh, mesh.topology.dim - 1, 0)
    mf.name = "facets"

    if (MPI.size(mesh.mpi_comm()) == 1):
        for facet in Facets(mesh):
            mf[facet] = dtype(facet.index())
    else:
        for facet in Facets(mesh):
            mf[facet] = dtype(facet.global_index())
    filename = os.path.join(tempdir, "mf_facet_3D_%s.xdmf" % dtype_str)

    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as xdmf:
        xdmf.write(mf)

    with XDMFFile(mesh.mpi_comm(), filename) as xdmf:
        read_function = getattr(xdmf, "read_mf_" + dtype_str)
        mf_in = read_function(mesh, "facets")

    diff = 0
    for facet in Facets(mesh):
        diff += (mf_in[facet] - mf[facet])
    assert diff == 0
Example #9
def csr_to_petsc4py(csr_matrix):
    '''Convert Scipy's csr matrix to PETSc matrix.'''
    assert MPI.size(mpi_comm_world()) == 1, 'mat_to_csr assumes single process'

    if isinstance(csr_matrix, list):
        return [csr_to_petsc4py(mat) for mat in csr_matrix]
    # None is zero block
    elif csr_matrix is None:
        return None
    else:
        A = csr_matrix
        csr = (A.indptr, A.indices, A.data)
        # Convert to PETSc
        n_rows, n_cols = A.shape
        A_petsc = PETSc.Mat().createAIJ(size=A.shape, csr=csr)

        # Now set local to global mapping for indices. This is supposed to run in
        # serial only so these are identities.
        row_lgmap = PETSc.LGMap().create(list(arange(n_rows, dtype=int)))
        if not n_rows == n_cols:
            col_lgmap = PETSc.LGMap().create(list(arange(n_cols, dtype=int)))
        else:
            col_lgmap = row_lgmap

        A_petsc.setLGMap(row_lgmap, col_lgmap)
        A_petsc.assemble()

        return A_petsc
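
# Hedged usage sketch for csr_to_petsc4py, assuming a serial run and that the source
# file also imports `arange` from numpy, `PETSc` from petsc4py and `MPI` /
# `mpi_comm_world` from dolfin (those imports are not shown in the snippet above).
import numpy as np
import scipy.sparse as sp

A = sp.csr_matrix(np.array([[2.0, -1.0, 0.0],
                            [-1.0, 2.0, -1.0],
                            [0.0, -1.0, 2.0]]))
A_petsc = csr_to_petsc4py(A)
# The PETSc AIJ matrix should contain the same entries as the SciPy one
assert np.allclose(A_petsc.getValues(range(3), range(3)), A.toarray())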
Example #10
    def __init__(self, V, options=None):
        # See if we have dofmap
        if not is_function_space(V):
            raise ValueError("V is not a function space.")

        # Only allow 2d and 3d meshes
        if V.mesh().geometry().dim() == 1:
            raise ValueError("Only 2d and 3d meshes are supported.")

        # Get MPI info
        try:
            from dolfin import mpi_comm_world

            self.mpi_size = MPI.size(mpi_comm_world())
            self.mpi_rank = MPI.rank(mpi_comm_world())
        except ImportError:
            self.mpi_size = MPI.num_processes()
            self.mpi_rank = MPI.process_number()

        # Analyze the space V
        self.V = V
        self.dofmaps = extract_dofmaps(self.V)
        self.bounds = bounds(self.V)

        # Rewrite default plotting options if they are provided by user
        self.options = {"colors": {"mesh_entities": "hsv", "mesh": "Blues"}, "xkcd": False, "markersize": 40}
        if options is not None:
            self.options.update(options)

        # Keep track of the plots
        self.plots = []
Example #11
def mesh():
    mesh = UnitSquareMesh(3, 3)
    assert MPI.size(mesh.mpi_comm()) in (1, 2, 3, 4)
    # 1 processor        -> test serial case
    # 2 and 3 processors -> test case where submesh is contained only on one processor
    # 4 processors       -> test case where submesh is shared by two processors, resulting in shared facets and vertices
    return mesh
Example #12
def test_submesh_global_cell_numbering_independent_on_mpi(
        mesh, submesh_markers, submesh, tempdir):
    cell_markers = dict()
    cell_centroids = dict()
    for submesh_cell in cells(submesh):
        submesh_local_index = submesh_cell.index()
        submesh_global_index = submesh.topology().global_indices(
            submesh.topology().dim())[submesh_local_index]
        mesh_local_index = submesh.submesh_to_mesh_cell_local_indices[
            submesh_local_index]
        cell_markers[submesh_global_index] = submesh_markers.array(
        )[mesh_local_index]
        cell_centroids[submesh_global_index] = [
            submesh_cell.midpoint()[i] for i in range(submesh.topology().dim())
        ]
    output_filename = "test_submesh_cell_numbering_independent_on_mpi__size_" + str(
        MPI.size(submesh.mpi_comm())) + "_rank_" + str(
            MPI.rank(submesh.mpi_comm())) + ".pkl"
    with open(os.path.join(tempdir, output_filename), "wb") as outfile:
        pickle.dump(cell_centroids, outfile, protocol=pickle.HIGHEST_PROTOCOL)
    input_filename = "test_submesh_cell_numbering_independent_on_mpi__size_1_rank_0.pkl"
    with open(os.path.join(data_dir, input_filename), "rb") as infile:
        serial_cell_centroids = pickle.load(infile)
    for submesh_global_index in cell_centroids.keys():
        if submesh_global_index < len(serial_cell_centroids):
            assert allclose(cell_centroids[submesh_global_index],
                            serial_cell_centroids[submesh_global_index])
            assert cell_markers[submesh_global_index]
        else:
            assert not cell_markers[submesh_global_index]
Example #13
def mat_to_csr(mat):
    '''Convert any dolfin.Matrix to csr matrix in scipy.'''
    assert MPI.size(mpi_comm_world()) == 1, 'mat_to_csr assumes single process'
    # We can handle blocks
    if isinstance(mat, (list, ndarray, block_mat)):
        return [mat_to_csr(mat_) for mat_ in mat]
    # A number block can only be zero, and for bmat these are None
    elif isinstance(mat, (int, float)):
        assert abs(mat) < 1E-15
        return None
    # Actual matrix
    else:
        rows = [0]
        cols = []
        values = []
        for row in range(mat.size(0)):
            cols_, values_ = mat.getrow(row)
            rows.append(len(cols_)+rows[-1])
            cols.extend(cols_)
            values.extend(values_)

        shape = mat.size(0), mat.size(1)
        
        return csr_matrix((asarray(values, dtype='float'),
                           asarray(cols, dtype='int'),
                           asarray(rows, dtype='int')),
                           shape)
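
# Hedged usage sketch for mat_to_csr, assuming legacy DOLFIN on a single process and
# that the source file imports asarray/ndarray from numpy, csr_matrix from
# scipy.sparse and block_mat from the cbc.block package (not shown above).
from dolfin import UnitSquareMesh, FunctionSpace, TrialFunction, TestFunction, dx, assemble

mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "Lagrange", 1)
u, v = TrialFunction(V), TestFunction(V)
M = assemble(u*v*dx)      # P1 mass matrix as a dolfin.Matrix
M_csr = mat_to_csr(M)     # the same matrix as a scipy.sparse.csr_matrix
assert M_csr.shape == (V.dim(), V.dim())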
Example #14
    def test_compute_first_entity_collision_3d(self):

        reference = [876, 877, 878, 879, 880, 881]

        p = Point(0.3, 0.3, 0.3)
        mesh = UnitCubeMesh(8, 8, 8)
        tree = BoundingBoxTree()
        tree.build(mesh)
        first = tree.compute_first_entity_collision(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertIn(first, reference)

        tree = mesh.bounding_box_tree()
        first = tree.compute_first_entity_collision(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertIn(first, reference)
Example #15
    def test_compute_first_entity_collision_2d(self):

        reference = [136, 137]

        p = Point(0.3, 0.3)
        mesh = UnitSquareMesh(16, 16)
        tree = BoundingBoxTree()
        tree.build(mesh)
        first = tree.compute_first_entity_collision(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertIn(first, reference)


        tree = mesh.bounding_box_tree()
        first = tree.compute_first_entity_collision(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertIn(first, reference)
Example #16
def gc_barrier():
    """Internal utility to easily switch on and off calls to
    gc.collect() and MPI.barrier(world) in all fixtures here.
    Helps make the tests deterministic when debugging.
    """
    gc.collect()
    if MPI.size(mpi_comm_world()) > 1:
        MPI.barrier(mpi_comm_world())
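
# A minimal sketch of how gc_barrier is meant to be used inside a fixture: collect
# garbage and synchronize all ranks before and after handing out the object
# (assumes pytest plus the same dolfin imports as above, e.g. UnitSquareMesh).
import pytest
from dolfin import UnitSquareMesh

@pytest.fixture
def square_mesh():
    gc_barrier()
    mesh = UnitSquareMesh(8, 8)
    yield mesh
    gc_barrier()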
Example #17
    def test_compute_first_collision_1d(self):

        reference = {1: [4]}

        p = Point(0.3)
        mesh = UnitIntervalMesh(16)
        for dim in range(1, 2):
            tree = BoundingBoxTree()
            tree.build(mesh, dim)
            first = tree.compute_first_collision(p)
            if MPI.size(mesh.mpi_comm()) == 1:
                self.assertIn(first, reference[dim])

        tree = mesh.bounding_box_tree()
        first = tree.compute_first_collision(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertIn(first, reference[mesh.topology().dim()])
Example #18
def gc_barrier():
    """Internal utility to easily switch on and off calls to
    gc.collect() and MPI.barrier(world) in all fixtures here.
    Helps make the tests deterministic when debugging.
    """
    gc.collect()
    if MPI.size(mpi_comm_world()) > 1:
        MPI.barrier(mpi_comm_world())
Example #19
def test_mpi_atomicity(tempdir):
    comm_world = MPI.comm_world
    if MPI.size(comm_world) > 1:
        filename = os.path.join(tempdir, "mpiatomic.h5")
        with HDF5File(MPI.comm_world, filename, "w") as f:
            assert f.get_mpi_atomicity() is False
            f.set_mpi_atomicity(True)
            assert f.get_mpi_atomicity() is True
Example #20
def test_append_and_load_mesh_functions(tempdir, encoding, data_type):
    if invalid_config(encoding):
        pytest.skip("XDMF unsupported in current configuration")

    dtype_str, dtype = data_type

    meshes = [
        UnitSquareMesh(MPI.comm_world, 12, 12),
        UnitCubeMesh(MPI.comm_world, 2, 2, 2)
    ]

    for mesh in meshes:
        dim = mesh.topology.dim

        vf = MeshFunction(dtype_str, mesh, 0, 0)
        vf.rename("vertices")
        ff = MeshFunction(dtype_str, mesh, mesh.topology.dim - 1, 0)
        ff.rename("facets")
        cf = MeshFunction(dtype_str, mesh, mesh.topology.dim, 0)
        cf.rename("cells")

        if (MPI.size(mesh.mpi_comm()) == 1):
            for vertex in Vertices(mesh):
                vf[vertex] = dtype(vertex.index())
            for facet in Facets(mesh):
                ff[facet] = dtype(facet.index())
            for cell in Cells(mesh):
                cf[cell] = dtype(cell.index())
        else:
            for vertex in Vertices(mesh):
                vf[vertex] = dtype(vertex.global_index())
            for facet in Facets(mesh):
                ff[facet] = dtype(facet.global_index())
            for cell in Cells(mesh):
                cf[cell] = dtype(cell.global_index())

        filename = os.path.join(tempdir, "appended_mf_%dD.xdmf" % dim)

        with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as xdmf:
            xdmf.write(mesh)
            xdmf.write(vf)
            xdmf.write(ff)
            xdmf.write(cf)

        with XDMFFile(mesh.mpi_comm(), filename) as xdmf:
            read_function = getattr(xdmf, "read_mf_" + dtype_str)
            vf_in = read_function(mesh, "vertices")
            ff_in = read_function(mesh, "facets")
            cf_in = read_function(mesh, "cells")

        diff = 0
        for vertex in Vertices(mesh):
            diff += (vf_in[vertex] - vf[vertex])
        for facet in Facets(mesh):
            diff += (ff_in[facet] - ff[facet])
        for cell in Cells(mesh):
            diff += (cf_in[cell] - cf[cell])
        assert diff == 0
Example #21
    def __init__(self, comm, Outputfolder=None):
        self.mpirank = MPI.rank(comm)
        mpisize = MPI.size(comm)
        if Outputfolder is None:
            self.set_outdir('Output/', comm)
        else:
            self.set_outdir(Outputfolder, comm)
        if mpisize == 1:
            self.extensionvtu = 'vtu'
        else:
            self.extensionvtu = 'pvtu'
        self.indices = []
        self.varname = []
Example #22
    def test_compute_entity_collisions_2d(self):

        reference = set([136, 137])

        p = Point(0.3, 0.3)
        mesh = UnitSquareMesh(16, 16)

        tree = BoundingBoxTree()
        tree.build(mesh)
        entities = tree.compute_entity_collisions(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(set(entities), reference)

        tree = mesh.bounding_box_tree()
        entities = tree.compute_entity_collisions(p)

        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(set(entities), reference)
Example #23
    def test_compute_first_collision_2d(self):

        reference = {1: [226],
                     2: [136, 137]}

        p = Point(0.3, 0.3)
        mesh = UnitSquareMesh(16, 16)
        for dim in range(1, 3):
            tree = BoundingBoxTree()
            tree.build(mesh, dim)
            first = tree.compute_first_collision(p)
            if MPI.size(mesh.mpi_comm()) == 1:
                self.assertIn(first, reference[dim])

        tree = mesh.bounding_box_tree()
        first = tree.compute_first_collision(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertIn(first, reference[mesh.topology().dim()])
Example #24
    def test_mesh_point_2d(self):
        "Test mesh-point intersection in 2D"

        point = Point(0.1, 0.2)
        mesh = UnitSquareMesh(16, 16)

        intersection = intersect(mesh, point)

        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(intersection.intersected_cells(), [98])
Example #25
    def test_mesh_point_1d(self):
        "Test mesh-point intersection in 1D"

        point = Point(0.1)
        mesh = UnitIntervalMesh(16)

        intersection = intersect(mesh, point)

        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(intersection.intersected_cells(), [1])
Example #26
def test_ghost_2d(mode):
    N = 8
    num_cells = N * N * 2

    mesh = UnitSquareMesh(MPI.comm_world, N, N, ghost_mode=mode)
    if MPI.size(mesh.mpi_comm()) > 1:
        assert MPI.sum(mesh.mpi_comm(), mesh.num_cells()) > num_cells

    assert mesh.num_entities_global(0) == 81
    assert mesh.num_entities_global(2) == num_cells
Example #27
    def test_mesh_point_3d(self):
        "Test mesh-point intersection in 3D"

        point = Point(0.1, 0.2, 0.3)
        mesh = UnitCubeMesh(8, 8, 8)

        intersection = intersect(mesh, point)

        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(intersection.intersected_cells(), [816])
Example #28
    def test_mesh_point_1d(self):
        "Test mesh-point intersection in 1D"

        point = Point(0.1)
        mesh = UnitIntervalMesh(16)

        intersection = intersect(mesh, point)

        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(intersection.intersected_cells(), [1])
Example #29
    def test_mesh_point_3d(self):
        "Test mesh-point intersection in 3D"

        point = Point(0.1, 0.2, 0.3)
        mesh = UnitCubeMesh(8, 8, 8)

        intersection = intersect(mesh, point)

        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(intersection.intersected_cells(), [816])
Example #30
    def test_mesh_point_2d(self):
        "Test mesh-point intersection in 2D"

        point = Point(0.1, 0.2)
        mesh = UnitSquareMesh(16, 16)

        intersection = intersect(mesh, point)

        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(intersection.intersected_cells(), [98])
Example #31
def test_ghost_3d(mode):
    N = 2
    num_cells = N * N * N * 6

    mesh = UnitCubeMesh(MPI.comm_world, N, N, N, ghost_mode=mode)
    if MPI.size(mesh.mpi_comm()) > 1:
        assert MPI.sum(mesh.mpi_comm(), mesh.num_cells()) > num_cells

    assert mesh.num_entities_global(0) == 27
    assert mesh.num_entities_global(3) == num_cells
Example #32
    def test_compute_closest_entity_3d(self):

        reference = (0, 0.1)

        p = Point(0.1, 0.05, -0.1)
        mesh = UnitCubeMesh(8, 8, 8)
        tree = BoundingBoxTree()
        tree.build(mesh)
        entity, distance = tree.compute_closest_entity(p)

        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(entity, reference[0])
            self.assertAlmostEqual(distance, reference[1])

        tree = mesh.bounding_box_tree()
        entity, distance = tree.compute_closest_entity(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(entity, reference[0])
            self.assertAlmostEqual(distance, reference[1])
Example #33
    def test_compute_first_collision_3d(self):

        reference = {1: [1364],
                     2: [1967, 1968, 1970, 1972, 1974, 1976],
                     3: [876, 877, 878, 879, 880, 881]}

        p = Point(0.3, 0.3, 0.3)
        mesh = UnitCubeMesh(8, 8, 8)
        for dim in range(1, 4):
            tree = BoundingBoxTree()
            tree.build(mesh, dim)
            first = tree.compute_first_collision(p)
            if MPI.size(mesh.mpi_comm()) == 1:
                self.assertIn(first, reference[dim])

        tree = mesh.bounding_box_tree()
        first = tree.compute_first_collision(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertIn(first, reference[mesh.topology().dim()])
Example #34
    def test_compute_closest_entity_2d(self):

        reference = (1, 1.0)

        p = Point(-1.0, 0.01)
        mesh = UnitSquareMesh(16, 16)
        tree = BoundingBoxTree()
        tree.build(mesh)
        entity, distance = tree.compute_closest_entity(p)

        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(entity, reference[0])
            self.assertAlmostEqual(distance, reference[1])

        tree = mesh.bounding_box_tree()
        entity, distance = tree.compute_closest_entity(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(entity, reference[0])
            self.assertAlmostEqual(distance, reference[1])
Example #35
    def _clean_hdf5(self, fieldname, del_metadata):
        delete_from_hdf5_file = '''
        namespace dolfin {
            #include <hdf5.h>
            void delete_from_hdf5_file(const MPI_Comm comm,
                                       const std::string hdf5_filename,
                                       const std::string dataset,
                                       const bool use_mpiio)
            {
                //const hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
                // Open file existing file for append
                //hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, plist_id);
                hid_t hdf5_file_id = HDF5Interface::open_file(comm, hdf5_filename, "a", use_mpiio);

                H5Ldelete(hdf5_file_id, dataset.c_str(), H5P_DEFAULT);
                HDF5Interface::close_file(hdf5_file_id);
            }
        }
        '''
        cpp_module = compile_extension_module(
            delete_from_hdf5_file,
            additional_system_headers=["dolfin/io/HDF5Interface.h"])

        hdf5filename = os.path.join(self._pp.get_savedir(fieldname),
                                    fieldname + '.hdf5')

        if not os.path.isfile(hdf5filename):
            return

        for k, v in del_metadata.items():
            if 'hdf5' not in v:
                continue
            else:
                cpp_module.delete_from_hdf5_file(
                    mpi_comm_world(), hdf5filename, v['hdf5']['dataset'],
                    MPI.size(mpi_comm_world()) > 1)

        hdf5tmpfilename = os.path.join(self._pp.get_savedir(fieldname),
                                       fieldname + '_tmp.hdf5')
        #import ipdb; ipdb.set_trace()
        MPI.barrier(mpi_comm_world())
        if on_master_process():
            # status, result = getstatusoutput("h5repack -V")
            status, result = -1, -1
            if status != 0:
                cbc_warning(
                    "Unable to run h5repack. Will not repack hdf5-files before replay, which may cause bloated hdf5-files."
                )
            else:
                subprocess.call("h5repack %s %s" %
                                (hdf5filename, hdf5tmpfilename),
                                shell=True)
                os.remove(hdf5filename)
                os.rename(hdf5tmpfilename, hdf5filename)
        MPI.barrier(mpi_comm_world())
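
# A minimal h5py-based sketch of the same dataset removal done by the compiled C++
# snippet above (assumes h5py is available; `filename` and `dataset` are hypothetical
# placeholders). Like H5Ldelete, removing the link does not shrink the file, which is
# why the original still repacks with h5repack afterwards.
import h5py

def delete_dataset(filename, dataset):
    with h5py.File(filename, "a") as f:
        if dataset in f:
            del f[dataset]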
Example #36
def test_mesh_to_submesh_global_facet_indices(mesh, submesh, tempdir):
    test_logger.log(DEBUG, "Mesh to submesh global facet indices:")
    for (mesh_local_index, submesh_local_index) in submesh.mesh_to_submesh_facet_local_indices.items():
        mesh_global_index = mesh.topology().global_indices(mesh.topology().dim() - 1)[mesh_local_index]
        submesh_global_index = submesh.topology().global_indices(submesh.topology().dim() - 1)[submesh_local_index]
        test_logger.log(DEBUG, "\t" + str(mesh_global_index) + " -> " + str(submesh_global_index))
    assert_mesh_plotter(mesh, submesh, "T", tempdir, "test_mesh_to_submesh_global_facet_indices")
    filename = ("test_mesh_to_submesh_global_facet_indices" + "_size_" + str(MPI.size(submesh.mpi_comm()))
                + "_rank_" + str(MPI.rank(submesh.mpi_comm())) + ".pkl")
    dict_save(submesh.mesh_to_submesh_facet_local_indices, tempdir, filename)
    dict_assert_equal(submesh.mesh_to_submesh_facet_local_indices, data_dir, filename)
Example #37
def test_submesh_to_mesh_global_vertex_indices(mesh, submesh, tempdir):
    test_logger.log(DEBUG, "Submesh to mesh global vertex indices:")
    for (submesh_local_index, mesh_local_index) in enumerate(submesh.submesh_to_mesh_vertex_local_indices):
        submesh_global_index = submesh.topology().global_indices(0)[submesh_local_index]
        mesh_global_index = mesh.topology().global_indices(0)[mesh_local_index]
        test_logger.log(DEBUG, "\t" + str(submesh_global_index) + " -> " + str(mesh_global_index))
    assert_mesh_plotter(mesh, submesh, "V", tempdir, "test_submesh_to_mesh_global_vertex_indices")
    filename = ("test_submesh_to_mesh_global_vertex_indices" + "_size_" + str(MPI.size(submesh.mpi_comm()))
                + "_rank_" + str(MPI.rank(submesh.mpi_comm())) + ".pkl")
    array_save(submesh.submesh_to_mesh_vertex_local_indices, tempdir, filename)
    array_assert_equal(submesh.submesh_to_mesh_vertex_local_indices, data_dir, filename)
Example #38
def make_figures(time_data_pd, u, alpha, load, outdir):
    if MPI.size(MPI.comm_world) == 1:  # only produce matplotlib figures in serial
        plt.figure(1)
        pltu = plt.colorbar(plot(u, mode="displacement", title=u.name()))
        plt.savefig(os.path.join(outdir, "plot_u_{:3.4f}.png".format(load)))
        plt.figure(2)
        pltalpha = plt.colorbar(plot(alpha, title=alpha.name()))
        plt.subplots_adjust(hspace=0.8)
        plt.savefig(os.path.join(outdir,
                                 "plot_alpha_{:3.4f}.png".format(load)))
        plt.close("all")
Example #39
    def test_compute_collisions_point_1d(self):

        reference = {1: set([4])}

        p = Point(0.3)
        mesh = UnitIntervalMesh(16)
        for dim in range(1, 2):
            tree = BoundingBoxTree()
            tree.build(mesh, dim)
            entities = tree.compute_collisions(p)
            if MPI.size(mesh.mpi_comm()) == 1:
                self.assertEqual(set(entities), reference[dim])
Example #40
    def test_compute_entity_collisions_3d(self):

        reference = set([876, 877, 878, 879, 880, 881])

        p = Point(0.3, 0.3, 0.3)
        mesh = UnitCubeMesh(8, 8, 8)

        tree = BoundingBoxTree()
        tree.build(mesh)
        entities = tree.compute_entity_collisions(p)
        if MPI.size(mesh.mpi_comm()) == 1:
            self.assertEqual(set(entities), reference)
Example #41
    def compare_ab_global(self):
        """
        Check that the medium parameters (a, b) are the same across all processes
        """
        assign(self.ab.sub(0), self.PDE.a)
        assign(self.ab.sub(1), self.PDE.b)
        ab_recv = self.ab.vector().copy()
        normabloc = np.linalg.norm(self.ab.vector().array())
        MPIAllReduceVector(self.ab.vector(), ab_recv, self.mpicomm_global)
        ab_recv /= MPI.size(self.mpicomm_global)
        diff = ab_recv - self.ab.vector()
        reldiff = np.linalg.norm(diff.array())/normabloc
        assert reldiff < 2e-16, 'Diff in (a,b) across proc: {:.2e}'.format(reldiff)
Example #42
    def test_compute_collisions_point_2d(self):

        reference = {1: set([226]),
                     2: set([136, 137])}

        p = Point(0.3, 0.3)
        mesh = UnitSquareMesh(16, 16)
        for dim in range(1, 3):
            tree = BoundingBoxTree()
            tree.build(mesh, dim)
            entities = tree.compute_collisions(p)
            if MPI.size(mesh.mpi_comm()) == 1:
                self.assertEqual(set(entities), reference[dim])
Example #43
    def test_compute_collisions_point_3d(self):

        reference = {1: set([1364]),
                     2: set([1967, 1968, 1970, 1972, 1974, 1976]),
                     3: set([876, 877, 878, 879, 880, 881])}

        p = Point(0.3, 0.3, 0.3)
        mesh = UnitCubeMesh(8, 8, 8)
        for dim in range(1, 4):
            tree = BoundingBoxTree()
            tree.build(mesh, dim)
            entities = tree.compute_collisions(p)
            if MPI.size(mesh.mpi_comm()) == 1:
                self.assertEqual(set(entities), reference[dim])
Example #44
    def mesh(self):
        "Return the dolfin mesh"

        # If no mesh is stored read in from UnstructuredGridData
        if self._mesh is None:
            self._mesh = vtk_ug_to_dolfin_mesh(self.reader.GetOutput())

        # Small sanity check, only valid when running in serial
        if MPI.size(mpi_comm_world()) == 1:
            assert(self._mesh.num_vertices() == \
                self.reader.GetOutput().GetNumberOfPoints() and \
                self._mesh.num_cells() == \
                self.reader.GetOutput().GetNumberOfCells())
        
        return self._mesh
Example #45
def trace(A, mpi_comm = mpi_comm_world() ):
    """
    Compute the trace of a sparse matrix :math:`A`.
    """
    v = Vector(mpi_comm)
    A.init_vector(v)
    nprocs = MPI.size(mpi_comm)
    
    if nprocs > 1:
        raise Exception("trace is only serial")
    
    n  = A.size(0)
    tr = 0.
    for i in range(0,n):
        [j, val] = A.getrow(i)
        tr += val[j == i]
    return tr
Example #46
def save_checkpoint_solution_h5(tstep, q_, q_1, newfolder, u_components,
                                NS_parameters):
    """Overwrite solution in Checkpoint folder.

    For safety reasons, in case the solver is interrupted, take backup of
    solution first.

    Must be restarted using the same mesh-partitioning. This will be fixed
    soon. (MM)

    """
    checkpointfolder = path.join(newfolder, "Checkpoint")
    NS_parameters["num_processes"] = MPI.size(MPI.comm_world)
    if MPI.rank(MPI.comm_world) == 0:
        if path.exists(path.join(checkpointfolder, "params.dat")):
            system('cp {0} {1}'.format(path.join(checkpointfolder, "params.dat"),
                                       path.join(checkpointfolder, "params_old.dat")))
        f = open(path.join(checkpointfolder, "params.dat"), 'wb')
        pickle.dump(NS_parameters,  f)

    MPI.barrier(MPI.comm_world)
    for ui in q_:
        h5file = path.join(checkpointfolder, ui + '.h5')
        oldfile = path.join(checkpointfolder, ui + '_old.h5')
        # For safety reasons...
        if path.exists(h5file):
            if MPI.rank(MPI.comm_world) == 0:
                system('cp {0} {1}'.format(h5file, oldfile))
        MPI.barrier(MPI.comm_world)
        ###
        newfile = HDF5File(MPI.comm_world, h5file, 'w')
        newfile.flush()
        newfile.write(q_[ui].vector(), '/current')
        if ui in u_components:
            newfile.write(q_1[ui].vector(), '/previous')
        if path.exists(oldfile):
            if MPI.rank(MPI.comm_world) == 0:
                system('rm {0}'.format(oldfile))
        MPI.barrier(MPI.comm_world)
        newfile.close()
    if MPI.rank(MPI.comm_world) == 0 and path.exists(path.join(checkpointfolder, "params_old.dat")):
        system('rm {0}'.format(path.join(checkpointfolder, "params_old.dat")))
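
# Hedged sketch of the matching restart step (legacy DOLFIN HDF5File API assumed,
# reusing the `path`, `HDF5File` and `MPI` imports of the function above): read the
# checkpointed vectors back on the same mesh partitioning, as the docstring requires.
# The function name and argument list are hypothetical.
def load_checkpoint_solution_h5(newfolder, q_, q_1, u_components):
    checkpointfolder = path.join(newfolder, "Checkpoint")
    for ui in q_:
        with HDF5File(MPI.comm_world, path.join(checkpointfolder, ui + '.h5'), 'r') as f:
            f.read(q_[ui].vector(), '/current', False)
            if ui in u_components:
                f.read(q_1[ui].vector(), '/previous', False)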
Example #47
File: utils.py  Project: blechta/fenapack
def get_default_factor_solver_type(comm):
    """Return first available factor solver type name.
    This is implemented using DOLFIN for now."""

    methods_parallel = ("mumps", "superlu_dist", "pastix")
    methods_sequential = ("mumps", "umfpack", "superlu",
                          "superlu_dist", "pastix")

    if isinstance(comm, PETSc.Comm):
        comm = comm.tompi4py()

    if MPI.size(comm) > 1:
        methods = methods_parallel
    else:
        methods = methods_sequential

    for method in methods:
        if has_lu_solver_method(method):
            return method

    raise RuntimeError("Did not find any suitable direct sparse solver in PETSc")
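
# Hedged usage sketch (assumes petsc4py; `pc_factor_mat_solver_type` is the PETSc
# >= 3.9 name of the factorization-backend option): pick the first available direct
# solver and make PETSc LU factorizations use it by default.
from petsc4py import PETSc

method = get_default_factor_solver_type(PETSc.COMM_WORLD)
PETSc.Options()["pc_factor_mat_solver_type"] = method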
Example #48
def test_multi_ps_vector_node_local(mesh):
    """Tests point source when given constructor PointSource(V, V, point,
    mag) with a matrix when points placed at 3 node for 1D, 2D and
    3D. Local points given to constructor.

    """

    V = FunctionSpace(mesh, "CG", 1)
    v = TestFunction(V)
    b = assemble(Constant(0.0)*v*dx)

    source = []
    point_coords = mesh.coordinates()[0]
    source.append((Point(point_coords), 10.0))
    ps = PointSource(V, source)
    ps.apply(b)

    # Checks b sums to the correct value: each process adds one point source of
    # magnitude 10 at a local node, so the global sum is size*10
    size = MPI.size(mesh.mpi_comm())
    b_sum = b.sum()
    assert round(b_sum - size*10.0) == 0
Example #49
def to_dense(A, mpi_comm = mpi_comm_world() ):
    """
    Convert a sparse matrix A to dense.
    For debugging only.
    """
    v = Vector(mpi_comm)
    A.init_vector(v)
    nprocs = MPI.size(mpi_comm)
    
    if nprocs > 1:
        raise Exception("to_dense is only serial")
    
    if hasattr(A, "getrow"):
        n  = A.size(0)
        m  = A.size(1)
        B = np.zeros( (n,m), dtype=np.float64)
        for i in range(0,n):
            [j, val] = A.getrow(i)
            B[i,j] = val
        
        return B
    else:
        x = Vector(mpi_comm)
        Ax = Vector(mpi_comm)
        A.init_vector(x,1)
        A.init_vector(Ax,0)
        
        n = Ax.get_local().shape[0]
        m = x.get_local().shape[0]
        B = np.zeros( (n,m), dtype=np.float64) 
        for i in range(0,m):
            i_ind = np.array([i], dtype=np.intc)
            x.set_local(np.ones(i_ind.shape), i_ind)
            x.apply("sum_values")
            A.mult(x,Ax)
            B[:,i] = Ax.get_local()
            x.set_local(np.zeros(i_ind.shape), i_ind)
            x.apply("sum_values")
            
        return B
Example #50
def test_multi_ps_matrix_node_local(mesh):
    """Tests point source when given constructor PointSource(V, V, point,
    mag) with a matrix when points placed at 3 node for 1D, 2D and
    3D. Local points given to constructor.

    """

    V = FunctionSpace(mesh, "CG", 1)
    u, v = TrialFunction(V), TestFunction(V)
    w = Function(V)
    A = assemble(Constant(0.0)*u*v*dx)

    source = []
    point_coords = mesh.coordinates()[0]
    source.append((Point(point_coords), 10.0))
    ps = PointSource(V, source)
    ps.apply(A)

    # Checks the matrix sums to the correct value: each process adds one point
    # source of magnitude 10 on the diagonal, so the sum of all entries is size*10
    A.get_diagonal(w.vector())
    size = MPI.size(mesh.mpi_comm())
    a_sum = MPI.sum(mesh.mpi_comm(), np.sum(A.array()))
    assert round(a_sum - size*10.0) == 0
Example #51
import sys
from time import time
from dolfin import UnitSquareMesh, FunctionSpace, TrialFunction, TestFunction, \
Function, inner, nabla_grad, dx, ds, assemble, sqrt, FacetFunction, Measure, \
SubDomain, MPI, mpi_comm_world
mpicomm = mpi_comm_world()
mpirank = MPI.rank(mpicomm)
mpisize = MPI.size(mpicomm)

mesh = UnitSquareMesh(100, 100, "crossed")

V = FunctionSpace(mesh, 'Lagrange', 2)
Vl = FunctionSpace(mesh, 'Lagrange', 1)
Vr = FunctionSpace(mesh, 'Lagrange', 1)

trial = TrialFunction(V)
test = TestFunction(V)

lam1 = Function(Vl)
lam2 = Function(Vl)
lamV = Function(V)
rho1 = Function(Vl)
rho2 = Function(Vr)


try:
    myrun = int(sys.argv[1])
except (IndexError, ValueError):
    myrun = 2

if myrun == 1: