Example #1
def apply_noise(UD, noisepercent, mycomm=None):
    """ WARNING: SUPERCEDED BY CLASS OBSERVATIONOPERATOR
    Apply Gaussian noise to data.
    noisepercent = 0.02 => 2% noise level, i.e.,
    || u - ud || / || ud || = || noise || / || ud || = 0.02 """
    UDnoise = []
    objnoise = 0.0
    for ud in UD:
        noisevect = randn(len(ud))
        # Get norm of entire random vector:
        try:
            normrand = sqrt(MPI.sum(mycomm, norm(noisevect)**2))
        except:
            normrand = norm(noisevect)
        noisevect /= normrand
        # Get norm of entire vector ud (not just local part):
        try:
            normud = sqrt(MPI.sum(mycomm, norm(ud)**2))
        except:
            normud = norm(ud)
        noisevect *= noisepercent * normud
        objnoise += norm(noisevect)**2
        UDnoise.append(ud + noisevect)

    return UDnoise, objnoise
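A minimal serial sketch of the scaling used above, assuming plain numpy (randn and norm as in numpy.random / numpy.linalg, all names chosen here for illustration): normalising a random vector and rescaling it by noisepercent * ||ud|| makes the relative noise level come out at exactly noisepercent.

import numpy as np

ud = np.linspace(0.0, 1.0, 100)             # stand-in for one observation vector
noisepercent = 0.02
noisevect = np.random.randn(len(ud))
noisevect /= np.linalg.norm(noisevect)      # unit-norm random direction
noisevect *= noisepercent * np.linalg.norm(ud)
print(np.linalg.norm(noisevect) / np.linalg.norm(ud))   # prints ~0.02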
Example #2
def _create_tempdir(request):
    # Get directory name of test_foo.py file
    testfile = request.module.__file__
    testfiledir = os.path.dirname(os.path.abspath(testfile))

    # Construct name test_foo_tempdir from name test_foo.py
    testfilename = os.path.basename(testfile)
    outputname = testfilename.replace(".py", "_tempdir_{}".format(
        worker_id(request)))

    # Get function name test_something from test_foo.py
    function = request.function.__name__

    # Join all of these to make a unique path for this test function
    basepath = os.path.join(testfiledir, outputname)
    path = os.path.join(basepath, function)

    # Add a sequence number to avoid collisions when tests are
    # otherwise parameterized
    if MPI.rank(MPI.comm_world) == 0:
        _create_tempdir._sequencenumber[path] += 1
        sequencenumber = _create_tempdir._sequencenumber[path]
        sequencenumber = MPI.sum(MPI.comm_world, sequencenumber)
    else:
        sequencenumber = MPI.sum(MPI.comm_world, 0)
    path += "__" + str(sequencenumber)

    # Delete and re-create directory on root node
    if MPI.rank(MPI.comm_world) == 0:
        # First time visiting this basepath, delete the old and create
        # a new
        if basepath not in _create_tempdir._basepaths:
            _create_tempdir._basepaths.add(basepath)
            if os.path.exists(basepath):
                shutil.rmtree(basepath)
            # Make sure we have the base path test_foo_tempdir for
            # this test_foo.py file
            if not os.path.exists(basepath):
                os.mkdir(basepath)

        # Delete path from old test run
        if os.path.exists(path):
            shutil.rmtree(path)
        # Make sure we have the path for this test execution:
        # e.g. test_foo_tempdir/test_something__3
        if not os.path.exists(path):
            os.mkdir(path)
    MPI.barrier(MPI.comm_world)

    return path
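The rank-dependent branches above use MPI.sum as a cheap broadcast: rank 0 contributes the real sequence number and every other rank contributes 0, so the reduced value is identical everywhere. A hedged sketch of the same pattern written with mpi4py directly (an assumption; the fixture itself goes through dolfin's MPI wrapper):

from mpi4py import MPI as _MPI

comm = _MPI.COMM_WORLD
local = 42 if comm.rank == 0 else 0          # only the root knows the value
value = comm.allreduce(local, op=_MPI.SUM)   # every rank now holds 42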
Example #3
    def setgradxdir(self, valueloc):
        """Sum all local results for Grad . Srch_dir"""
        try:
            valueglob = MPI.sum(self.mycomm, valueloc)
        except:
            valueglob = valueloc
        self.gradxdir = valueglob
Example #4
def test_multi_ps_matrix(mesh):
    """Tests point source PointSource(V, source) for mulitple point
    sources applied to a matrix for 1D, 2D and 3D. Global points given
    to constructor from rank 0 processor.

    """

    c_ids = [0, 1, 2]
    rank = MPI.rank(mesh.mpi_comm())
    V = VectorFunctionSpace(mesh, "CG", 1, dim=2)
    u, v = TrialFunction(V), TestFunction(V)
    A = assemble(Constant(0.0)*dot(u, v)*dx)

    source = []
    if rank == 0:
        for c_id in c_ids:
            cell = Cell(mesh, c_id)
            point = cell.midpoint()
            source.append((point, 10.0))
    ps = PointSource(V, source)
    ps.apply(A)

    # Checks A sums to correct value
    a_sum = MPI.sum(mesh.mpi_comm(), np.sum(A.array()))
    assert round(a_sum - 2*len(c_ids)*10) == 0
Example #5
def test_shared_entities(mesh_factory):
    func, args = mesh_factory
    # xfail_ghosted_quads_hexes(func, ghost_mode)
    mesh = func(*args)
    dim = mesh.topology.dim

    # FIXME: Implement a proper test
    for shared_dim in range(dim + 1):
        # Initialise global indices (if not already)
        mesh.init_global(shared_dim)

        assert isinstance(mesh.topology.shared_entities(shared_dim), dict)
        assert isinstance(mesh.topology.global_indices(shared_dim),
                          numpy.ndarray)

        if mesh.topology.have_shared_entities(shared_dim):
            for e in MeshEntities(mesh, shared_dim):
                sharing = e.sharing_processes()
                assert isinstance(sharing, set)
                assert (len(sharing) > 0) == e.is_shared()

        shared_entities = mesh.topology.shared_entities(shared_dim)

        # Check that sum(local-shared) = global count
        rank = MPI.rank(mesh.mpi_comm())
        ct = sum(1 for val in shared_entities.values() if list(val)[0] < rank)
        num_entities_global = MPI.sum(mesh.mpi_comm(),
                                      mesh.num_entities(shared_dim) - ct)

        assert num_entities_global == mesh.num_entities_global(shared_dim)
Example #6
def test_mesh_function_assign_2D_cells():
    mesh = UnitSquareMesh(MPI.comm_world, 3, 3)
    ncells = mesh.num_cells()
    f = MeshFunction("int", mesh, mesh.topology.dim, 0)
    for c in range(ncells):
        f.values[c] = ncells - c

    g = MeshValueCollection("int", mesh, 2)
    g.assign(f)
    assert ncells == len(f.values)
    assert ncells == g.size()

    f2 = MeshFunction("int", mesh, g, 0)

    for c in range(mesh.num_cells()):
        value = ncells - c
        assert value == g.get_value(c, 0)
        assert f2.values[c] == g.get_value(c, 0)

    h = MeshValueCollection("int", mesh, 2)
    global_indices = mesh.topology.global_indices(2)
    ncells_global = mesh.num_entities_global(2)
    for c in range(mesh.num_cells()):
        if global_indices[c] in [5, 8, 10]:
            continue
        value = ncells_global - global_indices[c]
        h.set_value(c, int(value))

    f3 = MeshFunction("int", mesh, h, 0)

    values = f3.values
    values[values > ncells_global] = 0.

    assert MPI.sum(mesh.mpi_comm(), values.sum() * 1.0) == 140.
Example #7
def test_pointsource_matrix_second_constructor(mesh, point):
    """Tests point source when given different constructor PointSource(V1,
    V2, point, mag) with a matrix and when placed at a node for 1D, 2D
    and 3D. Global points given to constructor from rank 0
    processor. Currently only implemented if V1=V2.

    """

    V1 = FunctionSpace(mesh, "CG", 1)
    V2 = FunctionSpace(mesh, "CG", 1)

    rank = MPI.rank(mesh.mpi_comm())
    u, v = TrialFunction(V1), TestFunction(V2)
    w = Function(V1)
    A = assemble(Constant(0.0)*u*v*dx)
    if rank == 0:
        ps = PointSource(V1, V2, point, 10.0)
    else:
        ps = PointSource(V1, V2, [])
    ps.apply(A)

    # Checks array sums to correct value
    a_sum = MPI.sum(mesh.mpi_comm(), np.sum(A.array()))
    assert round(a_sum - 10.0) == 0

    # Checks point source is added to correct part of the array
    A.get_diagonal(w.vector())
    v2d = vertex_to_dof_map(V1)
    for v in vertices(mesh):
        if near(v.midpoint().distance(point), 0.0):
            ind = v2d[v.index()]
            if ind < len(A.array()):
                assert np.round(w.vector()[ind] - 10.0) == 0
Example #8
def gather_vector(u, size=None):
    comm = mpi_comm_world()

    if size is None:
        # size = int(MPI.size(comm) * MPI.sum(comm, u.size()))
        size = int(MPI.sum(comm, u.size()))

    # From this post: https://fenicsproject.discourse.group/t/gather-function-in-parallel-error/1114/4
    u_vec = dolfin.Vector(comm, size)
    # Values from everywhere on 0
    u_vec = u.gather_on_zero()
    # To everywhere from 0
    try:
        mine = comm.bcast(u_vec)
    except AttributeError:
        comm = comm.tompi4py()
        mine = comm.bcast(u_vec)

    # Reconstruct
    if comm.rank == 0:
        x = u_vec
    else:
        v = dolfin.Vector(MPI.comm_self, size)
        v.set_local(mine)
        x = v.get_local()

    return x
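A self-contained mpi4py sketch (an assumption; the function above uses dolfin's gather_on_zero and bcast) of the same round trip: gather the local pieces on rank 0, then broadcast the assembled array back to every rank.

import numpy as np
from mpi4py import MPI as _MPI

comm = _MPI.COMM_WORLD
local = np.full(3, float(comm.rank))          # stand-in for u.get_local()
pieces = comm.gather(local, root=0)           # list of arrays on rank 0, None elsewhere
full = np.concatenate(pieces) if comm.rank == 0 else None
full = comm.bcast(full, root=0)               # every rank now holds the full vector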
Example #9
def test_UnitCubeMeshDistributed():
    """Create mesh of unit cube."""
    mesh = UnitCubeMesh(MPI.comm_world, 5, 7, 9)
    assert mesh.num_entities_global(0) == 480
    assert mesh.num_entities_global(3) == 1890
    assert mesh.geometry.dim == 3
    assert MPI.sum(mesh.mpi_comm(), mesh.topology.ghost_offset(0)) == 480
Example #10
def pytest_generate_tests(metafunc):
    if 'dim' in metafunc.fixturenames:
        metafunc.parametrize("dim", [2, 3])

    # Set random seed
    new_seed = MPI.sum(mpi_comm_world(), randint(0, 1e6)) / MPI.size(
        mpi_comm_world())
    seed(new_seed)

    # TODO: Make options to select all or subset of schemes for this factory,
    #       copy from or look at regression conftest,
    if 'scheme_factory' in metafunc.fixturenames:
        metafunc.parametrize("scheme_factory", create_scheme_factories())

    if 'D' in metafunc.fixturenames:
        metafunc.parametrize("D", [2, 3])

    if 'start_time' in metafunc.fixturenames:
        start_times = [0.0]
        if metafunc.config.option.all:
            start_times += list(0.8 * random(3))
        metafunc.parametrize("start_time", start_times)

    if 'end_time' in metafunc.fixturenames:
        end_times = [2.0]
        if metafunc.config.option.all:
            end_times += list(1.2 + 0.8 * random(3))
        metafunc.parametrize("end_time", end_times)

    if 'dt' in metafunc.fixturenames:
        dts = [0.1]
        if metafunc.config.option.all:
            dts += [0.05 + 0.05 * random(), 0.2 + 0.2 * random()]
        metafunc.parametrize("dt", dts)
Example #11
def test_multi_ps_matrix(mesh):
    """Tests point source PointSource(V, source) for mulitple point
    sources applied to a matrix for 1D, 2D and 3D. Global points given
    to constructor from rank 0 processor.

    """

    c_ids = [0, 1, 2]
    rank = MPI.rank(mesh.mpi_comm())
    V = VectorFunctionSpace(mesh, "CG", 1, dim=2)
    u, v = TrialFunction(V), TestFunction(V)
    A = assemble(Constant(0.0) * dot(u, v) * dx)

    source = []
    if rank == 0:
        for c_id in c_ids:
            cell = Cell(mesh, c_id)
            point = cell.midpoint()
            source.append((point, 10.0))
    ps = PointSource(V, source)
    ps.apply(A)

    # Checks A sums to correct value
    a_sum = MPI.sum(mesh.mpi_comm(), np.sum(A.array()))
    assert round(a_sum - 2 * len(c_ids) * 10) == 0
Example #12
def test_UnitSquareMeshDistributed():
    """Create mesh of unit square."""
    mesh = UnitSquareMesh(MPI.comm_world, 5, 7)
    assert mesh.num_entities_global(0) == 48
    assert mesh.num_entities_global(2) == 70
    assert mesh.geometry.dim == 2
    assert MPI.sum(mesh.mpi_comm(), mesh.topology.ghost_offset(0)) == 48
Example #13
def _adaptive_mesh_refinement(dx, phi, mu, sigma, omega, conv, voltages):
    from dolfin import cells, refine
    eta = _error_estimator(dx, phi, mu, sigma, omega, conv, voltages)
    mesh = phi.function_space().mesh()
    level = 0
    TOL = 1.0e-4
    E = sum([e * e for e in eta])
    E = sqrt(MPI.sum(E))
    info('Level %d: E = %g (TOL = %g)' % (level, E, TOL))
    # Mark cells for refinement
    REFINE_RATIO = 0.5
    cell_markers = MeshFunction('bool', mesh, mesh.topology().dim())
    eta_0 = sorted(eta, reverse=True)[int(len(eta) * REFINE_RATIO)]
    eta_0 = MPI.max(eta_0)
    for c in cells(mesh):
        cell_markers[c] = eta[c.index()] > eta_0
    # Refine mesh
    mesh = refine(mesh, cell_markers)
    # Plot mesh
    plot(mesh)
    interactive()
    exit()
    ## Compute error indicators
    #K = array([c.volume() for c in cells(mesh)])
    #R = array([abs(source([c.midpoint().x(), c.midpoint().y()])) for c in cells(mesh)])
    #gam = h*R*sqrt(K)
    return
Example #14
    def setgradxdir(self, valueloc):
        """Sum all local results for Grad . Srch_dir"""
        try:
            valueglob = MPI.sum(self.mycomm, valueloc)
        except:
            valueglob = valueloc
        self.gradxdir = valueglob
Example #15
def test_pointsource_matrix_second_constructor(mesh, point):
    """Tests point source when given different constructor PointSource(V1,
    V2, point, mag) with a matrix and when placed at a node for 1D, 2D
    and 3D. Global points given to constructor from rank 0
    processor. Currently only implemented if V1=V2.

    """

    V1 = FunctionSpace(mesh, "CG", 1)
    V2 = FunctionSpace(mesh, "CG", 1)

    rank = MPI.rank(mesh.mpi_comm())
    u, v = TrialFunction(V1), TestFunction(V2)
    w = Function(V1)
    A = assemble(Constant(0.0) * u * v * dx)
    if rank == 0:
        ps = PointSource(V1, V2, point, 10.0)
    else:
        ps = PointSource(V1, V2, [])
    ps.apply(A)

    # Checks array sums to correct value
    a_sum = MPI.sum(mesh.mpi_comm(), np.sum(A.array()))
    assert round(a_sum - 10.0) == 0

    # Checks point source is added to correct part of the array
    A.get_diagonal(w.vector())
    v2d = vertex_to_dof_map(V1)
    for v in vertices(mesh):
        if near(v.midpoint().distance(point), 0.0):
            ind = v2d[v.index()]
            if ind < len(A.array()):
                assert np.round(w.vector()[ind] - 10.0) == 0
Example #16
def test_mesh_function_assign_2D_cells():
    mesh = UnitSquareMesh(MPI.comm_world, 3, 3)
    ncells = mesh.num_cells()
    f = MeshFunction("int", mesh, mesh.topology.dim, 0)
    for cell in Cells(mesh):
        f[cell] = ncells - cell.index()

    g = MeshValueCollection("int", mesh, 2)
    g.assign(f)
    assert ncells == f.size()
    assert ncells == g.size()

    f2 = MeshFunction("int", mesh, g, 0)

    for cell in Cells(mesh):
        value = ncells - cell.index()
        assert value == g.get_value(cell.index(), 0)
        assert f2[cell] == g.get_value(cell.index(), 0)

    h = MeshValueCollection("int", mesh, 2)
    global_indices = mesh.topology.global_indices(2)
    ncells_global = mesh.num_entities_global(2)
    for cell in Cells(mesh):
        if global_indices[cell.index()] in [5, 8, 10]:
            continue
        value = ncells_global - global_indices[cell.index()]
        h.set_value(cell.index(), int(value))

    f3 = MeshFunction("int", mesh, h, 0)

    values = f3.array()
    values[values > ncells_global] = 0.

    assert MPI.sum(mesh.mpi_comm(), values.sum() * 1.0) == 140.
Example #17
def _create_tempdir(request):
    # Get directory name of test_foo.py file
    testfile = request.module.__file__
    testfiledir = os.path.dirname(os.path.abspath(testfile))

    # Construct name test_foo_tempdir from name test_foo.py
    testfilename = os.path.basename(testfile)
    outputname = testfilename.replace(".py", "_tempdir")

    # Get function name test_something from test_foo.py
    function = request.function.__name__

    # Join all of these to make a unique path for this test function
    basepath = os.path.join(testfiledir, outputname)
    path = os.path.join(basepath, function)

    # Add a sequence number to avoid collisions when tests are otherwise parameterized
    if MPI.rank(mpi_comm_world()) == 0:
        _create_tempdir._sequencenumber[path] += 1
        sequencenumber = _create_tempdir._sequencenumber[path]
        sequencenumber = MPI.sum(mpi_comm_world(), sequencenumber)
    else:
        sequencenumber = MPI.sum(mpi_comm_world(), 0)
    path += "__" + str(sequencenumber)

    # Delete and re-create directory on root node
    if MPI.rank(mpi_comm_world()) == 0:
        # First time visiting this basepath, delete the old and create a new
        if basepath not in _create_tempdir._basepaths:
            _create_tempdir._basepaths.add(basepath)
            #if os.path.exists(basepath):
            #    shutil.rmtree(basepath)
            # Make sure we have the base path test_foo_tempdir for this test_foo.py file
            if not os.path.exists(basepath):
                os.mkdir(basepath)

        # Delete path from old test run
        #if os.path.exists(path):
        #    shutil.rmtree(path)
        # Make sure we have the path for this test execution: e.g. test_foo_tempdir/test_something__3
        if not os.path.exists(path):
            os.mkdir(path)
    MPI.barrier(mpi_comm_world())

    return path
Example #18
    def randomize(self):
        """Fill the block_vec with random data (with zero bias)."""
        import numpy
        from dolfin import MPI
        # FIXME: deal with dolfin MPI api changes
        for i in range(len(self)):
            if hasattr(self[i], 'local_size'):
                ran = numpy.random.random(self[i].local_size())
                ran -= MPI.sum(self[i].mpi_comm(), sum(ran)) / self[i].size()
                self[i].set_local(ran)
            elif hasattr(self[i], '__len__'):
                ran = numpy.random.random(len(self[i]))
                ran -= MPI.sum(self[i].mpi_comm(), sum(ran)) / MPI.sum(
                    len(ran))
                self[i][:] = ran
            else:
                raise RuntimeError(
                    'block %d in block_vec has no size -- use a proper vector or call allocate(A)'
                    % i)
Example #19
    def apply_noise(self, uin):
        """Apply Gaussian noise to np.array of data.
        noisepercent = 0.02 => 2% noise level, i.e.,
        || u - ud || / || ud || = || noise || / || ud || = 0.02"""
        isarray(uin)
        noisevect = randn(len(uin))
        # Get norm of entire random vector:
        if self.mycomm is None:
            normrand = norm(noisevect)
        else:
            normrand = sqrt(MPI.sum(self.mycomm, norm(noisevect)**2))
        noisevect /= normrand
        # Get norm of entire vector ud (not just local part):
        if self.mycomm is None:
            normud = norm(uin)
        else:
            normud = sqrt(MPI.sum(self.mycomm, norm(uin)**2))
        noisevect *= self.noisepercent * normud
        objnoise_glob = (self.noisepercent * normud)**2
        UDnoise = uin + noisevect
        return UDnoise, objnoise_glob
Example #20
def test_ghost_3d(mode):
    N = 2
    num_cells = N * N * N * 6

    mesh = UnitCubeMesh(MPI.comm_world, N, N, N, ghost_mode=mode)
    if MPI.size(mesh.mpi_comm()) > 1:
        assert MPI.sum(mesh.mpi_comm(), mesh.num_cells()) > num_cells

    assert mesh.num_entities_global(0) == 27
    assert mesh.num_entities_global(3) == num_cells
Example #21
def test_ghost_2d(mode):
    N = 8
    num_cells = N * N * 2

    mesh = UnitSquareMesh(MPI.comm_world, N, N, ghost_mode=mode)
    if MPI.size(mesh.mpi_comm()) > 1:
        assert MPI.sum(mesh.mpi_comm(), mesh.num_cells()) > num_cells

    assert mesh.num_entities_global(0) == 81
    assert mesh.num_entities_global(2) == num_cells
Example #22
def test_topology_surface(cube):
    surface_vertex_markers = cube.topology.on_boundary(0)
    assert surface_vertex_markers
    n = 3
    cube.create_entities(1)
    cube.create_connectivity(2, 1)
    surface_edge_markers = cube.topology.on_boundary(1)
    assert surface_edge_markers
    surface_facet_markers = cube.topology.on_boundary(2)
    sf_count = numpy.count_nonzero(numpy.array(surface_facet_markers))
    assert MPI.sum(cube.mpi_comm(), sf_count) == n * n * 12
Example #23
def check_if_kill(folder):
    """ Check if user has ordered to kill the simulation. """
    found = 0
    if "kill" in os.listdir(folder):
        found = 1
    found_all = MPI.sum(mpi_comm_world(), found)
    if found_all > 0:
        remove_safe(os.path.join(folder, "kill"))
        info_red("Stopping simulation.")
        return True
    else:
        return False
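The summed flag above is effectively a logical OR over ranks: if any process sees the kill file, the collective sum is positive on all of them. A hedged mpi4py sketch of the same check (helper name and default file name chosen here for illustration):

import os
from mpi4py import MPI as _MPI

def any_rank_has_file(folder, name="kill"):
    found = 1 if name in os.listdir(folder) else 0
    return _MPI.COMM_WORLD.allreduce(found, op=_MPI.SUM) > 0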
Example #24
File: io.py Project: gmkvaal/Oasis
def check_if_kill(folder):
    """Check if user has put a file named killoasis in folder."""
    found = 0
    if 'killoasis' in listdir(folder):
        found = 1
    collective = MPI.sum(mpi_comm_world(), found)
    if collective > 0:
        if MPI.rank(mpi_comm_world()) == 0:
            remove(path.join(folder, 'killoasis'))
            info_red('killoasis Found! Stopping simulations cleanly...')
        return True
    else:
        return False
Example #25
File: io.py Project: jmaidana18/Oasis
def check_if_reset_statistics(folder):
    """Check if user has put a file named resetoasis in folder."""
    found = 0
    if 'resetoasis' in listdir(folder):
        found = 1
    collective = MPI.sum(mpi_comm_world(), found)    
    if collective > 0:        
        if MPI.rank(mpi_comm_world()) == 0:
            remove(path.join(folder, 'resetoasis'))
            info_red('resetoasis Found!')
        return True
    else:
        return False
Example #26
File: io.py Project: gmkvaal/Oasis
def check_if_reset_statistics(folder):
    """Check if user has put a file named resetoasis in folder."""
    found = 0
    if 'resetoasis' in listdir(folder):
        found = 1
    collective = MPI.sum(mpi_comm_world(), found)    
    if collective > 0:        
        if MPI.rank(mpi_comm_world()) == 0:
            remove(path.join(folder, 'resetoasis'))
            info_red('resetoasis Found!')
        return True
    else:
        return False
Example #27
File: io.py Project: jmaidana18/Oasis
def check_if_kill(folder):
    """Check if user has put a file named killoasis in folder."""
    found = 0
    if 'killoasis' in listdir(folder):
        found = 1
    collective = MPI.sum(mpi_comm_world(), found)
    if collective > 0:
        if MPI.rank(mpi_comm_world()) == 0:
            remove(path.join(folder, 'killoasis'))
            info_red('killoasis Found! Stopping simulations cleanly...')
        return True
    else:
        return False
Example #28
    def costab(self, m1, m2):
        self.gradm1 = project(nabla_grad(m1), self.Vd)
        self.gradm2 = project(nabla_grad(m2), self.Vd)

        cost = 0.0
        for x, vol in zip(self.x, self.vol):
            G = np.array([self.gradm1(x), self.gradm2(x)]).T
            u, s, v = np.linalg.svd(G)
            sqrts2eps = np.sqrt(s**2 + self.eps)
            cost += vol * sqrts2eps.sum()

        cost_global = MPI.sum(self.mpicomm, cost)
        return self.k * cost_global
Example #29
File: io.py Project: johannesring/Oasis
def check_if_pause(folder):
    """Check if user has put a file named pauseoasis in folder."""
    found = 0
    if 'pauseoasis' in listdir(folder):
        found = 1
    collective = MPI.sum(MPI.comm_world, found)
    if collective > 0:
        if MPI.rank(MPI.comm_world) == 0:
            info_red('pauseoasis Found! Simulations paused. Remove ' +
                     path.join(folder, 'pauseoasis') +
                     ' to resume simulations...')
        return True
    else:
        return False
Example #30
    def obs(self, uin):
        """Compute B.uin + eps, where eps is noise
        uin must be a Function(V)"""
        if not self.noise:
            return self.Bdot(uin), 0.0
        else:
            Bref = self.Bdot(uin)
            uin_noise, tmp = self.apply_noise(uin.vector().array())
            unoise = Function(self.V)
            unoise.vector()[:] = uin_noise
            Bnoise = self.Bdot(unoise)
            diff = Bref - Bnoise
            noiselevel = np.dot(diff, diff)
            try:
                noiselevel_glob = MPI.sum(self.mycomm, noiselevel)
            except:
                noiselevel_glob = noiselevel
            return Bnoise, noiselevel_glob
Example #31
def test_multi_ps_matrix_node_vector_fs(mesh):
    """Tests point source applied to a matrix with given constructor
    PointSource(V, source) and a vector function space when points
    placed at 3 vertices for 1D, 2D and 3D. Global points given to
    constructor from rank 0 processor.

    """

    point = [0.0, 0.5, 1.0]
    rank = MPI.rank(mesh.mpi_comm())
    V = VectorFunctionSpace(mesh, "CG", 1, dim=2)
    u, v = TrialFunction(V), TestFunction(V)
    w = Function(V)
    A = assemble(Constant(0.0) * dot(u, v) * dx)
    dim = mesh.geometry().dim()

    source = []
    point_coords = np.zeros(dim)
    for p in point:
        for i in range(dim):
            point_coords[i - 1] = p
        if rank == 0:
            source.append((Point(point_coords), 10.0))
    ps = PointSource(V, source)
    ps.apply(A)

    # Checks array sums to correct value
    A.get_diagonal(w.vector())
    a_sum = MPI.sum(mesh.mpi_comm(), np.sum(A.array()))
    assert round(a_sum - 2 * len(point) * 10) == 0

    # Check if coordinates are in portion of mesh and if so check that
    # diagonal components sum to the correct value.
    mesh_coords = V.tabulate_dof_coordinates()
    for p in point:
        for i in range(dim):
            point_coords[i] = p

        j = 0
        for i in range(len(mesh_coords) // (dim)):
            mesh_coords_check = mesh_coords[j:j + dim - 1]
            if np.array_equal(point_coords, mesh_coords_check) is True:
                assert np.round(w.vector()[j // (dim)] - 10.0) == 0.0
            j += dim
Example #32
def test_multi_ps_matrix_node_vector_fs(mesh):
    """Tests point source applied to a matrix with given constructor
    PointSource(V, source) and a vector function space when points
    placed at 3 vertices for 1D, 2D and 3D. Global points given to
    constructor from rank 0 processor.

    """

    point = [0.0, 0.5, 1.0]
    rank = MPI.rank(mesh.mpi_comm())
    V = VectorFunctionSpace(mesh, "CG", 1, dim=2)
    u, v = TrialFunction(V), TestFunction(V)
    w = Function(V)
    A = assemble(Constant(0.0)*dot(u, v)*dx)
    dim = mesh.geometry().dim()

    source = []
    point_coords = np.zeros(dim)
    for p in point:
        for i in range(dim):
            point_coords[i - 1] = p
        if rank == 0:
            source.append((Point(point_coords), 10.0))
    ps = PointSource(V, source)
    ps.apply(A)

    # Checks array sums to correct value
    A.get_diagonal(w.vector())
    a_sum = MPI.sum(mesh.mpi_comm(), np.sum(A.array()))
    assert round(a_sum - 2*len(point)*10) == 0

    # Check if coordinates are in portion of mesh and if so check that
    # diagonal components sum to the correct value.
    mesh_coords = V.tabulate_dof_coordinates()
    for p in point:
        for i in range(dim):
            point_coords[i] = p

        j = 0
        for i in range(len(mesh_coords)//(dim)):
            mesh_coords_check = mesh_coords[j:j+dim-1]
            if np.array_equal(point_coords, mesh_coords_check) is True:
                assert np.round(w.vector()[j//(dim)] - 10.0) == 0.0
            j += dim
Example #33
    def solvefwd(self, cost=False):
        self.PDE.set_fwd()
        self.solfwd, self.solpfwd, self.solppfwd = [], [], [] 
        self.Bp = []

        #TODO: make fwdsource iterable to return source term
        Ricker = self.fwdsource[0]
        srcv = self.fwdsource[2]
        for sii in self.srcindex:
            ptsrc = self.fwdsource[1][sii]
            def srcterm(tt):
                srcv.zero()
                srcv.axpy(Ricker(tt), ptsrc)
                return srcv
            self.PDE.ftime = srcterm
            solfwd, solpfwd, solppfwd,_ = self.PDE.solve()
            self.solfwd.append(solfwd)
            self.solpfwd.append(solpfwd)
            self.solppfwd.append(solppfwd)

            self.PDEcount += 1

            #TODO: come back and parallellize this too (over time steps)
            Bp = np.zeros((len(self.obsop.PtwiseObs.Points),len(solfwd)))
            for index, sol in enumerate(solfwd):
                setfct(self.p, sol[0])
                Bp[:,index] = self.obsop.obs(self.p)
            self.Bp.append(Bp)

        if cost:
            assert not self.dd == None, "Provide data observations to compute cost"
            self.cost_misfit_local = 0.0
            for Bp, dd in izip(self.Bp, self.dd):
                self.cost_misfit_local += self.obsop.costfct(\
                Bp[:,self.tsteps], dd[:,self.tsteps],\
                self.PDE.times[self.tsteps], self.factors[self.tsteps])
            self.cost_misfit = MPI.sum(self.mpicomm_global, self.cost_misfit_local)
            self.cost_misfit /= len(self.fwdsource[1])
            self.cost_reg = self.regularization.costab(self.PDE.a, self.PDE.b)
            self.cost = self.cost_misfit + self.alpha_reg*self.cost_reg
            if DEBUG:   
                print 'cost_misfit={}, cost_reg={}'.format(\
                self.cost_misfit, self.cost_reg)
Example #34
File: io.py Project: johannesring/Oasis
def check_if_kill(folder, killtime, total_timer):
    """Check if user has put a file named killoasis in folder or if given killtime has been reached."""
    found = 0
    if 'killoasis' in listdir(folder):
        found = 1
    collective = MPI.sum(MPI.comm_world, found)
    if collective > 0:
        if MPI.rank(MPI.comm_world) == 0:
            remove(path.join(folder, 'killoasis'))
            info_red('killoasis Found! Stopping simulations cleanly...')
        return True
    else:
        elapsed_time = float(total_timer.elapsed()[0])
        if killtime is not None and killtime <= elapsed_time:
            if MPI.rank(MPI.comm_world) == 0:
                info_red(
                    'Given killtime reached! Stopping simulations cleanly...')
            return True
        else:
            return False
Example #35
def test_save_and_read_mesh_value_collection_with_only_one_marked_entity(
        tempdir):
    ndiv = 2
    filename = os.path.join(tempdir, "mesh_value_collection.h5")
    mesh = UnitCubeMesh(MPI.comm_world, ndiv, ndiv, ndiv)
    mvc = MeshValueCollection("size_t", mesh, 3)
    mesh.create_entities(3)
    if MPI.rank(mesh.mpi_comm()) == 0:
        mvc.set_value(0, 1)

    # write to file
    with HDF5File(mesh.mpi_comm(), filename, 'w') as f:
        f.write(mvc, "/mesh_value_collection")

    # read from file
    with HDF5File(mesh.mpi_comm(), filename, 'r') as f:
        mvc = f.read_mvc_size_t(mesh, "/mesh_value_collection")
        assert MPI.sum(mesh.mpi_comm(), mvc.size()) == 1
        if MPI.rank(mesh.mpi_comm()) == 0:
            assert mvc.get_value(0, 0) == 1
Example #36
def collect_timings(outdir, tic):

    # list_timings(TimingClear.keep, [TimingType.wall, TimingType.system])

    # t = timings(TimingClear.keep, [TimingType.wall, TimingType.user, TimingType.system])
    t = timings(TimingClear.keep, [TimingType.wall])
    # Use different MPI reductions
    t_sum = MPI.sum(MPI.comm_world, t)
    # t_min = MPI.min(MPI.comm_world, t)
    # t_max = MPI.max(MPI.comm_world, t)
    t_avg = MPI.avg(MPI.comm_world, t)
    # Print aggregate timings to screen
    print('\n' + t_sum.str(True))
    # print('\n'+t_min.str(True))
    # print('\n'+t_max.str(True))
    print('\n' + t_avg.str(True))

    # Store to XML file on rank 0
    if MPI.rank(MPI.comm_world) == 0:
        f = File(MPI.comm_self, os.path.join(outdir, "timings_aggregate.xml"))
        f << t_sum
        # f << t_min
        # f << t_max
        f << t_avg

    dump_timings_to_xml(os.path.join(outdir, "timings_avg_min_max.xml"),
                        TimingClear.clear)
    elapsed = time.time() - tic

    comm = mpi4py.MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    if rank == 0:
        with open(os.path.join(outdir, 'timings.pkl'), 'w') as f:
            json.dump({'elapsed': elapsed, 'size': size}, f)

    pass
Example #37
def test_multi_ps_matrix_node_local(mesh):
    """Tests point source when given constructor PointSource(V, V, point,
    mag) with a matrix when points placed at 3 node for 1D, 2D and
    3D. Local points given to constructor.

    """

    V = FunctionSpace(mesh, "CG", 1)
    u, v = TrialFunction(V), TestFunction(V)
    w = Function(V)
    A = assemble(Constant(0.0) * u * v * dx)

    source = []
    point_coords = mesh.coordinates()[0]
    source.append((Point(point_coords), 10.0))
    ps = PointSource(V, source)
    ps.apply(A)

    # Checks matrix sums to correct value.
    A.get_diagonal(w.vector())
    size = MPI.size(mesh.mpi_comm())
    a_sum = MPI.sum(mesh.mpi_comm(), np.sum(A.array()))
    assert round(a_sum - size * 10.0) == 0
Example #38
def test_multi_ps_matrix_node_local(mesh):
    """Tests point source when given constructor PointSource(V, V, point,
    mag) with a matrix when points placed at 3 node for 1D, 2D and
    3D. Local points given to constructor.

    """

    V = FunctionSpace(mesh, "CG", 1)
    u, v = TrialFunction(V), TestFunction(V)
    w = Function(V)
    A = assemble(Constant(0.0)*u*v*dx)

    source = []
    point_coords = mesh.coordinates()[0]
    source.append((Point(point_coords), 10.0))
    ps = PointSource(V, source)
    ps.apply(A)

    # Checks matrix sums to correct value.
    A.get_diagonal(w.vector())
    size = MPI.size(mesh.mpi_comm())
    a_sum = MPI.sum(mesh.mpi_comm(), np.sum(A.array()))
    assert round(a_sum - size*10.0) == 0
Example #39
def test_GetCells():
    """Get cells of mesh"""
    mesh = UnitSquareMesh(MPI.comm_world, 5, 5)
    assert MPI.sum(mesh.mpi_comm(), len(mesh.cells())) == 50
Example #40
    def _update_hdf5_file(self, field_name, saveformat, data, timestep, t):
        """Update hdf5 file with new data.

        This creates a hashed dataset within the dataset to save FunctionSpace
        information only once, and for all subsequent savings only the vector
        is saved and links are created to the FunctionSpace information.

        This ensures that the saving is fully compatible with restart and
        replay on an arbitrary number of processes.
        """
        assert saveformat == "hdf5"
        fullname, metadata = self._get_datafile_name(field_name, saveformat,
                                                     timestep)

        # Create "good enough" hash. This is done to avoid data corruption when restarted from
        # different number of processes, different distribution or different function space
        local_hash = sha1()
        local_hash.update(
            str(data.function_space().mesh().num_cells()).encode())
        local_hash.update(str(data.function_space().ufl_element()).encode())
        local_hash.update(str(data.function_space().dim()).encode())
        local_hash.update(str(MPI.size(mpi_comm_world())).encode())

        # Global hash (same on all processes), 10 digits long
        global_hash = MPI.sum(mpi_comm_world(), int(local_hash.hexdigest(),
                                                    16))
        global_hash = str(int(global_hash % 1e10)).zfill(10)

        #key = (field_name, saveformat)
        #datafile = self._datafile_cache.get(key)
        #if datafile is None:
        #    datafile = HDF5File(mpi_comm_world(), fullname, 'w')
        #    self._datafile_cache[key] = datafile

        # Open HDF5File
        if not os.path.isfile(fullname):
            datafile = HDF5File(mpi_comm_world(), fullname, 'w')
        else:
            datafile = HDF5File(mpi_comm_world(), fullname, 'a')

        # Write to hash-dataset if not yet done
        if not datafile.has_dataset(global_hash) or not datafile.has_dataset(
                global_hash + "/" + field_name):
            datafile.write(data, str(global_hash) + "/" + field_name)

        if not datafile.has_dataset("Mesh"):
            datafile.write(data.function_space().mesh(), "Mesh")

        # Write vector to file
        # TODO: Link vector when function has been written to hash
        datafile.write(data.vector(), field_name + str(timestep) + "/vector")

        # HDF5File.close is broken in 1.4
        if dolfin_version() == "1.4.0+":
            datafile.close()
        del datafile
        # Link information about function space from hash-dataset
        hdf5_link(fullname,
                  str(global_hash) + "/" + field_name + "/x_cell_dofs",
                  field_name + str(timestep) + "/x_cell_dofs")
        hdf5_link(fullname,
                  str(global_hash) + "/" + field_name + "/cell_dofs",
                  field_name + str(timestep) + "/cell_dofs")
        hdf5_link(fullname,
                  str(global_hash) + "/" + field_name + "/cells",
                  field_name + str(timestep) + "/cells")

        return metadata
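The hash trick above makes every process agree on one dataset name even though the per-rank metadata differs: each rank hashes its local view, the integer digests are summed over the communicator, and the reduced value (taken modulo 10**10) becomes a 10-digit string that is identical everywhere. A small mpi4py sketch of that idea (an assumption; the method itself uses dolfin's MPI wrapper and mpi_comm_world):

from hashlib import sha1
from mpi4py import MPI as _MPI

comm = _MPI.COMM_WORLD
local_hash = sha1(str(comm.rank).encode())               # stand-in for per-rank metadata
digest = int(local_hash.hexdigest(), 16)
global_hash = str(comm.allreduce(digest, op=_MPI.SUM) % 10**10).zfill(10)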
Example #41
def test_diff_then_integrate():

    # Define 1D geometry
    n = 21
    mesh = UnitIntervalMesh(MPI.comm_world, n)

    # Shift and scale mesh
    x0, x1 = 1.5, 3.14
    mesh.coordinates()[:] *= (x1 - x0)
    mesh.coordinates()[:] += x0

    x = SpatialCoordinate(mesh)[0]
    xs = 0.1 + 0.8 * x / x1  # scaled to be within [0.1,0.9]

    # Define list of expressions to test, and configure
    # accuracies these expressions are known to pass with.
    # The reason some functions are less accurately integrated is
    # likely that the default choice of quadrature rule is not perfect
    F_list = []

    def reg(exprs, acc=10):
        for expr in exprs:
            F_list.append((expr, acc))

    # FIXME: 0*dx and 1*dx fails in the ufl-ffc-jit framework somewhere
    # reg([Constant(0.0, cell=cell)])
    # reg([Constant(1.0, cell=cell)])
    monomial_list = [x**q for q in range(2, 6)]
    reg(monomial_list)
    reg([2.3 * p + 4.5 * q for p in monomial_list for q in monomial_list])
    reg([x**x])
    reg([x**(x**2)], 8)
    reg([x**(x**3)], 6)
    reg([x**(x**4)], 2)
    # Special functions:
    reg([atan(xs)], 8)
    reg([sin(x), cos(x), exp(x)], 5)
    reg([ln(xs), pow(x, 2.7), pow(2.7, x)], 3)
    reg([asin(xs), acos(xs)], 1)
    reg([tan(xs)], 7)

    try:
        import scipy
    except ImportError:
        scipy = None

    if hasattr(math, 'erf') or scipy is not None:
        reg([erf(xs)])
    else:
        print(
            "Warning: skipping test of erf, old python version and no scipy.")

    # if 0:
    #     print("Warning: skipping tests of bessel functions, doesn't build on all platforms.")
    # elif scipy is None:
    #     print("Warning: skipping tests of bessel functions, missing scipy.")
    # else:
    #     for nu in (0, 1, 2):
    #         # Many of these are possibly more accurately integrated,
    #         # but 4 covers all and is sufficient for this test
    #         reg([bessel_J(nu, xs), bessel_Y(nu, xs), bessel_I(nu, xs), bessel_K(nu, xs)], 4)

    # To handle tensor algebra, make an x dependent input tensor
    # xx and square all expressions
    def reg2(exprs, acc=10):
        for expr in exprs:
            F_list.append((inner(expr, expr), acc))

    xx = as_matrix([[2 * x**2, 3 * x**3], [11 * x**5, 7 * x**4]])
    x3v = as_vector([3 * x**2, 5 * x**3, 7 * x**4])
    cc = as_matrix([[2, 3], [4, 5]])
    reg2([xx])
    reg2([x3v])
    reg2([cross(3 * x3v, as_vector([-x3v[1], x3v[0], x3v[2]]))])
    reg2([xx.T])
    reg2([tr(xx)])
    reg2([det(xx)])
    reg2([dot(xx, 0.1 * xx)])
    reg2([outer(xx, xx.T)])
    reg2([dev(xx)])
    reg2([sym(xx)])
    reg2([skew(xx)])
    reg2([elem_mult(7 * xx, cc)])
    reg2([elem_div(7 * xx, xx + cc)])
    reg2([elem_pow(1e-3 * xx, 1e-3 * cc)])
    reg2([elem_pow(1e-3 * cc, 1e-3 * xx)])
    reg2([elem_op(lambda z: sin(z) + 2, 0.03 * xx)], 2)  # pretty inaccurate...

    # FIXME: Add tests for all UFL operators:
    # These cause discontinuities and may be harder to test in the
    # above fashion:
    # 'inv', 'cofac',
    # 'eq', 'ne', 'le', 'ge', 'lt', 'gt', 'And', 'Or', 'Not',
    # 'conditional', 'sign',
    # 'jump', 'avg',
    # 'LiftingFunction', 'LiftingOperator',

    # FIXME: Test other derivatives: (but algorithms for operator
    # derivatives are the same!):
    # 'variable', 'diff',
    # 'Dx', 'grad', 'div', 'curl', 'rot', 'Dn', 'exterior_derivative',

    # Run through all operators defined above and compare integrals
    debug = 0
    for F, acc in F_list:
        # Apply UFL differentiation
        f = diff(F, SpatialCoordinate(mesh))[..., 0]
        if debug:
            print(F)
            print(x)
            print(f)

        # Apply integration with DOLFIN
        # (also passes through form compilation and jit)
        M = f * dx
        f_integral = assemble_scalar(M)  # noqa
        f_integral = MPI.sum(mesh.mpi_comm(), f_integral)

        # Compute integral of f manually from anti-derivative F
        # (passes through PyDOLFIN interface and uses UFL evaluation)
        F_diff = F((x1, )) - F((x0, ))

        # Compare results. Using custom relative delta instead
        # of decimal digits here because some numbers are >> 1.
        delta = min(abs(f_integral), abs(F_diff)) * 10**-acc
        assert f_integral - F_diff <= delta
Example #42
    def replay(self):
        "Replay problem with given postprocessor."
        # Backup play log
        self.backup_playlog()

        # Set up for replay
        replay_plan = self._fetch_history()
        postprocessors = []
        for fieldname, field in self.postproc._fields.items():
            if not (field.params.save or field.params.plot):
                continue

            # Check timesteps covered by current field
            keys = self._check_field_coverage(replay_plan, fieldname)

            # Get the time dependency for the field
            t_dep = min(
                [dep[1]
                 for dep in self.postproc._dependencies[fieldname]] + [0])

            dep_fields = []
            for dep in self.postproc._full_dependencies[fieldname]:
                if dep[0] in ["t", "timestep"]:
                    continue

                if dep[0] in dep_fields:
                    continue

                # Copy dependency and set save/plot to False. If dependency should be
                # plotted/saved, this field will be added separately.
                dependency = self.postproc._fields[dep[0]]
                dependency = copy.copy(dependency)
                dependency.params.save = False
                dependency.params.plot = False
                dependency.params.safe = False

                dep_fields.append(dependency)

            added_to_postprocessor = False
            for i, (ppkeys, ppt_dep, pp) in enumerate(postprocessors):
                if t_dep == ppt_dep and set(keys) == set(ppkeys):
                    pp.add_fields(dep_fields, exists_reaction="ignore")
                    pp.add_field(field, exists_reaction="replace")

                    added_to_postprocessor = True
                    break
                else:
                    continue

            # Create new postprocessor if no suitable postprocessor found
            if not added_to_postprocessor:
                pp = PostProcessor(self.postproc.params, self.postproc._timer)
                pp.add_fields(dep_fields, exists_reaction="ignore")
                pp.add_field(field, exists_reaction="replace")

                postprocessors.append([keys, t_dep, pp])

        postprocessors = sorted(postprocessors,
                                key=itemgetter(1),
                                reverse=True)

        t_independent_fields = []
        for fieldname in self.postproc._fields:
            if self.postproc._full_dependencies[fieldname] == []:
                t_independent_fields.append(fieldname)
            elif min(t for dep, t in
                     self.postproc._full_dependencies[fieldname]) == 0:
                t_independent_fields.append(fieldname)

        # Run replay
        sorted_keys = sorted(replay_plan.keys())
        N = max(sorted_keys)
        for timestep in sorted_keys:
            cbc_print("Processing timestep %d of %d. %.3f%% complete." %
                      (timestep, N, 100.0 * (timestep) / N))

            # Load solution at this timestep (all available fields)
            solution = replay_plan[timestep]
            t = solution.pop("t")

            # Cycle through postprocessors and update if required
            for ppkeys, ppt_dep, pp in postprocessors:
                if timestep in ppkeys:
                    # Add dummy solutions to avoid error when handling dependencies
                    # We know this should work, because it has already been established that
                    # the fields to be computed at this timestep can be computed from stored
                    # solutions.
                    for field in pp._sorted_fields_keys:
                        for dep in reversed(pp._dependencies[field]):
                            if not have_necessary_deps(solution, pp, dep[0]):
                                solution[dep[0]] = lambda: None
                    pp.update_all(solution, t, timestep)

                    # Clear None-objects from solution
                    [
                        solution.pop(k) for k in solution.keys()
                        if not solution[k]
                    ]

                    # Update solution to avoid re-computing data
                    for fieldname, value in pp._cache[0].items():
                        if fieldname in t_independent_fields:
                            value = pp._cache[0][fieldname]
                            #solution[fieldname] = lambda value=value: value # Memory leak!
                            solution[fieldname] = MiniCallable(value)

            self.timer.increment()
            if self.params.check_memory_frequency != 0 and timestep % self.params.check_memory_frequency == 0:
                cbc_print('Memory usage is: %s' %
                          MPI.sum(mpi_comm_world(), get_memory_usage()))

            # Clean up solution: Required to avoid memory leak for some reason...
            for f, v in solution.items():
                if isinstance(v, MiniCallable):
                    v.value = None
                    del v
                    solution.pop(f)

        for ppkeys, ppt_dep, pp in postprocessors:
            pp.finalize_all()