Example #1
    def _correct_postprocessing(self, restart_timestep):
        "Removes data from casedir found at timestep>restart_timestep."
        playlog = self._pp.get_playlog('r')
        playlog_to_remove = {}
        for k, v in playlog.items():
            if int(k) >= restart_timestep:
                # Collect entries first; popping from the shelve while
                # iterating over it would mutate it mid-loop
                playlog_to_remove[k] = playlog[k]
        playlog.close()

        MPI.barrier(mpi_comm_world())
        if on_master_process():
            playlog = self._pp.get_playlog()
            for k in playlog_to_remove:
                playlog.pop(k)
            playlog.close()

        MPI.barrier(mpi_comm_world())
        all_fields_to_clean = []

        for v in playlog_to_remove.values():
            if "fields" in v:
                all_fields_to_clean += list(v["fields"].keys())
        all_fields_to_clean = list(set(all_fields_to_clean))
        for fieldname in all_fields_to_clean:
            self._clean_field(fieldname, restart_timestep)
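Entries are collected first and removed in a second pass (and only on the master process) because popping from the shelve while iterating over it would mutate it mid-loop. A minimal sketch with a plain dict standing in for the play-log:

# Sketch only: a plain dict standing in for the play-log shelve.
playlog = {"9": {"fields": {}}, "10": {"fields": {}}, "11": {"fields": {}}}
restart_timestep = 10

# Collect first (iterating), then remove (mutating) in a second pass.
to_remove = {k: v for k, v in playlog.items() if int(k) >= restart_timestep}
for k in to_remove:
    playlog.pop(k)

assert sorted(playlog) == ["9"]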
Example #2
    def store_params(self, params):
        """Store parameters in casedir as params.pickle and params.txt.

        Forwarded to a Saver-instance.
        """
        self._saver.store_params(params)
        MPI.barrier(mpi_comm_world())
Example #3
    def compute(self, get):
        u = get(self.valuename)
        if u is None:
            return None

        if not hasattr(self, "u"):
            self.before_first_compute(get)

        if LooseVersion(dolfin_version()) > LooseVersion("1.6.0"):
            rank = len(u.ufl_shape)
        else:
            rank = u.rank()

        if rank > 0:
            u = u.split()
            U = []
            for i, _u in enumerate(u):
                U.append(_interpolate(self.us[i], _u))
                #U.append(self._ft.interpolate_nonmatching_mesh(_u, self.us.function_space()))
            MPI.barrier(mpi_comm_world())

            self.assigner.assign(self.u, U)
        else:
            _interpolate(self.u, u)
            MPI.barrier(mpi_comm_world())

            # FIXME: This gives a PETSc-error (VecCopy). Unnecessary interpolation used instead.
            #self.u.assign(U)
            #self.u.assign(interpolate(U, self.u.function_space()))
        return self.u
Example #4
    def _clean_casedir(self):
        "Cleans out all files produced by cbcpost in the current casedir."
        MPI.barrier(mpi_comm_world())
        if on_master_process():
            if os.path.isdir(self.get_casedir()):
                try:
                    playlog = self._fetch_playlog()
                except Exception:
                    playlog = None

                if playlog is not None:
                    all_fields = []
                    for v in playlog.values():
                        all_fields += v.get("fields", {}).keys()

                    all_fields = list(set(all_fields))
                    playlog.close()

                    for field in all_fields:
                        rmtree(os.path.join(self.get_casedir(), field))

                    for f in ["mesh.hdf5", "play.db", "params.txt",
                              "params.pickle"]:
                        if os.path.isfile(os.path.join(self.get_casedir(), f)):
                            os.remove(os.path.join(self.get_casedir(), f))

        MPI.barrier(mpi_comm_world())
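The shape of this function recurs throughout these examples: a barrier so every rank arrives with a consistent view, filesystem work on the master process only, then a second barrier before any rank proceeds. A minimal standalone sketch of the idiom (legacy pre-2018.1 dolfin API, as used above):

import os
from dolfin import MPI, mpi_comm_world

def remove_on_master(path):
    """Delete a file from rank 0 only; all ranks return after it is gone."""
    MPI.barrier(mpi_comm_world())        # everyone arrives first
    if MPI.rank(mpi_comm_world()) == 0:  # only rank 0 touches the filesystem
        if os.path.isfile(path):
            os.remove(path)
    MPI.barrier(mpi_comm_world())        # nobody proceeds until the file is gone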
Example #5
def casedir(request):
    # Some code here copied from dolfin_utils dev:

    # Get directory name of test_foo.py file
    testfile = request.module.__file__
    testfiledir = os.path.dirname(os.path.abspath(testfile))

    # Construct name test_foo_tempdir from name test_foo.py
    testfilename = os.path.basename(testfile)
    outputname = testfilename.replace(".py", "_casedir")

    # Get function name test_something from test_foo.py
    function = request.function.__name__

    # Join all of these to make a unique path for this test function
    basepath = os.path.join(testfiledir, outputname)
    casedir = os.path.join(basepath, function)

    # Unlike the dolfin_utils tempdir fixture, here we make sure the directory is _deleted_:
    gc.collect() # Workaround for possible dolfin deadlock
    MPI.barrier(mpi_comm_world())
    try:
        # Only on root node in parallel
        if MPI.size(mpi_comm_world()) == 1 or MPI.rank(mpi_comm_world()) == 0:
            shutil.rmtree(casedir)
    except OSError:
        # The directory may not exist yet; that's fine
        pass
    MPI.barrier(mpi_comm_world())

    return casedir
Example #6
    def store_mesh(self, mesh, cell_domains=None, facet_domains=None):
        """Store mesh in casedir to mesh.hdf5 (dataset Mesh) in casedir.

        Forwarded to a Saver-instance.
        """
        self._saver.store_mesh(mesh, cell_domains, facet_domains)
        MPI.barrier(mpi_comm_world())
Example #7
def gc_barrier():
    """Internal utility to easily switch on and off calls to
    gc.collect() and MPI.barrier(world) in all fixtures here.
    Helps make the tests deterministic when debugging.
    """
    gc.collect()
    if MPI.size(mpi_comm_world()) > 1:
        MPI.barrier(mpi_comm_world())
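A hypothetical usage sketch for the helper above: bracketing a fixture with gc_barrier() so that collective destructors fire at the same point on every rank (the fixture body is an arbitrary example):

import pytest
from dolfin import UnitSquareMesh

@pytest.fixture
def mesh():
    gc_barrier()            # enter the fixture in lockstep
    m = UnitSquareMesh(8, 8)
    yield m
    del m
    gc_barrier()            # collect collectively before the next test starts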
Example #9
def pytest_runtest_teardown(item, nextitem):
    # Carry out additional teardown
    if hasattr(item, "_runtest_teardown_function"):
        item._runtest_teardown_function()
    # Do the normal teardown
    item.teardown()
    # Add a MPI barrier in parallel
    MPI.barrier(MPI.comm_world)
Example #10
File: conftest.py Project: ljnpu/RBniCS
def pytest_runtest_teardown(item, nextitem):
    # Do the normal teardown
    item.teardown()
    # Add a MPI barrier in parallel
    if has_pybind11():
        MPI.barrier(MPI.comm_world)
    else:
        MPI.barrier(mpi_comm_world())
Example #11
def find_solution_presence(pp, playlog, fields):
    "Search play-log to find where solution items are saved in a loadable format"
    present_solution = defaultdict(list)

    functions = dict()
    metadatas = dict()
    for ts, data in playlog.items():
        for fieldname in data.get("fields", []):
            # Continue if field in a format we can't read back
            if not any([saveformat in loadable_formats for saveformat in data["fields"][fieldname]["save_as"]]):
                continue

            # Check if field is present and part of solution we're searching for
            is_present = False
            if fields == "default" and data["fields"][fieldname]["type"] == "SolutionField":
                is_present = True
            elif fieldname in fields:
                is_present = True

            if is_present:
                function = None
                metadata = metadatas.setdefault(fieldname, shelve.open(os.path.join(pp.get_savedir(fieldname), "metadata.db"), 'r'))
                if 'hdf5' in data["fields"][fieldname]["save_as"]:
                    filename = os.path.join(pp.get_savedir(fieldname), fieldname+'.hdf5')

                    if fieldname in functions:
                        function = functions[fieldname]
                    else:
                        function = functions.setdefault(fieldname, create_function_from_metadata(pp, fieldname, metadata, 'hdf5'))

                    present_solution[fieldname].append(Loadable(filename, fieldname, ts, data["t"], 'hdf5', function))
                elif 'xml' in data["fields"][fieldname]["save_as"]:
                    filename = os.path.join(pp.get_savedir(fieldname), fieldname+str(ts)+'.xml')

                    if fieldname in functions:
                        function = functions[fieldname]
                    else:
                        function = functions.setdefault(fieldname, create_function_from_metadata(pp, fieldname, metadata, 'xml'))

                    present_solution[fieldname].append(Loadable(filename, fieldname, ts, data["t"], 'xml', function))
                elif 'xml.gz' in data["fields"][fieldname]["save_as"]:
                    filename = os.path.join(pp.get_savedir(fieldname), fieldname+str(ts)+'.xml.gz')

                    if fieldname in functions:
                        function = functions[fieldname]
                    else:
                        function = functions.setdefault(fieldname, create_function_from_metadata(pp, fieldname, metadata, 'xml.gz'))

                    present_solution[fieldname].append(Loadable(filename, fieldname, ts, data["t"], 'xml.gz', function))
                elif 'shelve' in data["fields"][fieldname]["save_as"]:
                    filename = os.path.join(pp.get_savedir(fieldname), fieldname+'.db')
                    present_solution[fieldname].append(Loadable(filename, fieldname, ts, data["t"], "shelve", None))

    for f in metadatas.values():
        f.close()
    MPI.barrier(mpi_comm_world())
    return present_solution
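The hdf5/xml/xml.gz/shelve branches above differ mainly in how the filename is built. A small standalone sketch (not part of the library) capturing just that naming scheme:

import os

def loadable_filename(savedir, fieldname, ts, fmt):
    """Filename scheme used above: hdf5 and shelve keep all timesteps in
    one file, while xml/xml.gz write one file per timestep."""
    suffix = {
        'hdf5': fieldname + '.hdf5',
        'xml': fieldname + str(ts) + '.xml',
        'xml.gz': fieldname + str(ts) + '.xml.gz',
        'shelve': fieldname + '.db',
    }[fmt]
    return os.path.join(savedir, suffix)

# e.g. loadable_filename('case/u', 'u', 42, 'xml') -> 'case/u/u42.xml'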
Example #12
    def _clean_shelve(self, fieldname, del_metadata):
        shelvefilename = os.path.join(self._pp.get_savedir(fieldname), fieldname + ".db")
        if on_master_process():
            if os.path.isfile(shelvefilename):
                shelvefile = shelve.open(shelvefilename, 'c')
                for k, v in del_metadata.items():
                    if 'shelve' in v:
                        shelvefile.pop(str(k))
                shelvefile.close()
        MPI.barrier(mpi_comm_world())
Example #13
    def _clean_hdf5(self, fieldname, del_metadata):
        delete_from_hdf5_file = '''
        namespace dolfin {
            #include <hdf5.h>
            void delete_from_hdf5_file(const MPI_Comm comm,
                                       const std::string hdf5_filename,
                                       const std::string dataset,
                                       const bool use_mpiio)
            {
                //const hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
                // Open file existing file for append
                //hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, plist_id);
                hid_t hdf5_file_id = HDF5Interface::open_file(comm, hdf5_filename, "a", use_mpiio);

                H5Ldelete(hdf5_file_id, dataset.c_str(), H5P_DEFAULT);
                HDF5Interface::close_file(hdf5_file_id);
            }
        }
        '''
        cpp_module = compile_extension_module(
            delete_from_hdf5_file,
            additional_system_headers=["dolfin/io/HDF5Interface.h"])

        hdf5filename = os.path.join(self._pp.get_savedir(fieldname),
                                    fieldname + '.hdf5')

        if not os.path.isfile(hdf5filename):
            return

        for k, v in del_metadata.items():
            if 'hdf5' not in v:
                continue
            else:
                cpp_module.delete_from_hdf5_file(
                    mpi_comm_world(), hdf5filename, v['hdf5']['dataset'],
                    MPI.size(mpi_comm_world()) > 1)

        hdf5tmpfilename = os.path.join(self._pp.get_savedir(fieldname),
                                       fieldname + '_tmp.hdf5')
        MPI.barrier(mpi_comm_world())
        if on_master_process():
            # The h5repack availability check is disabled here; status is
            # forced to -1 so the warning branch below always runs.
            # status, result = getstatusoutput("h5repack -V")
            status, result = -1, -1
            if status != 0:
                cbc_warning(
                    "Unable to run h5repack. Will not repack hdf5-files before replay, which may cause bloated hdf5-files."
                )
            else:
                subprocess.call("h5repack %s %s" %
                                (hdf5filename, hdf5tmpfilename),
                                shell=True)
                os.remove(hdf5filename)
                os.rename(hdf5tmpfilename, hdf5filename)
        MPI.barrier(mpi_comm_world())
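The h5repack availability check is stubbed out above. A sketch of what the commented-out check could look like with the subprocess module instead of getstatusoutput:

import subprocess

def h5repack_available():
    """Return True if the h5repack CLI can be invoked (sketch only)."""
    try:
        subprocess.check_call(["h5repack", "-V"],
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL)
        return True
    except (OSError, subprocess.CalledProcessError):
        return False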
Example #14
    def wrapper(func, *args, **kwargs):

        # Run function
        rv = func(*args, **kwargs)

        # Collect garbage of temporaries of the function
        # and return collectively
        gc.collect()
        MPI.barrier(MPI.comm_world)

        return rv
Example #15
def run():
    mesh = dl.UnitSquareMesh(50, 50)
    Vr = dl.FunctionSpace(mesh, 'Lagrange', 1)
    Vphi = dl.FunctionSpace(mesh, 'Lagrange', 2)
    Vphidofmap = Vphi.dofmap().dofs()
    test, trial = dl.TestFunction(Vphi), dl.TrialFunction(Vphi)
    u, v = dl.Function(Vphi), dl.Function(Vphi)
    rho = dl.Function(Vr)
    Mweak = dl.inner(rho * test, trial) * dl.dx
    Mprime = LumpedMassMatrixPrime(Vr, Vphi, None)
    h = 1e-5
    fact = [1.0, -1.0]

    RHO = [
        dl.interpolate(dl.Expression('2.0 + sin(n*pi*x[0])*sin(n*pi*x[1])', n=1.0, degree=10), Vr),
        dl.interpolate(dl.Expression('2.0 + sin(n*pi*x[0])*sin(n*pi*x[1])', n=8.0, degree=10), Vr),
        dl.interpolate(dl.Expression('2.0 + sin(2*pi*x[0])*sin(1*pi*x[1])*(x[0]<0.5)', degree=10), Vr),
        dl.interpolate(dl.Expression('2.0 + 2.0*(x[0]<0.5)*(x[1]<0.5) - 1.8*(x[0]>=0.5)*(x[1]<0.5)', degree=10), Vr),
    ]

    np.random.seed(11)
    locsize = len(u.vector().array())
    for jj, rho1 in enumerate(RHO):
        if mpirank == 0: print('\nmedium {}'.format(jj))
        setfct(rho, rho1)
        M = dl.assemble(Mweak)
        Ml = LumpedMatrixSolverS(Vphi)
        Ml.set_operator(M)
        Mprime.updater(Ml.ratio)
        for ii in range(5):
            rndvecu = np.random.randn(Vphi.dim())
            rndvecv = np.random.randn(Vphi.dim())
            if mpirank == 0: print('test {}'.format(ii))
            rnddir = dl.interpolate(
                dl.Expression('2.0+sin(n*pi*x[0])*sin(n*pi*x[1])', n=ii+1, degree=10), Vr)
            setfct(u, rndvecu[Vphidofmap])
            setfct(v, rndvecv[Vphidofmap])
            analytical = rnddir.vector().inner(
                Mprime.get_gradient(u.vector(), v.vector()))
            uMv = []
            for ff in fact:
                setfct(rho, rho1)
                rho.vector().axpy(ff * h, rnddir.vector())
                M = dl.assemble(Mweak)
                Ml = LumpedMatrixSolverS(Vphi)
                Ml.set_operator(M)
                uMv.append(u.vector().inner(Ml * v.vector()))
            fd = (uMv[0] - uMv[1]) / (2 * h)
            err = np.abs((analytical - fd) / analytical)
            if mpirank == 0:
                print('analytical={}, fd={}, err={}'.format(
                    analytical, fd, err), end=' ')
                if err < 1e-6:
                    print('\t =>> OK!!')
                else:
                    print('')
            MPI.barrier(mycomm)
Example #16
def test_mpset():
    #set_log_level(DEBUG)

    # Print parameters and their values
    #mpset.show()

    # Check that assignment out of range raises
    # FIXME: dolfin/parameter/Parameter.cpp is broken.
    #        It doesn't raise when assigning a value out of range;
    #        see 921c56cee4f50f016a07f49a5e90f6627c7317a6
    # with pytest.raises(RuntimeError):
    #     mpset["discretization"]["N"] = 1
    # with pytest.raises(RuntimeError):
    #     mpset["model"]["mobility"]["beta"] = 2.0
    with pytest.raises(RuntimeError):
        mpset["model"]["mobility"]["m"] = 0.0

    # Try to add parameter
    mpset.add("foo", "bar")
    assert mpset["foo"] == "bar"

    # Try direct access to a parameter
    mpset["foo"] = "bar_"
    assert mpset["foo"] == "bar_"

    # Try to write parameters to a file
    comm = mpi_comm_world()
    tempdir = "/tmp/pytest-of-fenics"
    fname = tempdir + "/foo.xml"
    mpset.write(comm, fname)
    if MPI.rank(comm) == 0:
        assert os.path.isfile(fname)
    MPI.barrier(comm)  # wait until the file is written

    # Change back value of parameter 'foo'
    mpset["foo"] = "bar"
    assert mpset["foo"] == "bar"

    # Try to read parameters back
    mpset.read(fname)
    assert mpset["foo"] == "bar_"
    MPI.barrier(comm)  # wait until each process finishes reading
    if MPI.rank(comm) == 0:
        os.remove(fname)
    del fname

    # Check that every other call points to the same object
    assert id(MuflonParameterSet()) == id(mpset)

    # Cleanup
    set_log_level(INFO)
    mpset.refresh()
Example #17
    def backup_playlog(self):
        "Create a backup of the playlog"
        if MPI.rank(mpi_comm_world()) == 0:
            casedir = self.postproc.get_casedir()
            playlog_file = os.path.join(casedir, "play.db")
            i = 0
            backup_file = playlog_file + ".bak" + str(i)

            while os.path.isfile(backup_file):
                i += 1
                backup_file = playlog_file + ".bak" + str(i)
            os.system("cp %s %s" % (playlog_file, backup_file))
        MPI.barrier(mpi_comm_world())
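Note that os.system("cp ...") above is unix-only; a portable equivalent of the backup-name search plus copy, sketched with shutil (same logic, hypothetical helper name):

import os
import shutil

def next_backup_name(path):
    """First free path of the form path.bak0, path.bak1, ..."""
    i = 0
    while os.path.isfile(path + ".bak" + str(i)):
        i += 1
    return path + ".bak" + str(i)

# On rank 0 only: shutil.copy(playlog_file, next_backup_name(playlog_file))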
Example #18
def pytest_runtest_teardown(item):
    """Collect garbage after every test to force calling
    destructors which might be collective"""

    # Do the normal teardown
    item.teardown()

    # Collect the garbage (call destructors collectively)
    del item
    # NOTE: How are we sure that 'item' does not hold references
    #       to temporaries and someone else does not hold a reference
    #       to 'item'?! Well, it seems that it works...
    gc.collect()
    MPI.barrier(MPI.comm_world)
Example #19
def print_reference(results):
    "Print nicely formatted values for gluing into code as a reference"
    MPI.barrier(MPI.comm_world)
    if MPI.rank(MPI.comm_world) == 0:
        print("reference = {", end=' ')
        for (i, result) in enumerate(results):
            if i > 0:
                print("             ", end=' ')
            print("(\"%s\", %d): %.16g" % result, end=' ')
            if i < len(results) - 1:
                print(",")
            else:
                print("}")
    MPI.barrier(MPI.comm_world)
Example #20
def pytest_runtest_teardown(item):
    """Collect garbage after every test to force calling
    destructors which might be collective"""

    # Do the normal teardown
    item.teardown()

    # Collect the garbage (call destructors collectively)
    del item
    # NOTE: How are we sure that 'item' does not hold references
    #       to temporaries and someone else does not hold a reference
    #       to 'item'?! Well, it seems that it works...
    gc.collect()
    MPI.barrier(mpi_comm_world())
Example #21
def _create_tempdir(request):
    # Get directory name of test_foo.py file
    testfile = request.module.__file__
    testfiledir = os.path.dirname(os.path.abspath(testfile))

    # Construct name test_foo_tempdir from name test_foo.py
    testfilename = os.path.basename(testfile)
    outputname = testfilename.replace(".py", "_tempdir_{}".format(
        worker_id(request)))

    # Get function name test_something from test_foo.py
    function = request.function.__name__

    # Join all of these to make a unique path for this test function
    basepath = os.path.join(testfiledir, outputname)
    path = os.path.join(basepath, function)

    # Add a sequence number to avoid collisions when tests are
    # otherwise parameterized
    if MPI.rank(MPI.comm_world) == 0:
        _create_tempdir._sequencenumber[path] += 1
        sequencenumber = _create_tempdir._sequencenumber[path]
        sequencenumber = MPI.sum(MPI.comm_world, sequencenumber)
    else:
        sequencenumber = MPI.sum(MPI.comm_world, 0)
    path += "__" + str(sequencenumber)

    # Delete and re-create directory on root node
    if MPI.rank(MPI.comm_world) == 0:
        # First time visiting this basepath, delete the old and create
        # a new
        if basepath not in _create_tempdir._basepaths:
            _create_tempdir._basepaths.add(basepath)
            if os.path.exists(basepath):
                shutil.rmtree(basepath)
            # Make sure we have the base path test_foo_tempdir for
            # this test_foo.py file
            if not os.path.exists(basepath):
                os.mkdir(basepath)

        # Delete path from old test run
        if os.path.exists(path):
            shutil.rmtree(path)
        # Make sure we have the path for this test execution:
        # e.g. test_foo_tempdir/test_something__3
        if not os.path.exists(path):
            os.mkdir(path)
    MPI.barrier(MPI.comm_world)

    return path
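The MPI.sum call above doubles as a broadcast: rank 0 contributes the incremented counter and every other rank contributes 0, so the sum delivers rank 0's value everywhere. Reduced to a sketch (hypothetical helper, same 2018.x-style API as the example):

from dolfin import MPI

def broadcast_from_root(value_on_root):
    """All ranks return rank 0's value; non-root arguments are ignored."""
    contribution = value_on_root if MPI.rank(MPI.comm_world) == 0 else 0
    return MPI.sum(MPI.comm_world, contribution)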
Example #22
    def _clean_files(self, fieldname, del_metadata):
        for k, v in del_metadata.items():
            for i in v.values():
                MPI.barrier(mpi_comm_world())
                try:
                    filename = i["filename"]
                except (KeyError, TypeError):
                    continue

                fullpath = os.path.join(self._pp.get_savedir(fieldname), filename)

                if on_master_process():
                    os.remove(fullpath)
                MPI.barrier(mpi_comm_world())
Example #23
def print_errors(errors):
    MPI.barrier(MPI.comm_world)
    if MPI.rank(MPI.comm_world) == 0:
        print("Checking results")
        print("----------------")
        for (mesh_file, degree, norm, ref, diff) in errors:
            print("(%s, %d):\t" % (mesh_file, degree), end=' ')
            if diff is None:
                print("missing reference")
            else:
                print("*** ERROR", end=' ')
                print(
                    "(norm = %.16g, reference = %.16g, relative diff = %.16g)"
                    % (norm, ref, diff))
    MPI.barrier(MPI.comm_world)
Example #24
File: io.py Project: johannesring/Oasis
def create_initial_folders(folder, restart_folder, sys_comp, tstep, info_red,
                           scalar_components, output_timeseries_as_vector,
                           **NS_namespace):
    """Create necessary folders."""
    info_red("Creating initial folders")
    # To avoid writing over old data create a new folder for each run
    if MPI.rank(MPI.comm_world) == 0:
        try:
            makedirs(folder)
        except OSError:
            pass

    MPI.barrier(MPI.comm_world)
    newfolder = path.join(folder, 'data')
    if restart_folder:
        newfolder = path.join(newfolder, restart_folder.split('/')[-2])
    else:
        if not path.exists(newfolder):
            newfolder = path.join(newfolder, '1')
        else:
            # Ignore hidden files when finding the previous run number
            previous = [f for f in listdir(newfolder) if not f.startswith('.')]
            previous = max(map(int, previous)) if previous else 0
            newfolder = path.join(newfolder, str(previous + 1))

    MPI.barrier(MPI.comm_world)
    if MPI.rank(MPI.comm_world) == 0:
        if not restart_folder:
            #makedirs(path.join(newfolder, "Voluviz"))
            #makedirs(path.join(newfolder, "Stats"))
            #makedirs(path.join(newfolder, "VTK"))
            makedirs(path.join(newfolder, "Timeseries"))
            makedirs(path.join(newfolder, "Checkpoint"))

    tstepfolder = path.join(newfolder, "Timeseries")
    tstepfiles = {}
    comps = sys_comp
    if output_timeseries_as_vector:
        comps = ['p', 'u'] + scalar_components

    for ui in comps:
        tstepfiles[ui] = XDMFFile(
            MPI.comm_world,
            path.join(tstepfolder, ui + '_from_tstep_{}.xdmf'.format(tstep)))
        tstepfiles[ui].parameters["rewrite_function_mesh"] = False
        tstepfiles[ui].parameters["flush_output"] = True

    return newfolder, tstepfiles
Example #25
File: io.py Project: mikaem/Oasis
def create_initial_folders(folder, restart_folder, sys_comp, tstep, info_red,
                           scalar_components, output_timeseries_as_vector,
                           **NS_namespace):
    """Create necessary folders."""
    info_red("Creating initial folders")
    # To avoid writing over old data create a new folder for each run
    if MPI.rank(MPI.comm_world) == 0:
        try:
            makedirs(folder)
        except OSError:
            pass

    MPI.barrier(MPI.comm_world)
    newfolder = path.join(folder, 'data')
    if restart_folder:
        newfolder = path.join(newfolder, restart_folder.split('/')[-2])
    else:
        if not path.exists(newfolder):
            newfolder = path.join(newfolder, '1')
        else:
            previous = listdir(newfolder)
            previous = max(map(int, previous)) if previous else 0
            newfolder = path.join(newfolder, str(previous + 1))

    MPI.barrier(MPI.comm_world)
    if MPI.rank(MPI.comm_world) == 0:
        if not restart_folder:
            #makedirs(path.join(newfolder, "Voluviz"))
            #makedirs(path.join(newfolder, "Stats"))
            #makedirs(path.join(newfolder, "VTK"))
            makedirs(path.join(newfolder, "Timeseries"))
            makedirs(path.join(newfolder, "Checkpoint"))

    tstepfolder = path.join(newfolder, "Timeseries")
    tstepfiles = {}
    comps = sys_comp
    if output_timeseries_as_vector:
        comps = ['p', 'u'] + scalar_components

    for ui in comps:
        tstepfiles[ui] = XDMFFile(MPI.comm_world, path.join(
            tstepfolder, ui + '_from_tstep_{}.xdmf'.format(tstep)))
        tstepfiles[ui].parameters["rewrite_function_mesh"] = False
        tstepfiles[ui].parameters["flush_output"] = True

    return newfolder, tstepfiles
Example #26
def _create_tempdir(request):
    # Get directory name of test_foo.py file
    testfile = request.module.__file__
    testfiledir = os.path.dirname(os.path.abspath(testfile))

    # Construct name test_foo_tempdir from name test_foo.py
    testfilename = os.path.basename(testfile)
    outputname = testfilename.replace(".py", "_tempdir")

    # Get function name test_something from test_foo.py
    function = request.function.__name__

    # Join all of these to make a unique path for this test function
    basepath = os.path.join(testfiledir, outputname)
    path = os.path.join(basepath, function)

    # Add a sequence number to avoid collisions when tests are otherwise parameterized
    if MPI.rank(mpi_comm_world()) == 0:
        _create_tempdir._sequencenumber[path] += 1
        sequencenumber = _create_tempdir._sequencenumber[path]
        sequencenumber = MPI.sum(mpi_comm_world(), sequencenumber)
    else:
        sequencenumber = MPI.sum(mpi_comm_world(), 0)
    path += "__" + str(sequencenumber)

    # Delete and re-create directory on root node
    if MPI.rank(mpi_comm_world()) == 0:
        # First time visiting this basepath, delete the old and create a new
        if basepath not in _create_tempdir._basepaths:
            _create_tempdir._basepaths.add(basepath)
            #if os.path.exists(basepath):
            #    shutil.rmtree(basepath)
            # Make sure we have the base path test_foo_tempdir for this test_foo.py file
            if not os.path.exists(basepath):
                os.mkdir(basepath)

        # Delete path from old test run
        #if os.path.exists(path):
        #    shutil.rmtree(path)
        # Make sure we have the path for this test execution: e.g. test_foo_tempdir/test_something__3
        if not os.path.exists(path):
            os.mkdir(path)
    MPI.barrier(mpi_comm_world())

    return path
Example #27
def checkpoint(dvp_, default_variables, checkpoint_folder, mesh, **namespace):
    """Utility function for storing the current parameters and the last two time steps"""
    # Only update variables that exists in default_variables
    default_variables.update(
        (k, namespace[k])
        for k in (default_variables.keys() & namespace.keys()))

    # Dump default parameters
    if MPI.rank(MPI.comm_world) == 0:
        with open(str(checkpoint_folder.joinpath("default_variables.pickle")),
                  "bw") as f:
            pickle.dump(default_variables, f)

    # Dump physical fields
    fields = _get_fields(dvp_, mesh)

    # Write fields to temporary file to avoid corruption of existing checkpoint
    for name, field in fields:
        checkpoint_path = str(
            checkpoint_folder.joinpath("tmp_" + name + ".xdmf"))
        with XDMFFile(MPI.comm_world, checkpoint_path) as f:
            f.write_checkpoint(field, name)

    # Move to correct checkpoint name
    MPI.barrier(MPI.comm_world)
    if MPI.rank(MPI.comm_world) == 0:
        for name, _ in fields:
            for suffix in [".h5", ".xdmf"]:
                new_name = checkpoint_folder.joinpath("checkpoint_" + name +
                                                      suffix)
                if new_name.exists():
                    checkpoint_folder.joinpath("tmp_" + name +
                                               suffix).replace(new_name)
                else:
                    checkpoint_folder.joinpath("tmp_" + name +
                                               suffix).rename(new_name)

            # new_name still points at the .xdmf file (the last suffix in the
            # loop above); update its internal reference to the renamed files
            with open(new_name, "r") as f:
                text = f.read().replace("tmp_", "checkpoint_")

            with open(new_name, "w") as f:
                f.write(text)
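The tmp-write-then-replace dance above keeps the previous checkpoint readable if the solver dies mid-write. The same idea reduced to plain files, as a dolfin-independent sketch:

from pathlib import Path

def atomic_write(path, data):
    """Write to a temporary name, then rename over the target; the old
    file survives intact if the write is interrupted."""
    path = Path(path)
    tmp = path.with_name("tmp_" + path.name)
    tmp.write_bytes(data)
    tmp.replace(path)  # atomic on POSIX when both names share a filesystem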
Example #28
def create_initial_folders(folder, restart_folder, fields, tstep, parameters):
    """ Create initial folders """
    info_cyan("Creating folders.")

    makedirs_safe(folder)
    MPI.barrier(mpi_comm_world())
    if restart_folder:
        newfolder = restart_folder.split("Checkpoint")[0]
    else:
        previous_list = os.listdir(folder)
        if len(previous_list) == 0:
            newfolder = os.path.join(folder, "1")
        else:
            previous = max([
                int(entry) if entry.isdigit() else 0 for entry in previous_list
            ])
            newfolder = os.path.join(folder, str(previous + 1))

    MPI.barrier(mpi_comm_world())
    tstepfolder = os.path.join(newfolder, "Timeseries")
    makedirs_safe(tstepfolder)
    makedirs_safe(os.path.join(newfolder, "Statistics"))
    settingsfolder = os.path.join(newfolder, "Settings")
    makedirs_safe(settingsfolder)
    makedirs_safe(os.path.join(newfolder, "Checkpoint"))

    # Initialize timestep files
    tstepfiles = dict()
    for field in fields:
        filename = os.path.join(tstepfolder,
                                field + "_from_tstep_{}.xdmf".format(tstep))
        tstepfiles[field] = XDMFFile(mpi_comm_world(), filename)
        tstepfiles[field].parameters["rewrite_function_mesh"] = False
        tstepfiles[field].parameters["flush_output"] = True

    # Dump settings
    if mpi_is_root():
        dump_parameters(
            parameters,
            os.path.join(settingsfolder,
                         "parameters_from_tstep_{}.dat".format(tstep)))

    return newfolder, tstepfiles
Example #29
def safe_mkdir(dir):
    """Create directory without exceptions in parallel."""
    # Create directory
    if not os.path.isdir(dir):
        try:
            os.makedirs(dir, mode=0o775)
        except OSError:
            # Allow race condition when multiple processes
            # work in same directory, ignore exception.
            pass

    # Wait for all processes to finish, hopefully somebody
    # managed to create the directory...
    MPI.barrier(mpi_comm_world())

    # Raise if this failed
    if not os.path.isdir(dir):
        raise Exception("FAILED TO CREATE DIRECTORY %s" % (dir,))
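Hypothetical usage: every rank calls safe_mkdir collectively, and the barrier inside means the directory exists on all ranks when the call returns (assuming a shared filesystem):

safe_mkdir(os.path.join(casedir, "results"))  # collective call on every rank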
Example #30
    def finalize_all(self):
        "Finalize all Fields after last timestep has been computed."
        finalized = []
        for name in self._sorted_fields_keys:
            if name in self._finalized:
                continue
            field = self._fields[name]
            fp = field.params
            if fp.finalize and (fp.safe or fp.plot
                                or fp.save) and name not in fp:
                self.get(name, compute=False, finalize=True)
                finalized.append(field)

        t = self._cache[0].get("t", -1e16)
        timestep = self._cache[0].get("timestep", -1e16)

        self._saver.update(t, timestep, self._cache[0], finalized)
        self._saver._flush_data()
        self._plotter.update(t, timestep, self._cache[0], finalized)
        self._saver._close_shelves()
        MPI.barrier(mpi_comm_world())
Example #31
    def _clean_xdmf(self, fieldname, del_metadata):
        basename = os.path.join(self._pp.get_savedir(fieldname), fieldname)
        if os.path.isfile(basename + ".xdmf"):
            MPI.barrier(mpi_comm_world())

            i = 0
            while True:
                h5_filename = basename + "_RS" + str(i) + ".h5"
                if not os.path.isfile(h5_filename):
                    break
                i = i + 1

            xdmf_filename = basename + "_RS" + str(i) + ".xdmf"
            MPI.barrier(mpi_comm_world())

            if on_master_process():
                os.rename(basename + ".h5", h5_filename)
                os.rename(basename + ".xdmf", xdmf_filename)

                with open(xdmf_filename, 'r') as f:
                    text = f.read()

                with open(xdmf_filename, 'w') as new_f:
                    new_f.write(
                        text.replace(
                            os.path.split(basename)[1] + ".h5",
                            os.path.split(h5_filename)[1]))
        MPI.barrier(mpi_comm_world())
Example #32
    def update_all(self, solution, t, timestep):
        "Updates cache, plan, play log and executes plan."
        MPI.barrier(mpi_comm_world())

        # TODO: Better design solution to making these variables accessible the right places?
        self._solution = solution

        # Update play log
        self._saver._update_playlog(t, timestep)

        # Update cache to keep what's needed later according to plan, forget what we don't need
        self._update_cache(t, timestep)

        # Plan what we need to compute now and in near future based on action triggers and dependencies
        self._plan, self._finalize_plan, self._last_trigger_time = \
            self._planner.update(self._fields, self._full_dependencies, self._dependencies, t, timestep)
        self._timer.completed("PP: updated plan.")

        # Compute what's needed according to plan
        self._execute_plan(t, timestep)
        self._timer.completed("PP: execute plan")

        triggered_or_finalized = []
        for name in self._cache[0]:
            if (name in self._finalize_plan
                    or self._last_trigger_time[name][1] == timestep):
                triggered_or_finalized.append(self._fields[name])

        self._saver.update(t, timestep, self._cache[0], triggered_or_finalized)
        self._timer.completed("PP: completed saving")

        self._plotter.update(t, timestep, self._cache[0],
                             triggered_or_finalized)
        self._timer.completed("PP: completed plotting")

        self._update_all_count += 1
        MPI.barrier(mpi_comm_world())
Example #33
def save_checkpoint(tstep, t, w_, w_1, newfolder, parameters):
    """ Save checkpoint files.

    A part of this is taken from the Oasis code."""
    checkpointfolder = os.path.join(newfolder, "Checkpoint")
    parameters["num_processes"] = MPI.size(mpi_comm_world())
    parameters["t_0"] = t
    parameters["tstep"] = tstep
    parametersfile = os.path.join(checkpointfolder, "parameters.dat")
    parametersfile_old = parametersfile + ".old"
    if mpi_is_root():
        # In case of failure, keep old file.
        if os.path.exists(parametersfile):
            os.system("mv {0} {1}".format(parametersfile, parametersfile_old))
        dump_parameters(parameters, parametersfile)

    MPI.barrier(mpi_comm_world())
    h5filename = os.path.join(checkpointfolder, "fields.h5")
    h5filename_old = h5filename + ".old"
    # In case of failure, keep old file.
    if mpi_is_root() and os.path.exists(h5filename):
        os.system("mv {0} {1}".format(h5filename, h5filename_old))
    h5file = HDF5File(mpi_comm_world(), h5filename, "w")
    h5file.flush()
    for field in w_:
        info_red("Storing subproblem: " + field)
        MPI.barrier(mpi_comm_world())
        h5file.write(w_[field], field + "/current")
        if field in w_1:
            h5file.write(w_1[field], field + "/previous")
        MPI.barrier(mpi_comm_world())
    h5file.close()
    # Since program is still running, delete the old files.
    remove_safe(h5filename_old)
    MPI.barrier(mpi_comm_world())
    remove_safe(parametersfile_old)
Example #34
File: io.py Project: johannesring/Oasis
def save_checkpoint_solution_h5(tstep, q_, q_1, newfolder, u_components,
                                NS_parameters):
    """Overwrite solution in Checkpoint folder.

    For safety reasons, in case the solver is interrupted, take backup of
    solution first.

    Must be restarted using the same mesh-partitioning. This will be fixed
    soon. (MM)

    """
    checkpointfolder = path.join(newfolder, "Checkpoint")
    NS_parameters["num_processes"] = MPI.size(MPI.comm_world)
    if MPI.rank(MPI.comm_world) == 0:
        if path.exists(path.join(checkpointfolder, "params.dat")):
            system('cp {0} {1}'.format(
                path.join(checkpointfolder, "params.dat"),
                path.join(checkpointfolder, "params_old.dat")))
        with open(path.join(checkpointfolder, "params.dat"), 'wb') as f:
            pickle.dump(NS_parameters, f)

    MPI.barrier(MPI.comm_world)
    for ui in q_:
        h5file = path.join(checkpointfolder, ui + '.h5')
        oldfile = path.join(checkpointfolder, ui + '_old.h5')
        # For safety reasons...
        if path.exists(h5file):
            if MPI.rank(MPI.comm_world) == 0:
                system('cp {0} {1}'.format(h5file, oldfile))
        MPI.barrier(MPI.comm_world)
        ###
        newfile = HDF5File(MPI.comm_world, h5file, 'w')
        newfile.flush()
        newfile.write(q_[ui].vector(), '/current')
        if ui in u_components:
            newfile.write(q_1[ui].vector(), '/previous')
        if path.exists(oldfile):
            if MPI.rank(MPI.comm_world) == 0:
                system('rm {0}'.format(oldfile))
        MPI.barrier(MPI.comm_world)
        newfile.close()
    if MPI.rank(MPI.comm_world) == 0 and path.exists(
            path.join(checkpointfolder, "params_old.dat")):
        system('rm {0}'.format(path.join(checkpointfolder, "params_old.dat")))
Example #35
def save_checkpoint_solution_h5(tstep, q_, q_1, newfolder, u_components,
                                NS_parameters):
    """Overwrite solution in Checkpoint folder.

    For safety reasons, in case the solver is interrupted, take backup of
    solution first.

    Must be restarted using the same mesh-partitioning. This will be fixed
    soon. (MM)

    """
    checkpointfolder = path.join(newfolder, "Checkpoint")
    NS_parameters["num_processes"] = MPI.size(MPI.comm_world)
    if MPI.rank(MPI.comm_world) == 0:
        if path.exists(path.join(checkpointfolder, "params.dat")):
            system('cp {0} {1}'.format(path.join(checkpointfolder, "params.dat"),
                                       path.join(checkpointfolder, "params_old.dat")))
        with open(path.join(checkpointfolder, "params.dat"), 'wb') as f:
            pickle.dump(NS_parameters, f)

    MPI.barrier(MPI.comm_world)
    for ui in q_:
        h5file = path.join(checkpointfolder, ui + '.h5')
        oldfile = path.join(checkpointfolder, ui + '_old.h5')
        # For safety reasons...
        if path.exists(h5file):
            if MPI.rank(MPI.comm_world) == 0:
                system('cp {0} {1}'.format(h5file, oldfile))
        MPI.barrier(MPI.comm_world)
        ###
        newfile = HDF5File(MPI.comm_world, h5file, 'w')
        newfile.flush()
        newfile.write(q_[ui].vector(), '/current')
        if ui in u_components:
            newfile.write(q_1[ui].vector(), '/previous')
        if path.exists(oldfile):
            if MPI.rank(MPI.comm_world) == 0:
                system('rm {0}'.format(oldfile))
        MPI.barrier(MPI.comm_world)
        newfile.close()
    if MPI.rank(MPI.comm_world) == 0 and path.exists(path.join(checkpointfolder, "params_old.dat")):
        system('rm {0}'.format(path.join(checkpointfolder, "params_old.dat")))
Example #36
    V = FunctionSpace(mesh, 'Lagrange', q)
    Vl = FunctionSpace(mesh, 'Lagrange', 1)
    Dt = h/(q*alpha*c)
    if myrank == 0: print('\n\th = {} - Dt = {}'.format(h, Dt))

    Wave = AcousticWave({'V':V, 'Vl':Vl, 'Vr':Vl})
    Wave.timestepper = 'backward'
    Wave.lump = True
    Wave.exact = interpolate(uex_expr, V)
    Wave.bc = DirichletBC(V, ubc, u0_boundary)
    Wave.update({'lambda': lam, 'rho': rho, 't0': 0.0, 'tf': tf, 'Dt': Dt,
                 'u0init': interpolate(u0_expr, V), 'utinit': Function(V)})
    sol, error = Wave.solve()
    ERROR.append(error)
    if myrank == 0: print('relative error = {:.5e}'.format(error))
    if mycomm is not None: MPI.barrier(mycomm)

if myrank == 0:
    # Order of convergence:
    CONVORDER = []
    for ii in range(len(ERROR)-1):
        CONVORDER.append(np.log(ERROR[ii+1]/ERROR[ii])/np.log((1./NN[ii+1])/(1./NN[ii])))
    print('\n\norder of convergence:', CONVORDER)

# Save plots:
try:
    boolplot = int(sys.argv[1])
except (IndexError, ValueError):
    boolplot = 0
if boolplot > 0:
    filename, ext = splitext(sys.argv[0])
Example #37
rho2 = Function(Vr)


try:
    myrun = int(sys.argv[1])
except (IndexError, ValueError):
    myrun = 2

if myrun == 1:
    weak_1 = lam1*inner(nabla_grad(trial), nabla_grad(test))*dx
    weak_2 = inner(lam2*nabla_grad(trial), nabla_grad(test))*dx
    weak_V = inner(lamV*nabla_grad(trial), nabla_grad(test))*dx

    lam1.vector()[:] = 1.0
    if mpirank == 0: print('Start assembling K1')
    MPI.barrier(mpicomm)
    t0 = time()
    K1 = assemble(weak_1)
    MPI.barrier(mpicomm)
    t1 = time()
    if mpirank == 0: print('Time to assemble K1 = {}'.format(t1-t0))

    lam2.vector()[:] = 1.0
    if mpirank == 0: print('Start assembling K2')
    MPI.barrier(mpicomm)
    t0 = time()
    K2 = assemble(weak_2)
    MPI.barrier(mpicomm)
    t1 = time()
    if mpirank == 0: print('Time to assemble K2 = {}'.format(t1-t0))