def test_shelve_save(mesh, casedir):
    mtf = MockTupleField(dict(save=True, save_as="shelve"))
    msf = MockScalarField(dict(save=True, save_as="shelve"))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([mtf, msf])

    pp.update_all({}, 0.0, 0)
    pp.update_all({}, 0.1, 1)
    pp.update_all({}, 0.2, 2)
    pp.finalize_all()

    for mf in [mtf, msf]:
        assert os.path.isdir(pp.get_savedir(mf.name))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), "metadata.db"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), mf.name + ".db"))

        md = shelve.open(os.path.join(pp.get_savedir(mf.name), "metadata.db"), 'r')
        assert 'shelve' in md["0"]
        assert md['saveformats'] == ['shelve']
        md.close()

        # Read back all timesteps; the last entry should match the
        # current value of the field
        data = shelve.open(os.path.join(pp.get_savedir(mf.name), mf.name + ".db"), 'r')
        for i in ["0", "1", "2"]:
            d = data[i]
        data.close()

        assert d == pp.get(mf.name)

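# The `mesh` and `casedir` arguments used throughout these tests are pytest
# fixtures defined elsewhere in the suite, and the tests assume imports along
# the lines sketched below. This is a best guess: the mesh size, the use of
# pytest's tmpdir, and the exact cbcpost import paths are assumptions, and the
# Mock* fields are provided by the suite's own helpers (e.g. a conftest.py).
#
#   import os
#   import shelve
#
#   import pytest
#   from dolfin import Function, HDF5File, MPI, mpi_comm_world, norm, UnitSquareMesh
#   from cbcpost import PostProcessor, SpacePool, Norm, TimeIntegral, Replay
#
#   @pytest.fixture
#   def mesh():
#       return UnitSquareMesh(8, 8)
#
#   @pytest.fixture
#   def casedir(tmpdir):
#       return str(tmpdir.join("casedir"))
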
def test_default_save(mesh, casedir):
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)

    mff = MockFunctionField(Q, dict(save=True))
    mvff = MockVectorFunctionField(V, dict(save=True))
    mtf = MockTupleField(dict(save=True))
    msf = MockScalarField(dict(save=True))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([mff, mvff, mtf, msf])

    pp.update_all({}, 0.0, 0)
    pp.update_all({}, 0.1, 1)
    pp.update_all({}, 0.2, 2)
    pp.finalize_all()

    for mf in [mff, mvff]:
        assert os.path.isdir(pp.get_savedir(mf.name))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), "metadata.db"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), mf.name + ".hdf5"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), mf.name + ".h5"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), mf.name + ".xdmf"))
        assert len(os.listdir(pp.get_savedir(mf.name))) == 4

        md = shelve.open(os.path.join(pp.get_savedir(mf.name), "metadata.db"), 'r')
        assert 'hdf5' in md["0"]
        assert 'hdf5' in md['saveformats']
        assert 'xdmf' in md["0"]
        assert 'xdmf' in md['saveformats']
        assert set(md['saveformats']) == set(['hdf5', 'xdmf'])
        md.close()

    for mf in [mtf, msf]:
        assert os.path.isdir(pp.get_savedir(mf.name))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), "metadata.db"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), mf.name + ".db"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), mf.name + ".txt"))

        md = shelve.open(os.path.join(pp.get_savedir(mf.name), "metadata.db"), 'r')
        assert 'txt' in md["0"]
        assert 'txt' in md['saveformats']
        assert 'shelve' in md["0"]
        assert 'shelve' in md['saveformats']
        assert set(md['saveformats']) == set(['txt', 'shelve'])
        md.close()

def test_basic_replay(mesh, casedir):
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([
        MockFunctionField(Q, dict(save=True)),
        MockVectorFunctionField(V, dict(save=True)),
    ])

    replay_fields = lambda save: [
        Norm("MockFunctionField", dict(save=save)),
        Norm("MockVectorFunctionField", dict(save=save)),
        TimeIntegral("Norm_MockFunctionField", dict(save=save)),
    ]
    rf_names = [f.name for f in replay_fields(False)]

    # Add fields, but don't save (for testing)
    pp.add_fields(replay_fields(False))

    # Solutions to check against
    checks = {}
    pp.update_all({}, 0.0, 0)
    checks[0] = dict([(name, pp.get(name)) for name in rf_names])
    pp.update_all({}, 0.1, 1)
    checks[1] = dict([(name, pp.get(name)) for name in rf_names])
    pp.update_all({}, 0.2, 2)
    pp.finalize_all()
    checks[2] = dict([(name, pp.get(name)) for name in rf_names])

    # Make sure that nothing is saved yet
    for name in rf_names:
        assert not os.path.isfile(os.path.join(pp.get_savedir(name), name + ".db"))

    # ----------- Replay -----------------
    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([
        MockFunctionField(Q),
        MockVectorFunctionField(V),
    ])

    # This time, save the fields
    pp.add_fields(replay_fields(True))

    replayer = Replay(pp)
    replayer.replay()

    # Test that replayed solution is the same as computed in the original "solve"
    for name in rf_names:
        data = shelve.open(os.path.join(pp.get_savedir(name), name + ".db"), 'r')
        for i in range(3):
            assert data.get(str(i), None) == checks[i][name] \
                or abs(data.get(str(i), None) - checks[i][name]) < 1e-8
        data.close()

def test_xmlgz_save(mesh, casedir):
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)

    mff = MockFunctionField(Q, dict(save=True, save_as="xml.gz"))
    mvff = MockVectorFunctionField(V, dict(save=True, save_as="xml.gz"))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([mff, mvff])

    pp.update_all({}, 0.0, 0)
    pp.update_all({}, 0.1, 1)
    pp.update_all({}, 0.2, 2)
    pp.finalize_all()

    for mf, FS in [(mff, Q), (mvff, V)]:
        assert os.path.isdir(pp.get_savedir(mf.name))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), "metadata.db"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), "mesh.hdf5"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), mf.name + "0.xml.gz"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), mf.name + "1.xml.gz"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), mf.name + "2.xml.gz"))

        md = shelve.open(os.path.join(pp.get_savedir(mf.name), "metadata.db"), 'r')
        assert 'xml.gz' in md["0"]
        assert 'xml.gz' in md["1"]
        assert 'xml.gz' in md["2"]
        assert 'xml.gz' in md['saveformats']
        assert md['saveformats'] == ['xml.gz']
        md.close()

        # metadata.db + mesh.hdf5 + 3 xml.gz files
        assert len(os.listdir(pp.get_savedir(mf.name))) == 1 + 1 + 3

        # Read back all timesteps; the last one should match the current value
        for i in ["0", "1", "2"]:
            f = Function(FS, os.path.join(pp.get_savedir(mf.name),
                                          mf.name + i + ".xml.gz"))
        assert norm(f) == norm(pp.get(mf.name))

def test_hdf5_save(mesh, casedir):
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)

    mff = MockFunctionField(Q, dict(save=True, save_as="hdf5"))
    mvff = MockVectorFunctionField(V, dict(save=True, save_as="hdf5"))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([mff, mvff])

    pp.update_all({}, 0.0, 0)
    pp.update_all({}, 0.1, 1)
    pp.update_all({}, 0.2, 2)
    pp.finalize_all()

    for mf, FS in [(mff, Q), (mvff, V)]:
        assert os.path.isdir(pp.get_savedir(mf.name))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), "metadata.db"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), mf.name + ".hdf5"))

        md = shelve.open(os.path.join(pp.get_savedir(mf.name), "metadata.db"), 'r')
        assert 'hdf5' in md["0"]
        assert 'hdf5' in md["1"]
        assert 'hdf5' in md["2"]
        assert 'hdf5' in md['saveformats']
        assert md['saveformats'] == ['hdf5']
        md.close()

        # metadata.db + the single hdf5 file
        assert len(os.listdir(pp.get_savedir(mf.name))) == 2

        # Read back all timesteps; the last one should match the current value
        hdf5file = HDF5File(mpi_comm_world(),
                            os.path.join(pp.get_savedir(mf.name), mf.name + ".hdf5"),
                            'r')
        f = Function(FS)
        for i in ["0", "1", "2"]:
            hdf5file.read(f, mf.name + i)
        assert norm(f) == norm(pp.get(mf.name))

def test_playlog(casedir):
    pp = PostProcessor(dict(casedir=casedir))

    # Test playlog
    assert not os.path.isfile(os.path.join(casedir, 'play.db'))
    MPI.barrier(mpi_comm_world())

    pp.update_all({}, 0.0, 0)
    pp.finalize_all()

    playlog = pp.get_playlog('r')
    assert playlog == {"0": {"t": 0.0}}
    playlog.close()

    pp.update_all({}, 0.1, 1)
    pp.finalize_all()

    playlog = pp.get_playlog('r')
    assert playlog == {"0": {"t": 0.0}, "1": {"t": 0.1}}
    playlog.close()

def test_finalize_all(casedir):
    pp = PostProcessor(dict(casedir=casedir))
    velocity = MockVelocity(dict(finalize=True))
    pressure = MockPressure()
    pp.add_fields([velocity, pressure])

    pp.get("MockVelocity")
    pp.get("MockPressure")

    # Nothing finalized yet
    assert pp._finalized == {}
    assert velocity.finalized is False

    # finalize_all should finalize velocity only
    pp.finalize_all()
    assert pp._finalized == {"MockVelocity": "u"}
    assert velocity.finalized is True

    # Still able to get it
    assert pp.get("MockVelocity") == "u"

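# `MockVelocity` and `MockPressure` are defined elsewhere in the suite. A
# minimal sketch of what `MockVelocity` plausibly looks like, assuming
# cbcpost's Field interface where `after_last_compute` is the finalize hook;
# the "u" return value and the `finalized` flag match the assertions above:
#
#   from cbcpost import Field
#
#   class MockVelocity(Field):
#       def __init__(self, params=None):
#           Field.__init__(self, params)
#           self.finalized = False
#
#       def compute(self, get):
#           # Value returned by pp.get("MockVelocity")
#           return "u"
#
#       def after_last_compute(self, get):
#           # Called by finalize_all() when params.finalize is True
#           self.finalized = True
#           return "u"
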
# Excerpt: the replay() method of the Replay class, as exercised by
# test_basic_replay above.
def replay(self):
    "Replay problem with given postprocessor."
    # Backup play log
    self.backup_playlog()

    # Set up for replay
    replay_plan = self._fetch_history()

    postprocessors = []
    for fieldname, field in self.postproc._fields.items():
        if not (field.params.save or field.params.plot):
            continue

        # Check timesteps covered by current field
        keys = self._check_field_coverage(replay_plan, fieldname)

        # Get the time dependency for the field
        t_dep = min([dep[1] for dep in self.postproc._dependencies[fieldname]] + [0])

        dep_fields = []
        for dep in self.postproc._full_dependencies[fieldname]:
            if dep[0] in ["t", "timestep"]:
                continue
            if dep[0] in dep_fields:
                continue

            # Copy dependency and set save/plot to False. If dependency should be
            # plotted/saved, this field will be added separately.
            dependency = self.postproc._fields[dep[0]]
            dependency = copy.copy(dependency)
            dependency.params.save = False
            dependency.params.plot = False
            dependency.params.safe = False
            dep_fields.append(dependency)

        # Try to add the field to an existing postprocessor with the same
        # time dependency and timestep coverage
        added_to_postprocessor = False
        for i, (ppkeys, ppt_dep, pp) in enumerate(postprocessors):
            if t_dep == ppt_dep and set(keys) == set(ppkeys):
                pp.add_fields(dep_fields, exists_reaction="ignore")
                pp.add_field(field, exists_reaction="replace")
                added_to_postprocessor = True
                break

        # Create new postprocessor if no suitable postprocessor found
        if not added_to_postprocessor:
            pp = PostProcessor(self.postproc.params, self.postproc._timer)
            pp.add_fields(dep_fields, exists_reaction="ignore")
            pp.add_field(field, exists_reaction="replace")
            postprocessors.append([keys, t_dep, pp])

    postprocessors = sorted(postprocessors, key=itemgetter(1), reverse=True)

    t_independent_fields = []
    for fieldname in self.postproc._fields:
        if self.postproc._full_dependencies[fieldname] == []:
            t_independent_fields.append(fieldname)
        elif min(t for dep, t in self.postproc._full_dependencies[fieldname]) == 0:
            t_independent_fields.append(fieldname)

    # Run replay
    sorted_keys = sorted(replay_plan.keys())
    N = max(sorted_keys)
    for timestep in sorted_keys:
        cbc_print("Processing timestep %d of %d. %.3f%% complete."
                  % (timestep, N, 100.0 * timestep / N))

        # Load solution at this timestep (all available fields)
        solution = replay_plan[timestep]
        t = solution.pop("t")

        # Cycle through postprocessors and update if required
        for ppkeys, ppt_dep, pp in postprocessors:
            if timestep in ppkeys:
                # Add dummy solutions to avoid error when handling dependencies.
                # We know this should work, because it has already been
                # established that the fields to be computed at this timestep
                # can be computed from stored solutions.
                for field in pp._sorted_fields_keys:
                    for dep in reversed(pp._dependencies[field]):
                        if not have_necessary_deps(solution, pp, dep[0]):
                            solution[dep[0]] = lambda: None

                pp.update_all(solution, t, timestep)

                # Clear None-objects from solution (copy the key list to avoid
                # mutating the dict while iterating over it)
                for k in list(solution.keys()):
                    if not solution[k]:
                        solution.pop(k)

                # Update solution to avoid re-computing data
                for fieldname, value in pp._cache[0].items():
                    if fieldname in t_independent_fields:
                        value = pp._cache[0][fieldname]
                        #solution[fieldname] = lambda value=value: value # Memory leak!
                        solution[fieldname] = MiniCallable(value)

        self.timer.increment()
        if self.params.check_memory_frequency != 0 \
                and timestep % self.params.check_memory_frequency == 0:
            cbc_print('Memory usage is: %s'
                      % MPI.sum(mpi_comm_world(), get_memory_usage()))

        # Clean up solution: Required to avoid memory leak for some reason...
        for f, v in list(solution.items()):
            if isinstance(v, MiniCallable):
                v.value = None
                del v
                solution.pop(f)

    for ppkeys, ppt_dep, pp in postprocessors:
        pp.finalize_all()

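# `MiniCallable` is not defined in this excerpt. Based on how it is used above
# (it replaces the leaking `lambda value=value: value` closure, and the cleanup
# loop sets `v.value = None` to release the reference), a minimal sketch is
# simply a callable wrapper around a single value:
#
#   class MiniCallable(object):
#       def __init__(self, value):
#           self.value = value
#
#       def __call__(self):
#           return self.value
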
def test_pvd_save(mesh, casedir):
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)

    mff = MockFunctionField(Q, dict(save=True, save_as="pvd"))
    mvff = MockVectorFunctionField(V, dict(save=True, save_as="pvd"))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([mff, mvff])

    pp.update_all({}, 0.0, 0)
    pp.update_all({}, 0.1, 1)
    pp.update_all({}, 0.2, 2)
    pp.finalize_all()

    for mf, FS in [(mff, Q), (mvff, V)]:
        assert os.path.isdir(pp.get_savedir(mf.name))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), "metadata.db"))
        assert os.path.isfile(os.path.join(pp.get_savedir(mf.name), mf.name + ".pvd"))

        if MPI.size(mpi_comm_world()) == 1:
            # Serial: one vtu file per timestep
            for i in range(3):
                assert os.path.isfile(os.path.join(pp.get_savedir(mf.name),
                                                   mf.name + "%0.6d.vtu" % i))
        else:
            # Parallel: one pvtu per timestep, plus one vtu per process per timestep
            for i in range(3):
                assert os.path.isfile(os.path.join(pp.get_savedir(mf.name),
                                                   mf.name + "%0.6d.pvtu" % i))
            for p in range(MPI.size(mpi_comm_world())):
                for i in range(3):
                    assert os.path.isfile(os.path.join(pp.get_savedir(mf.name),
                                                       mf.name + "_p%d_%0.6d.vtu" % (p, i)))

        md = shelve.open(os.path.join(pp.get_savedir(mf.name), "metadata.db"), 'r')
        assert 'pvd' in md["0"]
        assert 'pvd' in md["1"]
        assert 'pvd' in md["2"]
        assert md['saveformats'] == ['pvd']
        md.close()

        # metadata.db + .pvd + 3 (p)vtu files, plus per-process vtu files in parallel
        assert len(os.listdir(pp.get_savedir(mf.name))) == 1 + 1 + 3 \
            + int(MPI.size(mpi_comm_world()) != 1) * MPI.size(mpi_comm_world()) * 3