def test_shelve_save(mesh, casedir):
    """Fields saved with save_as='shelve' end up in <name>.db plus metadata.db."""
    fields = [MockTupleField(dict(save=True, save_as="shelve")),
              MockScalarField(dict(save=True, save_as="shelve"))]
    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields(fields)

    for timestep, t in enumerate([0.0, 0.1, 0.2]):
        pp.update_all({}, t, timestep)
    pp.finalize_all()

    for field in fields:
        savedir = pp.get_savedir(field.name)
        assert os.path.isdir(savedir)
        assert os.path.isfile(os.path.join(savedir, "metadata.db"))
        assert os.path.isfile(os.path.join(savedir, field.name + ".db"))

        # Metadata must record the shelve format for timestep 0
        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        assert 'shelve' in md["0"]
        assert md['saveformats'] == ['shelve']
        md.close()

        # Read back all stored timesteps; the last one must match the
        # field's current value.
        data = shelve.open(os.path.join(savedir, field.name + ".db"), 'r')
        for key in ["0", "1", "2"]:
            stored = data[key]
        data.close()
        assert stored == pp.get(field.name)
def test_pyplot():
    """A plot-enabled scalar field can be updated repeatedly without error."""
    pp = PostProcessor()
    pp.add_field(MockScalarField(dict(plot=True)))
    for timestep, t in enumerate([0.0, 0.1, 0.6, 1.6]):
        pp.update_all({}, t, timestep)
def test_default_save(mesh, casedir):
    """Default save formats: hdf5+xdmf for dolfin Functions, txt+shelve for plain data."""
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)

    function_fields = [MockFunctionField(Q, dict(save=True)),
                       MockVectorFunctionField(V, dict(save=True))]
    data_fields = [MockTupleField(dict(save=True)),
                   MockScalarField(dict(save=True))]

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields(function_fields + data_fields)
    for timestep, t in enumerate([0.0, 0.1, 0.2]):
        pp.update_all({}, t, timestep)
    pp.finalize_all()

    def saved_file(field, suffix):
        # Path of a saved artifact inside the field's save directory
        return os.path.join(pp.get_savedir(field.name), field.name + suffix)

    # Function-valued fields default to hdf5 (+ .h5/.xdmf pair)
    for field in function_fields:
        savedir = pp.get_savedir(field.name)
        assert os.path.isdir(savedir)
        assert os.path.isfile(os.path.join(savedir, "metadata.db"))
        for suffix in [".hdf5", ".h5", ".xdmf"]:
            assert os.path.isfile(saved_file(field, suffix))
        assert len(os.listdir(savedir)) == 4

        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        for fmt in ['hdf5', 'xdmf']:
            assert fmt in md["0"]
            assert fmt in md['saveformats']
        assert set(md['saveformats']) == set(['hdf5', 'xdmf'])
        md.close()

    # Tuple/scalar fields default to txt + shelve
    for field in data_fields:
        savedir = pp.get_savedir(field.name)
        assert os.path.isdir(savedir)
        assert os.path.isfile(os.path.join(savedir, "metadata.db"))
        for suffix in [".db", ".txt"]:
            assert os.path.isfile(saved_file(field, suffix))

        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        for fmt in ['txt', 'shelve']:
            assert fmt in md["0"]
            assert fmt in md['saveformats']
        assert set(md['saveformats']) == set(['txt', 'shelve'])
        md.close()
def test_get_casedir(casedir):
    """get_casedir points at the configured directory; clean_casedir empties it."""
    pp = PostProcessor(dict(casedir=casedir))

    def casedir_entries():
        # Current contents of the case directory
        return os.listdir(pp.get_casedir())

    assert os.path.isdir(pp.get_casedir())
    assert os.path.samefile(pp.get_casedir(), casedir)

    pp.update_all({}, 0.0, 0)
    assert len(casedir_entries()) == 1

    pp.clean_casedir()
    assert len(casedir_entries()) == 0
def test_dolfinplot(mesh):
    """Plot-enabled dolfin Function fields update repeatedly without error."""
    pp = PostProcessor()
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)
    pp.add_field(MockFunctionField(Q, dict(plot=True)))
    pp.add_field(MockVectorFunctionField(V, dict(plot=True)))
    for timestep, t in enumerate([0.0, 0.1, 0.6, 1.6]):
        pp.update_all({}, t, timestep)
def test_xmlgz_save(mesh, casedir):
    """save_as='xml.gz' writes one compressed xml per timestep plus the mesh."""
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)
    mff = MockFunctionField(Q, dict(save=True, save_as="xml.gz"))
    mvff = MockVectorFunctionField(V, dict(save=True, save_as="xml.gz"))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([mff, mvff])
    for timestep, t in enumerate([0.0, 0.1, 0.2]):
        pp.update_all({}, t, timestep)
    pp.finalize_all()

    for mf, FS in [(mff, Q), (mvff, V)]:
        savedir = pp.get_savedir(mf.name)
        assert os.path.isdir(savedir)
        assert os.path.isfile(os.path.join(savedir, "metadata.db"))
        assert os.path.isfile(os.path.join(savedir, "mesh.hdf5"))
        for i in ["0", "1", "2"]:
            assert os.path.isfile(os.path.join(savedir, mf.name + i + ".xml.gz"))

        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        for i in ["0", "1", "2"]:
            assert 'xml.gz' in md[i]
        assert 'xml.gz' in md['saveformats']
        assert md['saveformats'] == ['xml.gz']
        md.close()

        # metadata.db + mesh.hdf5 + three timestep files
        assert len(os.listdir(savedir)) == 1 + 1 + 3

        # Read back every timestep; after the loop f holds the last one,
        # which must match the field's current value.
        for i in ["0", "1", "2"]:
            f = Function(
                FS,
                os.path.join(savedir, mf.name + i + ".xml.gz"))
        assert norm(f) == norm(pp.get(mf.name))
def test_hdf5_save(mesh, casedir):
    """save_as='hdf5' stores all timesteps of a field in a single <name>.hdf5.

    Checks the on-disk layout (only metadata.db and the hdf5 file), the
    metadata shelf contents, and that the last timestep read back matches
    the field's current value.
    """
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)
    mff = MockFunctionField(Q, dict(save=True, save_as="hdf5"))
    mvff = MockVectorFunctionField(V, dict(save=True, save_as="hdf5"))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([mff, mvff])
    pp.update_all({}, 0.0, 0)
    pp.update_all({}, 0.1, 1)
    pp.update_all({}, 0.2, 2)
    pp.finalize_all()

    for mf, FS in [(mff, Q), (mvff, V)]:
        savedir = pp.get_savedir(mf.name)
        assert os.path.isdir(savedir)
        assert os.path.isfile(os.path.join(savedir, "metadata.db"))
        assert os.path.isfile(os.path.join(savedir, mf.name + ".hdf5"))

        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        assert 'hdf5' in md["0"]
        assert 'hdf5' in md["1"]
        assert 'hdf5' in md["2"]
        assert 'hdf5' in md['saveformats']
        assert md['saveformats'] == ['hdf5']
        md.close()
        assert len(os.listdir(savedir)) == 2

        # Read back every stored timestep; f ends up holding the last one.
        hdf5file = HDF5File(
            mpi_comm_world(),
            os.path.join(savedir, mf.name + ".hdf5"), 'r')
        f = Function(FS)
        for i in ["0", "1", "2"]:
            hdf5file.read(f, mf.name + i)
        # BUGFIX: close the handle; the original leaked it, and an open
        # HDF5 handle can keep the file locked for later readers/writers.
        hdf5file.close()
        assert norm(f) == norm(pp.get(mf.name))
def test_playlog(casedir):
    """The play log records {timestep: {"t": time}} and grows with each update."""
    pp = PostProcessor(dict(casedir=casedir))

    # No play log exists before the first update
    assert not os.path.isfile(os.path.join(casedir, 'play.db'))
    MPI.barrier(mpi_comm_world())

    def current_playlog():
        # Snapshot the playlog contents and close the shelf again
        log = pp.get_playlog('r')
        contents = dict(log)
        log.close()
        return contents

    pp.update_all({}, 0.0, 0)
    pp.finalize_all()
    assert current_playlog() == {"0": {"t": 0.0}}

    pp.update_all({}, 0.1, 1)
    pp.finalize_all()
    assert current_playlog() == {"0": {"t": 0.0}, "1": {"t": 0.1}}
def test_update_all():
    """update_all propagates solutions through the field dependency graph.

    Direct dependencies (the velocity gradient) are recomputed at every
    timestep, while MockStrain/MockStress only trigger inside their
    configured start_timestep / [start_time, end_time] windows.
    """
    pressure = SolutionField("MockPressure")
    velocity = SolutionField("MockVelocity")
    Du = MockVelocityGradient()
    epsilon = MockStrain(dict(start_timestep=3))
    sigma = MockStress(dict(start_time=0.5, end_time=0.8))

    # Add fields to postprocessor
    pp = PostProcessor()
    pp.add_fields([pressure, velocity, Du, epsilon, sigma])

    N = 11
    # BUGFIX: use range instead of the Python-2-only xrange builtin
    # (identical behavior here, portable to Python 3).
    T = [(i, float(i) / (N - 1)) for i in range(N)]

    for timestep, t in T:
        # The lambdas are invoked inside update_all during this iteration,
        # so the late binding of `timestep` is intentional and safe.
        pp.update_all(
            {
                "MockPressure": lambda: "p" + str(timestep),
                "MockVelocity": lambda: "u" + str(timestep)
            }, t, timestep)

        # Velocity gradient recomputed every timestep
        assert Du.touched == timestep + 1

        assert pp._cache[0]["MockPressure"] == "p%d" % timestep
        assert pp._cache[0]["MockVelocity"] == "u%d" % timestep
        assert pp._cache[0]["MockVelocityGradient"] == "grad(u%d)" % timestep

        # Strain only computed from its start_timestep onwards
        if timestep >= 3:
            assert pp._cache[0][
                "MockStrain"] == "epsilon(grad(u%d))" % timestep
        else:
            assert "MockStrain" not in pp._cache[0]

        # Stress only computed inside its [start_time, end_time] window
        if 0.5 <= t <= 0.8:
            assert pp._cache[0][
                "MockStress"] == "sigma(epsilon(grad(u%d)), p%d)" % (timestep,
                                                                     timestep)
        else:
            assert "MockStress" not in pp._cache[0]
def test_basic_replay(mesh, casedir):
    """Replay recomputes derived fields from solutions stored on disk.

    First pass: save only the primal mock fields; compute (but do not save)
    the derived Norm/TimeIntegral fields and remember their values. Replay
    pass: recompute the derived fields from disk with save=True and check
    they reproduce the remembered values.
    """
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([
        MockFunctionField(Q, dict(save=True)),
        MockVectorFunctionField(V, dict(save=True))
    ])

    # BUGFIX: plain def instead of a lambda bound to a name (PEP 8 E731)
    def replay_fields(save):
        return [Norm("MockFunctionField", dict(save=save)),
                Norm("MockVectorFunctionField", dict(save=save)),
                TimeIntegral("Norm_MockFunctionField", dict(save=save))]

    rf_names = [f.name for f in replay_fields(False)]

    # Add fields, but don't save (for testing)
    pp.add_fields(replay_fields(False))

    # Solutions to check against
    checks = {}
    pp.update_all({}, 0.0, 0)
    checks[0] = dict([(name, pp.get(name)) for name in rf_names])
    pp.update_all({}, 0.1, 1)
    checks[1] = dict([(name, pp.get(name)) for name in rf_names])
    pp.update_all({}, 0.2, 2)
    pp.finalize_all()
    checks[2] = dict([(name, pp.get(name)) for name in rf_names])

    # Make sure that nothing is saved yet
    for name in rf_names:
        assert not os.path.isfile(
            os.path.join(pp.get_savedir(name), name + ".db"))

    # ----------- Replay -----------------
    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([
        MockFunctionField(Q),
        MockVectorFunctionField(V),
    ])

    # This time, save the fields
    pp.add_fields(replay_fields(True))

    replayer = Replay(pp)
    replayer.replay()

    # Test that replayed solution is the same as computed in the original "solve"
    for name in rf_names:
        data = shelve.open(
            os.path.join(pp.get_savedir(name), name + ".db"), 'r')
        for i in range(3):
            # BUGFIX: fetch once and fail with a clean assertion on a missing
            # key; the original called data.get(...) twice and raised
            # TypeError (None - float) when a timestep was absent.
            replayed = data.get(str(i), None)
            assert replayed is not None
            assert (replayed == checks[i][name]
                    or abs(replayed - checks[i][name]) < 1e-8)
        data.close()
def replay(self):
    """Replay problem with given postprocessor.

    Re-runs the stored play log: fields flagged save/plot are grouped into
    one or more PostProcessor instances (by time dependency and timestep
    coverage), then every logged timestep is fed through those
    postprocessors so the fields are recomputed from stored solutions.
    """
    # Backup play log
    self.backup_playlog()

    # Set up for replay
    replay_plan = self._fetch_history()
    postprocessors = []
    for fieldname, field in self.postproc._fields.items():
        # Only fields that actually produce output need replaying
        if not (field.params.save or field.params.plot):
            continue

        # Check timesteps covered by current field
        keys = self._check_field_coverage(replay_plan, fieldname)

        # Get the time dependency for the field
        # (most negative dependency offset; 0 if there are none)
        t_dep = min(
            [dep[1] for dep in self.postproc._dependencies[fieldname]] + [0])

        dep_fields = []
        for dep in self.postproc._full_dependencies[fieldname]:
            if dep[0] in ["t", "timestep"]:
                continue
            # NOTE(review): dep_fields holds Field objects, so this membership
            # test compares a name against fields and may never be True —
            # confirm intent (possibly should test against collected names).
            if dep[0] in dep_fields:
                continue
            # Copy dependency and set save/plot to False. If dependency should be
            # plotted/saved, this field will be added separately.
            dependency = self.postproc._fields[dep[0]]
            dependency = copy.copy(dependency)
            dependency.params.save = False
            dependency.params.plot = False
            # NOTE(review): 'safe' looks like a possible typo for 'save'
            # (already set above) — confirm against Field.params schema.
            dependency.params.safe = False
            dep_fields.append(dependency)

        # Reuse an existing postprocessor when one already covers the same
        # timesteps with the same time dependency
        added_to_postprocessor = False
        for i, (ppkeys, ppt_dep, pp) in enumerate(postprocessors):
            if t_dep == ppt_dep and set(keys) == set(ppkeys):
                pp.add_fields(dep_fields, exists_reaction="ignore")
                pp.add_field(field, exists_reaction="replace")
                added_to_postprocessor = True
                break
            else:
                continue

        # Create new postprocessor if no suitable postprocessor found
        if not added_to_postprocessor:
            pp = PostProcessor(self.postproc.params, self.postproc._timer)
            pp.add_fields(dep_fields, exists_reaction="ignore")
            pp.add_field(field, exists_reaction="replace")
            postprocessors.append([keys, t_dep, pp])

    # Process postprocessors with the largest time dependency first
    postprocessors = sorted(postprocessors, key=itemgetter(1), reverse=True)

    # Fields whose full dependency chain has no backward time offset can be
    # cached and reused within a timestep
    t_independent_fields = []
    for fieldname in self.postproc._fields:
        if self.postproc._full_dependencies[fieldname] == []:
            t_independent_fields.append(fieldname)
        elif min(t for dep, t in
                 self.postproc._full_dependencies[fieldname]) == 0:
            t_independent_fields.append(fieldname)

    # Run replay
    sorted_keys = sorted(replay_plan.keys())
    N = max(sorted_keys)
    for timestep in sorted_keys:
        cbc_print("Processing timestep %d of %d. %.3f%% complete." %
                  (timestep, N, 100.0 * (timestep) / N))

        # Load solution at this timestep (all available fields)
        solution = replay_plan[timestep]
        t = solution.pop("t")

        # Cycle through postprocessors and update if required
        for ppkeys, ppt_dep, pp in postprocessors:
            if timestep in ppkeys:
                # Add dummy solutions to avoid error when handling dependencies
                # We know this should work, because it has already been established that
                # the fields to be computed at this timestep can be computed from stored
                # solutions.
                for field in pp._sorted_fields_keys:
                    for dep in reversed(pp._dependencies[field]):
                        if not have_necessary_deps(solution, pp, dep[0]):
                            solution[dep[0]] = lambda: None
                pp.update_all(solution, t, timestep)

                # Clear None-objects from solution
                # NOTE(review): mutates the dict while iterating keys() —
                # safe on Python 2 (keys() is a list), breaks on Python 3.
                [
                    solution.pop(k)
                    for k in solution.keys() if not solution[k]
                ]

                # Update solution to avoid re-computing data
                for fieldname, value in pp._cache[0].items():
                    if fieldname in t_independent_fields:
                        value = pp._cache[0][fieldname]
                        #solution[fieldname] = lambda value=value: value # Memory leak!
                        solution[fieldname] = MiniCallable(value)

        self.timer.increment()
        # Periodic memory report, controlled by check_memory_frequency
        if self.params.check_memory_frequency != 0 and timestep % self.params.check_memory_frequency == 0:
            cbc_print('Memory usage is: %s' %
                      MPI.sum(mpi_comm_world(), get_memory_usage()))

        # Clean up solution: Required to avoid memory leak for some reason...
        # NOTE(review): also mutates while iterating items(); Python 2 only.
        for f, v in solution.items():
            if isinstance(v, MiniCallable):
                v.value = None
                del v
                solution.pop(f)

    # Flush/finalize every postprocessor (writes final data to disk)
    for ppkeys, ppt_dep, pp in postprocessors:
        pp.finalize_all()
def run_splitting_solver(domain, dt, T):
    """Run a monodomain splitting solver on `domain` over (0, T) with step dt,
    saving the transmembrane potential "v" via the postprocessor."""
    # Cardiac model: ten Tusscher-Panfilov (2006) epicardial cell model
    cell_model = Tentusscher_panfilov_2006_epi_cell()
    heart = setup_model(cell_model, domain)

    # Solver parameters: monodomain PDE, 2nd order splitting (theta=0.5),
    # explicit first-order Rush-Larsen for the ODEs, Crank-Nicolson in time
    # with CG + AMG for the PDE.
    prm = SplittingSolver.default_parameters()
    prm["pde_solver"] = "monodomain"
    prm["apply_stimulus_current_to_pde"] = True
    prm["theta"] = 0.5
    prm["ode_solver_choice"] = "CardiacODESolver"
    prm["CardiacODESolver"]["scheme"] = "RL1"
    prm["MonodomainSolver"]["theta"] = 0.5
    prm["MonodomainSolver"]["linear_solver_type"] = "iterative"
    prm["MonodomainSolver"]["algorithm"] = "cg"
    prm["MonodomainSolver"]["preconditioner"] = "petsc_amg"

    solver = SplittingSolver(heart, params=prm)

    # Set initial conditions from the cell model and start the time stepping
    (vs_, vs, vur) = solver.solution_fields()
    vs_.assign(cell_model.initial_conditions())
    solutions = solver.solve((0, T), dt)

    # Postprocessing: store the mesh, save "v" as hdf5 + xdmf every step
    postprocessor = PostProcessor(dict(casedir="test", clean_casedir=True))
    postprocessor.store_mesh(heart.domain())
    field_params = dict(
        save=True,
        save_as=["hdf5", "xdmf"],
        plot=False,
        start_timestep=-1,
        stride_timestep=1
    )
    postprocessor.add_field(SolutionField("v", field_params))
    theta = prm["theta"]

    # Solve
    total = Timer("XXX Total cbcbeat solver time")
    for step, (interval, (vs_, vs, vur)) in enumerate(solutions):
        t0, t1 = interval
        # Report the solution at the theta-point of the interval
        current_t = t0 + theta * (t1 - t0)
        postprocessor.update_all({"v": lambda: vur}, current_t, step)
        print("Solving on %s" % str(interval))

        # Print memory usage (just for the fun of it)
        print(memory_usage())

    total.stop()

    # Plot result (as sanity check)
    #plot(vs[0], interactive=True)

    # Stop timer and list timings on the root process only
    if MPI.rank(mpi_comm_world()) == 0:
        list_timings(TimingClear_keep, [TimingType_wall])
def test_pvd_save(mesh, casedir):
    """save_as='pvd' writes a .pvd index plus per-timestep (p)vtu files."""
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)
    mff = MockFunctionField(Q, dict(save=True, save_as="pvd"))
    mvff = MockVectorFunctionField(V, dict(save=True, save_as="pvd"))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([mff, mvff])
    for timestep, t in enumerate([0.0, 0.1, 0.2]):
        pp.update_all({}, t, timestep)
    pp.finalize_all()

    nprocs = MPI.size(mpi_comm_world())
    for mf, FS in [(mff, Q), (mvff, V)]:
        savedir = pp.get_savedir(mf.name)
        assert os.path.isdir(savedir)
        assert os.path.isfile(os.path.join(savedir, "metadata.db"))
        assert os.path.isfile(os.path.join(savedir, mf.name + ".pvd"))

        if nprocs == 1:
            # Serial: one vtu per timestep
            for ts in range(3):
                assert os.path.isfile(
                    os.path.join(savedir, mf.name + "%0.6d.vtu" % ts))
        else:
            # Parallel: a pvtu index per timestep plus one vtu per process
            for ts in range(3):
                assert os.path.isfile(
                    os.path.join(savedir, mf.name + "%0.6d.pvtu" % ts))
            for rank in range(nprocs):
                for ts in range(3):
                    assert os.path.isfile(
                        os.path.join(savedir,
                                     mf.name + "_p%d_%0.6d.vtu" % (rank, ts)))

        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        assert 'pvd' in md["0"]
        assert 'pvd' in md["1"]
        assert 'pvd' in md["2"]
        assert md['saveformats'] == ['pvd']
        md.close()

        # metadata.db + .pvd + 3 (p)vtu [+ per-process vtus in parallel]
        assert len(os.listdir(savedir)) == 1 + 1 + 3 + int(
            nprocs != 1) * nprocs * 3