def test_shelve_save(mesh, casedir):
    """Check that save_as='shelve' stores values in <name>.db plus metadata,
    and that the stored values can be read back."""
    # Two non-function-valued fields, both saved in the shelve format
    tuple_field = MockTupleField(dict(save=True, save_as="shelve"))
    scalar_field = MockScalarField(dict(save=True, save_as="shelve"))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([tuple_field, scalar_field])

    # Drive three timesteps and flush everything to disk
    for timestep, t in enumerate([0.0, 0.1, 0.2]):
        pp.update_all({}, t, timestep)
    pp.finalize_all()

    for field in [tuple_field, scalar_field]:
        savedir = pp.get_savedir(field.name)
        assert os.path.isdir(savedir)
        assert os.path.isfile(os.path.join(savedir, "metadata.db"))
        assert os.path.isfile(os.path.join(savedir, field.name + ".db"))

        # Metadata must record shelve as the only save format
        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        assert 'shelve' in md["0"]
        assert md['saveformats'] == ['shelve']
        md.close()

        # Read back every timestep; the last one must equal the live value
        data = shelve.open(os.path.join(savedir, field.name + ".db"), 'r')
        for key in ["0", "1", "2"]:
            stored = data[key]
        data.close()
        assert stored == pp.get(field.name)
def test_default_save(mesh, casedir):
    """Check the default save formats: hdf5+xdmf for function-valued fields,
    txt+shelve for plain (tuple/scalar) fields."""
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)

    func_field = MockFunctionField(Q, dict(save=True))
    vec_field = MockVectorFunctionField(V, dict(save=True))
    tuple_field = MockTupleField(dict(save=True))
    scalar_field = MockScalarField(dict(save=True))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([func_field, vec_field, tuple_field, scalar_field])
    for timestep, t in enumerate([0.0, 0.1, 0.2]):
        pp.update_all({}, t, timestep)
    pp.finalize_all()

    # Function fields: hdf5 plus the xdmf/h5 pair, nothing else
    for field in [func_field, vec_field]:
        savedir = pp.get_savedir(field.name)
        assert os.path.isdir(savedir)
        for fname in ["metadata.db", field.name + ".hdf5",
                      field.name + ".h5", field.name + ".xdmf"]:
            assert os.path.isfile(os.path.join(savedir, fname))
        assert len(os.listdir(savedir)) == 4

        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        for fmt in ['hdf5', 'xdmf']:
            assert fmt in md["0"]
            assert fmt in md['saveformats']
        assert set(md['saveformats']) == set(['hdf5', 'xdmf'])
        md.close()

    # Non-function fields: txt and shelve are the defaults
    for field in [tuple_field, scalar_field]:
        savedir = pp.get_savedir(field.name)
        assert os.path.isdir(savedir)
        for fname in ["metadata.db", field.name + ".db", field.name + ".txt"]:
            assert os.path.isfile(os.path.join(savedir, fname))

        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        for fmt in ['txt', 'shelve']:
            assert fmt in md["0"]
            assert fmt in md['saveformats']
        assert set(md['saveformats']) == set(['txt', 'shelve'])
        md.close()
def main():
    """Demo: Poiseuille3D solved with IPCS, saving and plotting p and u."""
    problem = Poiseuille3D({"refinement_level": 0})
    scheme = IPCS({"u_degree": 1})

    # Case directory is derived from the problem/scheme names
    casedir = "results_demo_%s_%s" % (problem.shortname(), scheme.shortname())
    postproc = PostProcessor({"casedir": casedir})
    options = dict(plot=True, save=True)
    postproc.add_fields([Pressure(options), Velocity(options)])

    NSSolver(problem, scheme, postproc).solve()
def main():
    """Demo: lid-driven cavity with IPCS, saving and plotting p and u."""
    problem = LidDrivenCavity({"refinement_level": 1})
    # Displays pressure oscillations
    scheme = IPCS({"u_degree": 2, "solver_p_neumann": ("cg", "ilu")})

    casedir = "results_demo_%s_%s" % (problem.shortname(), scheme.shortname())
    postproc = PostProcessor({"casedir": casedir})
    options = dict(plot=True, save=True)
    postproc.add_fields([Pressure(options), Velocity(options)])

    NSSolver(problem, scheme, postproc).solve()
def main():
    """Demo: Womersley3D with IPCS; case directory includes the refinement level."""
    problem = Womersley3D({"refinement_level": 2})
    scheme = IPCS()

    casedir = "results_demo_%s_%s_%d" % (problem.shortname(),
                                         scheme.shortname(),
                                         problem.params.refinement_level)
    postproc = PostProcessor({"casedir": casedir})
    options = dict(plot=True, save=True)
    postproc.add_fields([Pressure(options), Velocity(options)])

    NSSolver(problem, scheme, postproc).solve()
def main():
    """Demo: channel flow with IPCS, saving and plotting p and u."""
    problem = Channel()
    scheme = IPCS()

    casedir = "results_demo_%s_%s" % (problem.shortname(), scheme.shortname())
    postproc = PostProcessor({"casedir": casedir})
    options = dict(plot=True, save=True)
    postproc.add_fields([Pressure(options), Velocity(options)])

    NSSolver(problem, scheme, postproc).solve()
def main():
    """Demo: pipe-aneurysm geometry with IPCS, saving and plotting p and u."""
    problem = PipeAneurysm()
    scheme = IPCS()

    casedir = "results_demo_%s_%s" % (problem.shortname(), scheme.shortname())
    postproc = PostProcessor({"casedir": casedir})
    options = dict(plot=True, save=True)
    postproc.add_fields([Pressure(options), Velocity(options)])

    NSSolver(problem, scheme, postproc).solve()
def test_xmlgz_save(mesh, casedir):
    """Check that save_as='xml.gz' writes one file per timestep plus the mesh,
    and that each snapshot can be loaded back into a Function."""
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)
    func_field = MockFunctionField(Q, dict(save=True, save_as="xml.gz"))
    vec_field = MockVectorFunctionField(V, dict(save=True, save_as="xml.gz"))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([func_field, vec_field])
    for timestep, t in enumerate([0.0, 0.1, 0.2]):
        pp.update_all({}, t, timestep)
    pp.finalize_all()

    for field, FS in [(func_field, Q), (vec_field, V)]:
        savedir = pp.get_savedir(field.name)
        assert os.path.isdir(savedir)
        assert os.path.isfile(os.path.join(savedir, "metadata.db"))
        assert os.path.isfile(os.path.join(savedir, "mesh.hdf5"))
        for i in ["0", "1", "2"]:
            assert os.path.isfile(os.path.join(savedir, field.name + i + ".xml.gz"))

        # Metadata records xml.gz for each timestep and as the only format
        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        for i in ["0", "1", "2"]:
            assert 'xml.gz' in md[i]
        assert 'xml.gz' in md['saveformats']
        assert md['saveformats'] == ['xml.gz']
        md.close()

        # metadata.db + mesh.hdf5 + one xml.gz per timestep
        assert len(os.listdir(savedir)) == 1 + 1 + 3

        # Read back each snapshot and compare norms against the live value
        for i in ["0", "1", "2"]:
            f = Function(FS, os.path.join(savedir, field.name + i + ".xml.gz"))
            assert norm(f) == norm(pp.get(field.name))
def main():
    """Demo: 2D Poiseuille flow with IPCS_Naive, saving and plotting p and u."""
    # Silence FEniCS logging during the run
    set_log_level(100)

    problem = Poiseuille2D({"dt": 1e-3, "T": 1e-1,
                            "num_periods": None,
                            "refinement_level": 1})
    scheme = IPCS_Naive({"u_degree": 2})

    casedir = "results_demo_%s_%s" % (problem.shortname(), scheme.shortname())
    postproc = PostProcessor({"casedir": casedir})
    options = dict(plot=True, save=True)
    postproc.add_fields([Pressure(options), Velocity(options)])

    NSSolver(problem, scheme, postproc).solve()
def test_hdf5_save(mesh, casedir):
    """Check that save_as='hdf5' produces a single .hdf5 file (plus metadata.db)
    and that every stored timestep can be read back with a matching norm.

    Fix: the HDF5File handle was previously never closed, leaking the file
    handle (and potentially keeping the HDF5 file locked); it is now closed
    in a try/finally like the shelve handles elsewhere in this file.
    """
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)
    mff = MockFunctionField(Q, dict(save=True, save_as="hdf5"))
    mvff = MockVectorFunctionField(V, dict(save=True, save_as="hdf5"))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([mff, mvff])
    pp.update_all({}, 0.0, 0)
    pp.update_all({}, 0.1, 1)
    pp.update_all({}, 0.2, 2)
    pp.finalize_all()

    for mf, FS in [(mff, Q), (mvff, V)]:
        savedir = pp.get_savedir(mf.name)
        assert os.path.isdir(savedir)
        assert os.path.isfile(os.path.join(savedir, "metadata.db"))
        assert os.path.isfile(os.path.join(savedir, mf.name + ".hdf5"))

        # Metadata records hdf5 for each timestep and as the only format
        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        assert 'hdf5' in md["0"]
        assert 'hdf5' in md["1"]
        assert 'hdf5' in md["2"]
        assert 'hdf5' in md['saveformats']
        assert md['saveformats'] == ['hdf5']
        md.close()

        # Only metadata.db and the single hdf5 file should exist
        assert len(os.listdir(savedir)) == 2

        # Read back every timestep and compare norms against the live value
        hdf5file = HDF5File(mpi_comm_world(),
                            os.path.join(savedir, mf.name + ".hdf5"), 'r')
        try:
            f = Function(FS)
            for i in ["0", "1", "2"]:
                hdf5file.read(f, mf.name + i)
                assert norm(f) == norm(pp.get(mf.name))
        finally:
            hdf5file.close()  # was leaked before
def test_add_field():
    """Check all ways of registering fields: add_field, the += operator with a
    single field or a list, and add_fields with meta fields."""
    pp = PostProcessor()

    # Single field via explicit call
    pp.add_field(SolutionField("foo"))
    assert "foo" in pp._fields.keys()

    # Single field via the += operator
    pp += SolutionField("bar")
    assert set(["foo", "bar"]) == set(pp._fields.keys())

    # A list of fields via the += operator
    pp += [SolutionField("a"), SolutionField("b")]
    assert set(["foo", "bar", "a", "b"]) == set(pp._fields.keys())

    # Meta fields derive their names from their arguments
    pp.add_fields([MetaField("foo"), MetaField2("foo", "bar")])
    expected = set(["foo", "bar", "a", "b", "MetaField_foo", "MetaField2_foo_bar"])
    assert expected == set(pp._fields.keys())
def test_basic_replay(mesh, casedir):
    """Solve with unsaved meta fields, then replay from the saved solution
    fields and check that the replayed (and now saved) values match.

    Fix: the final verification previously read each shelve entry twice via
    data.get(str(i), None) and, when an entry was missing, raised a TypeError
    from `abs(None - x)` instead of a clean assertion; it also leaked the
    shelve handle when an assert failed. Values are now read once and the
    handle is closed in a finally block.
    """
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([
        MockFunctionField(Q, dict(save=True)),
        MockVectorFunctionField(V, dict(save=True))
    ])

    def replay_fields(save):
        # Meta fields computed from the stored solution fields
        return [Norm("MockFunctionField", dict(save=save)),
                Norm("MockVectorFunctionField", dict(save=save)),
                TimeIntegral("Norm_MockFunctionField", dict(save=save))]

    rf_names = [f.name for f in replay_fields(False)]

    # Add fields, but don't save (for testing)
    pp.add_fields(replay_fields(False))

    # Solutions to check against; note checks[2] is taken after finalize_all()
    checks = {}
    pp.update_all({}, 0.0, 0)
    checks[0] = dict((name, pp.get(name)) for name in rf_names)
    pp.update_all({}, 0.1, 1)
    checks[1] = dict((name, pp.get(name)) for name in rf_names)
    pp.update_all({}, 0.2, 2)
    pp.finalize_all()
    checks[2] = dict((name, pp.get(name)) for name in rf_names)

    # Make sure that nothing is saved yet
    for name in rf_names:
        assert not os.path.isfile(os.path.join(pp.get_savedir(name), name + ".db"))

    # ----------- Replay -----------------
    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([
        MockFunctionField(Q),
        MockVectorFunctionField(V),
    ])

    # This time, save the fields
    pp.add_fields(replay_fields(True))

    replayer = Replay(pp)
    replayer.replay()

    # Test that replayed solution is the same as computed in the original "solve"
    for name in rf_names:
        data = shelve.open(os.path.join(pp.get_savedir(name), name + ".db"), 'r')
        try:
            for i in range(3):
                stored = data.get(str(i), None)
                assert stored is not None, \
                    "Missing replayed value for %s at timestep %d" % (name, i)
                assert stored == checks[i][name] or abs(stored - checks[i][name]) < 1e-8
        finally:
            data.close()
def test_compute_calls():
    """Check lazy evaluation and caching: each field is computed at most once
    per get(), and only the fields in the requested dependency chain run."""
    pressure = MockPressure()
    velocity = MockVelocity()
    Du = MockVelocityGradient()
    epsilon = MockStrain()
    sigma = MockStress()

    # Add fields to postprocessor
    pp = PostProcessor()
    pp.add_fields([pressure, velocity, Du, epsilon, sigma])

    def touches():
        # Snapshot of all compute counters (u, grad(u), strain, p, stress)
        return (velocity.touched, Du.touched, epsilon.touched,
                pressure.touched, sigma.touched)

    # Nothing has been computed yet
    assert touches() == (0, 0, 0, 0, 0)

    # Getting the strain twice computes its dependency chain only once;
    # pressure and stress stay untouched
    for _ in range(2):
        assert pp.get("MockStrain") == "epsilon(grad(u))"
        assert touches() == (1, 1, 1, 0, 0)

    # Getting the stress twice reuses the cached strain chain and computes
    # pressure and stress exactly once
    for _ in range(2):
        assert pp.get("MockStress") == "sigma(epsilon(grad(u)), p)"
        assert touches() == (1, 1, 1, 1, 1)
def test_finalize_all(casedir):
    """finalize_all() must finalize exactly the fields that request it, and
    finalized values must remain retrievable."""
    pp = PostProcessor(dict(casedir=casedir))
    vel = MockVelocity(dict(finalize=True))
    pres = MockPressure()
    pp.add_fields([vel, pres])

    pp.get("MockVelocity")
    pp.get("MockPressure")

    # Nothing is finalized before finalize_all() is called
    assert pp._finalized == {}
    assert vel.finalized is False

    # finalize_all should finalize velocity only
    pp.finalize_all()
    assert pp._finalized == {"MockVelocity": "u"}
    assert vel.finalized is True

    # Still able to get it
    assert pp.get("MockVelocity") == "u"
def main():
    """Demo: 2D Poiseuille flow with IPCS_Naive, saving and plotting p and u."""
    set_log_level(100)  # quiet FEniCS log output

    problem_params = {
        "dt": 1e-3,
        "T": 1e-1,
        "num_periods": None,
        "refinement_level": 1,
    }
    problem = Poiseuille2D(problem_params)
    scheme = IPCS_Naive({"u_degree": 2})

    demo_dir = "results_demo_%s_%s" % (problem.shortname(), scheme.shortname())
    postproc = PostProcessor({"casedir": demo_dir})
    field_params = dict(plot=True, save=True)
    postproc.add_fields([Pressure(field_params), Velocity(field_params)])

    solver = NSSolver(problem, scheme, postproc)
    solver.solve()
def test_update_all():
    """Drive update_all over a range of timesteps and verify that each field
    appears in the cache exactly when its start/end constraints allow."""
    pressure = SolutionField("MockPressure")
    velocity = SolutionField("MockVelocity")
    Du = MockVelocityGradient()
    epsilon = MockStrain(dict(start_timestep=3))
    sigma = MockStress(dict(start_time=0.5, end_time=0.8))

    # Add fields to postprocessor
    pp = PostProcessor()
    pp.add_fields([pressure, velocity, Du, epsilon, sigma])

    N = 11
    for timestep in xrange(N):
        t = float(timestep) / (N - 1)
        pp.update_all(
            {"MockPressure": lambda: "p" + str(timestep),
             "MockVelocity": lambda: "u" + str(timestep)},
            t, timestep)

        cache = pp._cache[0]
        assert Du.touched == timestep + 1
        assert cache["MockPressure"] == "p%d" % timestep
        assert cache["MockVelocity"] == "u%d" % timestep
        assert cache["MockVelocityGradient"] == "grad(u%d)" % timestep

        # Strain only becomes active from timestep 3 on
        if timestep >= 3:
            assert cache["MockStrain"] == "epsilon(grad(u%d))" % timestep
        else:
            assert "MockStrain" not in cache

        # Stress is restricted to the time interval [0.5, 0.8]
        if 0.5 <= t <= 0.8:
            assert cache["MockStress"] == \
                "sigma(epsilon(grad(u%d)), p%d)" % (timestep, timestep)
        else:
            assert "MockStress" not in cache
def main():
    """Demo: flow around a cylinder with IPCS_Stable, saving p and u."""
    # Create problem and scheme instances
    problem = FlowAroundCylinder({"refinement_level": 2})
    scheme = IPCS_Stable()

    # Create postprocessor instance pointing to a case directory
    casedir = "results_demo_%s_%s" % (problem.shortname(), scheme.shortname())
    postprocessor = PostProcessor({"casedir": casedir})

    # Fields are saved but not plotted in this demo
    options = dict(plot=False, save=True)
    postprocessor.add_fields([
        Pressure(options),
        Velocity(options),
        #StreamFunction(options),
    ])

    # Create NSSolver instance and solve problem
    solver = NSSolver(problem, scheme, postprocessor)
    solver.solve()
def test_pvd_save(mesh, casedir):
    """Check that save_as='pvd' writes a .pvd file plus per-timestep .vtu
    files (and per-process .vtu + .pvtu files when run in parallel)."""
    spacepool = SpacePool(mesh)
    Q = spacepool.get_space(1, 0)
    V = spacepool.get_space(1, 1)
    func_field = MockFunctionField(Q, dict(save=True, save_as="pvd"))
    vec_field = MockVectorFunctionField(V, dict(save=True, save_as="pvd"))

    pp = PostProcessor(dict(casedir=casedir))
    pp.add_fields([func_field, vec_field])
    for timestep, t in enumerate([0.0, 0.1, 0.2]):
        pp.update_all({}, t, timestep)
    pp.finalize_all()

    nprocs = MPI.size(mpi_comm_world())
    for field, FS in [(func_field, Q), (vec_field, V)]:
        savedir = pp.get_savedir(field.name)
        assert os.path.isdir(savedir)
        assert os.path.isfile(os.path.join(savedir, "metadata.db"))
        assert os.path.isfile(os.path.join(savedir, field.name + ".pvd"))

        if nprocs == 1:
            # Serial: one .vtu per timestep
            for ts in range(3):
                assert os.path.isfile(
                    os.path.join(savedir, field.name + "%0.6d.vtu" % ts))
        else:
            # Parallel: one .pvtu per timestep plus one .vtu per process
            for ts in range(3):
                assert os.path.isfile(
                    os.path.join(savedir, field.name + "%0.6d.pvtu" % ts))
            for p in range(nprocs):
                for ts in range(3):
                    assert os.path.isfile(
                        os.path.join(savedir,
                                     field.name + "_p%d_%0.6d.vtu" % (p, ts)))

        # Metadata records pvd for each timestep and as the only format
        md = shelve.open(os.path.join(savedir, "metadata.db"), 'r')
        for key in ["0", "1", "2"]:
            assert 'pvd' in md[key]
        assert md['saveformats'] == ['pvd']
        md.close()

        # metadata.db + .pvd + 3 timestep files, plus the per-process files
        # (3 per process) when running in parallel
        assert len(os.listdir(savedir)) == \
            1 + 1 + 3 + int(nprocs != 1) * nprocs * 3
def replay(self):
    """Replay problem with given postprocessor.

    Reconstructs field values from the stored play log: fields with the same
    timestep coverage and time dependency share a PostProcessor instance, and
    each logged timestep is then pushed through the relevant postprocessors.
    """
    # Backup play log before reading it
    self.backup_playlog()

    # Set up for replay
    replay_plan = self._fetch_history()
    postprocessors = []
    for fieldname, field in self.postproc._fields.items():
        # Only fields that are saved or plotted need replaying
        if not (field.params.save or field.params.plot):
            continue

        # Check timesteps covered by current field
        keys = self._check_field_coverage(replay_plan, fieldname)

        # Get the time dependency for the field (0 when it never looks back)
        t_dep = min(
            [dep[1] for dep in self.postproc._dependencies[fieldname]] + [0])

        dep_fields = []
        for dep in self.postproc._full_dependencies[fieldname]:
            if dep[0] in ["t", "timestep"]:
                continue
            if dep[0] in dep_fields:
                continue

            # Copy dependency and set save/plot to False. If dependency should be
            # plotted/saved, this field will be added separately.
            dependency = self.postproc._fields[dep[0]]
            dependency = copy.copy(dependency)
            dependency.params.save = False
            dependency.params.plot = False
            # NOTE(review): 'safe' looks like a typo for 'save' (already set
            # above); harmless if params accepts arbitrary keys — confirm.
            dependency.params.safe = False
            dep_fields.append(dependency)

        # Reuse an existing postprocessor when it has the same time dependency
        # and covers the same timesteps
        added_to_postprocessor = False
        for i, (ppkeys, ppt_dep, pp) in enumerate(postprocessors):
            if t_dep == ppt_dep and set(keys) == set(ppkeys):
                pp.add_fields(dep_fields, exists_reaction="ignore")
                pp.add_field(field, exists_reaction="replace")
                added_to_postprocessor = True
                break
            else:
                continue

        # Create new postprocessor if no suitable postprocessor found
        if not added_to_postprocessor:
            pp = PostProcessor(self.postproc.params, self.postproc._timer)
            pp.add_fields(dep_fields, exists_reaction="ignore")
            pp.add_field(field, exists_reaction="replace")
            postprocessors.append([keys, t_dep, pp])

    # Process postprocessors with the largest time dependency first
    postprocessors = sorted(postprocessors, key=itemgetter(1), reverse=True)

    # Fields whose full dependency set never reaches back in time; their cached
    # values can be shared between postprocessors within a timestep
    t_independent_fields = []
    for fieldname in self.postproc._fields:
        if self.postproc._full_dependencies[fieldname] == []:
            t_independent_fields.append(fieldname)
        elif min(t
                 for dep, t in self.postproc._full_dependencies[fieldname]) == 0:
            t_independent_fields.append(fieldname)

    # Run replay
    sorted_keys = sorted(replay_plan.keys())
    N = max(sorted_keys)
    for timestep in sorted_keys:
        cbc_print("Processing timestep %d of %d. %.3f%% complete." %
                  (timestep, N, 100.0 * (timestep) / N))

        # Load solution at this timestep (all available fields)
        solution = replay_plan[timestep]
        t = solution.pop("t")

        # Cycle through postprocessors and update if required
        for ppkeys, ppt_dep, pp in postprocessors:
            if timestep in ppkeys:
                # Add dummy solutions to avoid error when handling dependencies
                # We know this should work, because it has already been established that
                # the fields to be computed at this timestep can be computed from stored
                # solutions.
                for field in pp._sorted_fields_keys:
                    for dep in reversed(pp._dependencies[field]):
                        if not have_necessary_deps(solution, pp, dep[0]):
                            solution[dep[0]] = lambda: None
                pp.update_all(solution, t, timestep)

                # Clear None-objects from solution (drop the dummies added above)
                [solution.pop(k) for k in solution.keys() if not solution[k]]

                # Update solution to avoid re-computing data
                for fieldname, value in pp._cache[0].items():
                    if fieldname in t_independent_fields:
                        value = pp._cache[0][fieldname]
                        #solution[fieldname] = lambda value=value: value # Memory leak!
                        solution[fieldname] = MiniCallable(value)

        self.timer.increment()
        # Optionally report memory usage every check_memory_frequency timesteps
        if self.params.check_memory_frequency != 0 and \
                timestep % self.params.check_memory_frequency == 0:
            cbc_print('Memory usage is: %s' %
                      MPI.sum(mpi_comm_world(), get_memory_usage()))

        # Clean up solution: Required to avoid memory leak for some reason...
        for f, v in solution.items():
            if isinstance(v, MiniCallable):
                v.value = None
                del v
                solution.pop(f)

    # Flush all postprocessors to disk
    for ppkeys, ppt_dep, pp in postprocessors:
        pp.finalize_all()
def test_run_problem(problem_factory, scheme_factory, refinement_level, dt):
    """Regression test: solve a (problem, scheme) combination, compute the
    problem's test-field errors, store them keyed by a hash of the problem and
    scheme parameters, and compare against previously recorded reference data.

    NOTE(review): Python 2 code (print statements); `except ... as re` below
    shadows any module-level `re` import inside this function.
    """
    problem = problem_factory(refinement_level, dt)
    scheme = scheme_factory()

    print
    print "."*100
    print "** Problem: %16s ** Solver: %16s" % (problem.shortname(), scheme.shortname())
    print "** Refinement level: %2d ** dt: %.8f" % (refinement_level, dt)
    print "**"

    # Postprocessor computes the problem-defined test fields during the solve
    pp = PostProcessor({"casedir": "test"})
    test_fields = problem.test_fields()
    pp.add_fields(test_fields)
    solver = NSSolver(problem, scheme, pp)

    # Define variables; 1e16 is a sentinel meaning "not computed"
    values = {f.name: 1e16 for f in test_fields}
    num_dofs = 0
    T = 0

    # Disable printing from solve
    # TODO: Move to NSSolver/set option
    original_stdout = sys.stdout
    sys.stdout = NoOutput()

    try:
        t1 = time.time()
        ns = solver.solve()
        t2 = time.time()
        T = t2-t1

        spaces = ns["spaces"]
        t = float(ns["t"])
        num_dofs = spaces.V.dim()+spaces.Q.dim()

        references = problem.test_references(spaces, t)
        if references:
            # Compare each computed field against its analytical reference
            assert len(references) == len(test_fields)
            for field, ref in zip(test_fields, references):
                value = pp.get(field.name)
                values[field.name] = l2norm(value, ref)
        else:
            for field in test_fields:
                value = float(pp.get(field.name)) # Only support scalar values in reference data
                values[field.name] = value
    except RuntimeError as re:
        # Solver failure: keep the sentinel values and report below
        print re.message

    # Enable printing again, and print values
    sys.stdout = original_stdout
    print "** dofs: %d Time spent: %f" % (num_dofs , T)

    #assert values, "No values calculated. Solver most likely failed."
    if all([v==1e16 for v in values.values()]):
        print "No values calculated. Solver most likely failed."
    for tfname, err in values.items():
        print "**** Fieldname: %20s ** Error: %.8e" % (tfname, err)

    # Store solve metadata
    metadata = {}
    metadata["scheme"] = {}
    metadata["scheme"]["name"] = scheme.shortname()
    metadata["scheme"]["params"] = scheme.params
    metadata["problem"] = {}
    metadata["problem"]["name"] = problem.shortname()
    metadata["problem"]["params"] = problem.params
    metadata["num_dofs"] = num_dofs
    metadata["time"] = T

    # Find hash from problem and scheme name+parameters
    hash = sha1()
    hash.update(str(metadata["scheme"]))
    hash.update(str(metadata["problem"]))
    filename = hash.hexdigest() + ".db"

    # Always write to output
    write_output_data(filename, metadata, values)

    # Read reference data values
    ref_metadata, ref_values = read_reference_data(filename)

    # NOTE(review): the early return makes the assert just below unreachable;
    # missing reference data is currently only a warning, not a failure.
    if ref_values == {}:
        print "WARNING: Found no reference data"
        return
    assert ref_values != {}, "Found no reference data!"

    # Check each value against reference
    for key in values:
        if key in ref_values:
            # Compute absolute and relative errors
            abs_err = abs(values[key] - ref_values[key])
            if abs(ref_values[key]) > 1e-12:
                err = abs_err / abs(ref_values[key])
            else:
                # Reference is numerically zero; fall back to absolute error
                err = abs_err

            # TODO: Find necessary condition of this check!
            # This one should be chosen such that it always passes when nothing has changed
            strict_tolerance = 1e-8
            if err > strict_tolerance:
                msg = "Error not matching reference with tolerance %e:\n key=%s, error=%e, ref_error=%e diff=%e, relative=%e" % (
                    strict_tolerance, key, values[key], ref_values[key], abs_err, err)
                print msg

            # This one should be chosen so that it passes when we're happy
            loose_tolerance = 1e-3
            assert err < loose_tolerance

    # After comparing what we can, check that we have references for everything we computed
    assert set(values.keys()) == set(ref_values.keys()), "Value keys computed and in references are different."
def test_run_problem(problem_factory, scheme_factory, refinement_level, dt):
    """Regression test: solve a (problem, scheme) combination, compute the
    problem's test-field errors, store them keyed by a hash of the problem and
    scheme parameters, and compare against previously recorded reference data.

    NOTE(review): an identically-named definition appears earlier in this
    chunk; if both live in the same module, this later one shadows it —
    confirm whether these belong to different files. Python 2 code (print
    statements); `except ... as re` below shadows any module-level `re`.
    """
    problem = problem_factory(refinement_level, dt)
    scheme = scheme_factory()

    print
    print "." * 100
    print "** Problem: %16s ** Solver: %16s" % (problem.shortname(),
                                                scheme.shortname())
    print "** Refinement level: %2d ** dt: %.8f" % (refinement_level, dt)
    print "**"

    # Postprocessor computes the problem-defined test fields during the solve
    pp = PostProcessor({"casedir": "test"})
    test_fields = problem.test_fields()
    pp.add_fields(test_fields)
    solver = NSSolver(problem, scheme, pp)

    # Define variables; 1e16 is a sentinel meaning "not computed"
    values = {f.name: 1e16 for f in test_fields}
    num_dofs = 0
    T = 0

    # Disable printing from solve
    # TODO: Move to NSSolver/set option
    original_stdout = sys.stdout
    sys.stdout = NoOutput()

    try:
        t1 = time.time()
        ns = solver.solve()
        t2 = time.time()
        T = t2 - t1

        spaces = ns["spaces"]
        t = float(ns["t"])
        num_dofs = spaces.V.dim() + spaces.Q.dim()

        references = problem.test_references(spaces, t)
        if references:
            # Compare each computed field against its analytical reference
            assert len(references) == len(test_fields)
            for field, ref in zip(test_fields, references):
                value = pp.get(field.name)
                values[field.name] = l2norm(value, ref)
        else:
            for field in test_fields:
                value = float(pp.get(field.name)
                              )  # Only support scalar values in reference data
                values[field.name] = value
    except RuntimeError as re:
        # Solver failure: keep the sentinel values and report below
        print re.message

    # Enable printing again, and print values
    sys.stdout = original_stdout
    print "** dofs: %d Time spent: %f" % (num_dofs, T)

    #assert values, "No values calculated. Solver most likely failed."
    if all([v == 1e16 for v in values.values()]):
        print "No values calculated. Solver most likely failed."
    for tfname, err in values.items():
        print "**** Fieldname: %20s ** Error: %.8e" % (tfname, err)

    # Store solve metadata
    metadata = {}
    metadata["scheme"] = {}
    metadata["scheme"]["name"] = scheme.shortname()
    metadata["scheme"]["params"] = scheme.params
    metadata["problem"] = {}
    metadata["problem"]["name"] = problem.shortname()
    metadata["problem"]["params"] = problem.params
    metadata["num_dofs"] = num_dofs
    metadata["time"] = T

    # Find hash from problem and scheme name+parameters
    hash = sha1()
    hash.update(str(metadata["scheme"]))
    hash.update(str(metadata["problem"]))
    filename = hash.hexdigest() + ".db"

    # Always write to output
    write_output_data(filename, metadata, values)

    # Read reference data values
    ref_metadata, ref_values = read_reference_data(filename)

    # NOTE(review): the early return makes the assert just below unreachable;
    # missing reference data is currently only a warning, not a failure.
    if ref_values == {}:
        print "WARNING: Found no reference data"
        return
    assert ref_values != {}, "Found no reference data!"

    # Check each value against reference
    for key in values:
        if key in ref_values:
            # Compute absolute and relative errors
            abs_err = abs(values[key] - ref_values[key])
            if abs(ref_values[key]) > 1e-12:
                err = abs_err / abs(ref_values[key])
            else:
                # Reference is numerically zero; fall back to absolute error
                err = abs_err

            # TODO: Find necessary condition of this check!
            # This one should be chosen such that it always passes when nothing has changed
            strict_tolerance = 1e-8
            if err > strict_tolerance:
                msg = "Error not matching reference with tolerance %e:\n key=%s, error=%e, ref_error=%e diff=%e, relative=%e" % (
                    strict_tolerance, key, values[key], ref_values[key],
                    abs_err, err)
                print msg

            # This one should be chosen so that it passes when we're happy
            loose_tolerance = 1e-3
            assert err < loose_tolerance

    # After comparing what we can, check that we have references for everything we computed
    assert set(values.keys()) == set(ref_values.keys(
    )), "Value keys computed and in references are different."