class DataWranglerTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(DataWranglerTest, self).__init__(*args, **kwargs)
        self.dw = None
        self.simulator = None

    def test_1d(self):
        for interp in range(1, 4):
            self.simulator = Simulator(populate_simulation(1, interp))
            self.simulator.initialize()
            self.dw = self.simulator.data_wrangler()

            print("\n", self.dw.lvl0IonDensity())
            print("\n", self.dw.lvl0BulkVelocity())
            print("\n", self.dw.lvl0PopDensity())
            print("\n", self.dw.lvl0PopFluxes())
            print("\n", self.dw.lvl0EM())

            for pop, particles in self.dw.getPatchLevel(0).getParticles().items():
                for key, patches in particles.items():
                    for patch in patches:
                        self.assertTrue(isinstance(patch.lower, np.ndarray))
                        self.assertTrue(isinstance(patch.upper, np.ndarray))

            self.simulator = None

    def tearDown(self):
        del self.dw
        if self.simulator is not None:
            self.simulator.reset()
def main():
    import random

    startMPI()
    rando = random.randint(0, 10**10)  # int bounds: float bounds like 1e10 are rejected by newer Pythons
    Simulator(config(L0_diags, {"seed": rando})).run().reset()

    refinement_boxes = {"L0": {"B0": [(7, 40), (20, 60)]}}
    sim = config(L0L1_diags, {"seed": rando}, refinement_boxes)
    Simulator(sim, post_advance=post_advance).run()
def main():
    import random

    startMPI()
    rando = random.randint(0, 10**10)  # int bounds: float bounds like 1e10 are rejected by newer Pythons

    refinement_boxes = {"L0": {"B0": [(10, 10), (14, 14)]}}
    Simulator(config(L0_diags, {"seed": rando}), post_advance=post_advance_0).run().reset()
    sim = config(L0L1_diags, {"seed": rando}, refinement_boxes)
    Simulator(sim, post_advance=post_advance_1).run()
def main():
    noRefinement(diagdir="noRefinement")
    Simulator(gv.sim).run()
    gv.sim = None

    withTagging(diagdir="withTagging")
    Simulator(gv.sim, post_advance=post_advance).run()
    gv.sim = None

    if cpp.mpi_rank() == 0:
        make_figure()
def test_mode_conserve(self, dim=1, interp=1, simput=dup(simArgs)):
    print(f"test_mode_conserve dim/interp:{dim}/{interp}")

    for key in ["cells", "dl", "boundary_types"]:
        simput[key] = [simput[key]] * dim

    # first simulation
    local_out = f"{out}/conserve/{dim}/{interp}/mpi_n/{cpp.mpi_size()}/id{self.ddt_test_id()}"
    self.register_diag_dir_for_cleanup(local_out)
    simput["restart_options"]["dir"] = local_out
    ph.global_vars.sim = ph.Simulation(**simput)
    self.assertEqual(len(ph.global_vars.sim.restart_options["timestamps"]), 1)
    self.assertEqual(ph.global_vars.sim.restart_options["timestamps"][0], 0.004)
    model = setup_model()
    Simulator(ph.global_vars.sim).run().reset()

    # second simulation (not restarted): in "conserve" mode the restart
    # timestamp already written by the first run is not scheduled again
    ph.global_vars.sim = None
    simput["restart_options"]["mode"] = "conserve"
    ph.global_vars.sim = ph.Simulation(**simput)
    self.assertEqual(len(ph.global_vars.sim.restart_options["timestamps"]), 0)
def test_dump_diags_timestamps(self):
    print("test_dump_diags dim/interp:{}/{}".format(1, 1))

    simulation = ph.Simulation(**simArgs.copy())
    sim = simulation

    dump_every = 1
    timestamps = np.arange(0, sim.final_time + sim.time_step, dump_every * sim.time_step)
    setup_model(10)

    for quantity in ["B"]:
        ElectromagDiagnostics(
            quantity=quantity,
            write_timestamps=timestamps,
            compute_timestamps=timestamps,
            flush_every=ElectromagDiagnostics.h5_flush_never,
        )

    Simulator(simulation).run()

    def make_time(stamp):
        return "{:.10f}".format(stamp)

    for diagInfo in ph.global_vars.sim.diagnostics:
        h5_filename = os.path.join(out, h5_filename_from(diagInfo))
        self.assertTrue(os.path.exists(h5_filename))
        h5_file = h5py.File(h5_filename, "r")
        for timestamp in timestamps:
            self.assertIn(make_time(timestamp), h5_file[h5_time_grp_key])
def _test_dump_diags(self, dim, **simInput):
    test_id = self.ddt_test_id()
    for key in ["cells", "dl", "boundary_types"]:
        simInput[key] = [simInput[key] for d in range(dim)]

    for interp in range(1, 4):
        local_out = f"{out}_dim{dim}_interp{interp}_mpi_n_{cpp.mpi_size()}_id{test_id}"
        simInput["diag_options"]["options"]["dir"] = local_out
        simulation = ph.Simulation(**simInput)
        self.assertTrue(len(simulation.cells) == dim)
        dump_all_diags(setup_model().populations)
        self.simulator = Simulator(simulation).initialize().advance().reset()

        self.assertTrue(
            any([
                diagInfo.quantity.endswith("tags")
                for diagInfo in ph.global_vars.sim.diagnostics
            ]))

        checks = 0
        found = 0
        for diagInfo in ph.global_vars.sim.diagnostics:
            h5_filepath = os.path.join(local_out, h5_filename_from(diagInfo))
            self.assertTrue(os.path.exists(h5_filepath))

            h5_file = h5py.File(h5_filepath, "r")
            self.assertTrue("0.0000000000" in h5_file[h5_time_grp_key])  # init dump
            n_patches = len(list(h5_file[h5_time_grp_key]["0.0000000000"]["pl0"].keys()))

            if h5_filepath.endswith("tags.h5"):
                found = 1
                hier = hierarchy_from(h5_filename=h5_filepath)
                patches = hier.level(0).patches
                tag_found = 0
                for patch in patches:
                    self.assertTrue(len(patch.patch_datas.items()))
                    for qty_name, pd in patch.patch_datas.items():
                        self.assertTrue((pd.dataset[:] >= 0).all())
                        self.assertTrue((pd.dataset[:] < 2).all())
                        tag_found |= (pd.dataset[:] == 1).any()
                    checks += 1

        self.assertEqual(found, 1)
        self.assertEqual(tag_found, 1)
        self.assertEqual(checks, n_patches)
        self.simulator = None
        ph.global_vars.sim = None
def _test_dump_diags(self, dim, **simInput):
    test_id = self.ddt_test_id()

    # configure simulation dim sized values
    for key in ["cells", "dl", "boundary_types"]:
        simInput[key] = [simInput[key] for d in range(dim)]
    b0 = [[10 for i in range(dim)], [19 for i in range(dim)]]
    simInput["refinement_boxes"] = {"L0": {"B0": b0}}

    for interp in range(1, 4):
        print("_test_dump_diags dim/interp:{}/{}".format(dim, interp))
        local_out = f"{out}_dim{dim}_interp{interp}_mpi_n_{cpp.mpi_size()}_id{test_id}"
        simInput["diag_options"]["options"]["dir"] = local_out
        simulation = ph.Simulation(**simInput)
        self.assertTrue(len(simulation.cells) == dim)
        dump_all_diags(setup_model().populations)
        self.simulator = Simulator(simulation).initialize().advance()

        for diagInfo in ph.global_vars.sim.diagnostics:
            # diagInfo.quantity starts with a "/", which interferes with os.path.join, hence the [1:]
            h5_filename = os.path.join(local_out, (diagInfo.quantity + ".h5").replace("/", "_")[1:])
            print("h5_filename", h5_filename)

            h5_file = h5py.File(h5_filename, "r")
            self.assertTrue("t0.000000" in h5_file)  # init dump
            self.assertTrue("t0.000100" in h5_file)
            self.assertTrue("pl1" in h5_file["t0.000100"])
            self.assertFalse("pl0" in h5_file["t0.000100"])
            self.assertTrue("t0.001000" in h5_file)  # advance dump

            # SEE https://github.com/PHAREHUB/PHARE/issues/275
            if dim == 1:  # REMOVE WHEN PHARESEE SUPPORTS 2D
                self.assertTrue(os.path.exists(h5_filename))
                hier = hierarchy_from(h5_filename=h5_filename)
                if h5_filename.endswith("domain.h5"):
                    for patch in hier.level(0).patches:
                        for qty_name, pd in patch.patch_datas.items():
                            splits = pd.dataset.split(ph.global_vars.sim)
                            self.assertTrue(splits.size() == pd.dataset.size() * 2)
                            print("splits.iCell", splits.iCells)
                            print("splits.delta", splits.deltas)
                            print("splits.weight", splits.weights)
                            print("splits.charge", splits.charges)
                            print("splits.v", splits.v)

        self.simulator = None
        ph.global_vars.sim = None
def main():
    from pyphare.cpp import cpp_lib
    cpp = cpp_lib()

    from pyphare.pharesee.run import Run
    from pyphare.pharesee.hierarchy import flat_finest_field

    config()
    Simulator(gv.sim).run()

    if cpp.mpi_rank() == 0:
        vphi, t, phi, a, k = phase_speed(".", 0.01, 1000)

        r = Run(".")
        t = get_times_from_h5("EM_B.h5")
        fig, ax = plt.subplots(figsize=(9, 5), nrows=1)

        B = r.GetB(t[int(len(t) / 2)])
        by, xby = flat_finest_field(B, "By")
        ax.plot(xby, by, label="t = 500", alpha=0.6)

        sorted_patches = sorted(B.patch_levels[1].patches,
                                key=lambda p: p.box.lower[0])
        x0 = sorted_patches[0].patch_datas["By"].x[0]
        x1 = sorted_patches[-1].patch_datas["By"].x[-1]

        B = r.GetB(t[-1])
        by, xby = flat_finest_field(B, "By")
        ax.plot(xby, by, label="t = 1000", alpha=0.6)
        ax.plot(xby, wave(xby, 0.01, 2 * np.pi / 1000., 2 * np.pi / 1000 * 500),
                color="k", ls="--", label="T=500 (theory)")

        B = r.GetB(t[0])
        by, xby = flat_finest_field(B, "By")
        ax.plot(xby, by, label="t = 0", color="k")

        ax.set_xlabel("x")
        ax.set_ylabel(r"$B_y$")
        ax.legend(ncol=4, loc="upper center")
        ax.set_ylim((-0.012, 0.013))
        ax.set_title(r"$V_\phi = {:6.4f}$".format(vphi.mean()))
        ax.axvspan(x0, x1, alpha=0.2)
        fig.tight_layout()
        fig.savefig("alfven_wave.png", dpi=200)

        # mean phase speed should be within 5% of the Alfven speed (v_phi = 1);
        # asserting np.mean(np.abs(vphi - 1) < 5e-2) would only test that the
        # boolean array's mean is truthy, not the tolerance itself
        assert np.mean(np.abs(vphi - 1)) < 5e-2
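# For reference, a plausible shape for the wave() helper called above; its
# real definition lives alongside phase_speed() in this script's setup and
# is not shown here, so the signature wave(x, a, k, phase) and the sign
# convention below are assumptions inferred from the call site only.

import numpy as np

def wave(x, a, k, phase):
    # transverse perturbation of amplitude a and wavenumber k, shifted by
    # phase = k * v_phi * t for a wave travelling at phase speed v_phi
    return a * np.cos(k * x + phase)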
def main():
    from pybindlibs.cpp import mpi_rank

    config()
    simulator = Simulator(gv.sim)
    simulator.initialize()
    simulator.run()

    if mpi_rank() == 0:
        times, first_mode, ampl, gamma, damped_mode, omega \
            = growth_b_right_hand(os.path.join(os.curdir, "ion_ion_beam1d"))

        fig, (ax1, ax2) = plt.subplots(2, 1)
        ax1.set_title("Right Hand Resonant mode (Beam instability)")
        ax1.stem(times, first_mode, linefmt='-k', basefmt=' ',
                 use_line_collection=True)
        ax1.plot(times, yaebx(times, ampl, gamma),
                 color='r', linestyle='-', marker='')
        ax1.text(0.04, 0.80, "From Gary et al., 1985 (ApJ : 10.1086/162797)",
                 transform=ax1.transAxes)
        ax1.set_ylabel("Most unstable mode")
        ax1.text(0.30, 0.50, "gamma = {:5.3f}... expected 0.09".format(gamma),
                 transform=ax1.transAxes)

        ax2.plot(times, damped_mode, color='g', linestyle='', marker='o')
        ax2.set_xlabel("Time")
        ax2.set_ylabel("Real mode")
        ax2.text(0.48, 0.30, "~ 3 periods until t=50", transform=ax2.transAxes)
        ax2.text(0.40, 0.20, "omega (real) = {:5.3f}... expected 0.19".format(omega),
                 transform=ax2.transAxes)

        fig.savefig("ion_ion_beam1d.png")

        # compare with the values given by Gary et al., 1985
        assert np.fabs(gamma - 0.09) < 2e-2
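# yaebx() above reads as the exponential model y = a * exp(b*x) used to fit
# the growth phase of the unstable mode. A minimal sketch of that model and
# of how gamma could be extracted with it; the actual helper lives in the
# test utilities, and the curve_fit usage below is illustrative, not the
# project's code:

import numpy as np
from scipy.optimize import curve_fit

def yaebx(x, a, b):
    # exponential growth: amplitude a, growth rate b
    return a * np.exp(b * x)

# e.g. fit the mode amplitude during its growth phase:
# popt, _ = curve_fit(yaebx, times, first_mode, p0=(1e-6, 0.09))
# ampl, gamma = popt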
def main():
    from pybindlibs.cpp import mpi_rank

    withTagging(diagdir="withTagging")
    simulator = Simulator(gv.sim)
    simulator.initialize().run()
    gv.sim = None

    noRefinement(diagdir="noRefinement")
    simulator = Simulator(gv.sim)
    simulator.initialize().run()
    gv.sim = None

    if mpi_rank() == 0:
        make_figure()
def main():
    config()
    simulator = Simulator(gv.sim)
    simulator.initialize()
    simulator.run()

    if cpp.mpi_rank() == 0:
        b = hierarchy_from(h5_filename="phare_outputs/EM_B.h5")
        plot(b)
def test_hierarchy_timestamp_cadence(self, refinement_boxes):
    dim = refinement_boxes["L0"]["B0"].ndim

    time_step = 0.001
    # time_step_nbr chosen to exercise double-precision imprecision in the
    # diagnostics dump cadence calculations
    time_step_nbr = 101
    final_time = time_step * time_step_nbr

    for trailing in [0, 1]:  # 1 = skip init dumps
        for i in [2, 3]:
            simInput = simArgs.copy()
            diag_outputs = f"phare_outputs_hierarchy_timestamp_cadence_{dim}_{self.ddt_test_id()}_{i}"
            simInput["diag_options"]["options"]["dir"] = diag_outputs
            simInput["time_step_nbr"] = time_step_nbr

            ph.global_vars.sim = None
            simulation = ph.Simulation(**simInput)
            setup_model(10)

            timestamps = np.arange(0, final_time, time_step * i)[trailing:]
            for quantity in ["B"]:
                ElectromagDiagnostics(
                    quantity=quantity,
                    write_timestamps=timestamps,
                    compute_timestamps=timestamps,
                    flush_every=ElectromagDiagnostics.h5_flush_never,
                )

            Simulator(simulation).run()

            for diagInfo in simulation.diagnostics:
                h5_filename = os.path.join(diag_outputs, h5_filename_from(diagInfo))
                self.assertTrue(os.path.exists(h5_filename))

                hier = hierarchy_from(h5_filename=h5_filename)
                time_hier_keys = list(hier.time_hier.keys())
                self.assertEqual(len(time_hier_keys), len(timestamps))

                for idx, timestamp in enumerate(time_hier_keys):
                    self.assertEqual(hier.format_timestamp(timestamps[idx]), timestamp)
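# The cadence arithmetic above is sensitive to binary floating point:
# decimal multiples of 0.001 are generally not exactly representable, so the
# test matches dump times through a fixed-width formatter rather than by
# float equality. A standalone illustration (plain Python, no PHARE APIs;
# the "{:.10f}" width mirrors format_timestamp's assumed behavior):

print(0.1 + 0.2 == 0.3)                    # False: classic representation error
print("{:.10f}".format(0.1 + 0.2))         # "0.3000000000"
print("{:.10f}".format(0.3))               # "0.3000000000" -> keys compare equal

dt = 0.001
t = 0.0
for _ in range(101):
    t += dt                                # accumulation, as a time integrator does
print(t, 101 * dt)                         # typically differ in the last bits
print("{:.10f}".format(t) == "{:.10f}".format(101 * dt))  # True: same key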
def main():
    for name, config in zip(("uni", "td"), (config_uni, config_td)):
        params = [{"vx": -1, "diagdir": name + "_vxm2"},
                  {"vx": 2, "diagdir": name + "_vx2"}]

        for param in params:
            if param["vx"] > -1:
                continue
            print("-----------------------------------")
            print(param)
            print("-----------------------------------")
            config(**param)
            simulator = Simulator(gv.sim)
            simulator.initialize()
            simulator.run()
            gv.sim = None
def _test_dump_diags(self, dim, **simInput):
    test_id = self.ddt_test_id()

    # configure simulation dim sized values
    for key in ["cells", "dl", "boundary_types"]:
        simInput[key] = [simInput[key] for d in range(dim)]
    b0 = [[10 for i in range(dim)], [19 for i in range(dim)]]
    simInput["refinement_boxes"] = {"L0": {"B0": b0}}

    py_attrs = [f"{dep}_version" for dep in ["samrai", "highfive", "pybind"]]
    py_attrs += ["git_hash"]

    for interp in range(1, 4):
        print("test_dump_diags dim/interp:{}/{}".format(dim, interp))
        local_out = f"{out}_dim{dim}_interp{interp}_mpi_n_{cpp.mpi_size()}_id{test_id}"
        simInput["diag_options"]["options"]["dir"] = local_out
        simulation = ph.Simulation(**simInput)
        self.assertTrue(len(simulation.cells) == dim)
        dump_all_diags(setup_model().populations)
        self.simulator = Simulator(simulation).initialize().advance().reset()

        refined_particle_nbr = simulation.refined_particle_nbr

        self.assertTrue(
            any([
                diagInfo.quantity.endswith("domain")
                for diagInfo in ph.global_vars.sim.diagnostics
            ]))

        particle_files = 0
        for diagInfo in ph.global_vars.sim.diagnostics:
            h5_filepath = os.path.join(local_out, h5_filename_from(diagInfo))
            self.assertTrue(os.path.exists(h5_filepath))

            h5_file = h5py.File(h5_filepath, "r")
            self.assertTrue("0.0000000000" in h5_file[h5_time_grp_key])  # init dump
            self.assertTrue("0.0010000000" in h5_file[h5_time_grp_key])  # first advance dump

            h5_py_attrs = h5_file["py_attrs"].attrs.keys()
            for py_attr in py_attrs:
                self.assertIn(py_attr, h5_py_attrs)

            hier = hierarchy_from(h5_filename=h5_filepath)

            if h5_filepath.endswith("domain.h5"):
                particle_files += 1
                self.assertTrue("pop_mass" in h5_file.attrs)

                if "protons" in h5_filepath:
                    self.assertTrue(h5_file.attrs["pop_mass"] == 1)
                elif "alpha" in h5_filepath:
                    self.assertTrue(h5_file.attrs["pop_mass"] == 4)
                else:
                    raise RuntimeError("Unknown population")

                self.assertGreater(len(hier.level(0).patches), 0)
                for patch in hier.level(0).patches:
                    self.assertTrue(len(patch.patch_datas.items()))
                    for qty_name, pd in patch.patch_datas.items():
                        splits = pd.dataset.split(ph.global_vars.sim)
                        self.assertTrue(splits.size() > 0)
                        self.assertTrue(pd.dataset.size() > 0)
                        self.assertTrue(splits.size() == pd.dataset.size() * refined_particle_nbr)

        self.assertEqual(particle_files, ph.global_vars.sim.model.nbr_populations())
        self.simulator = None
        ph.global_vars.sim = None
class SimulatorRefinedParticleNbr(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(SimulatorRefinedParticleNbr, self).__init__(*args, **kwargs)
        self.simulator = None  # tearDown needs this even if a test fails early
        with open(os.path.join(project_root, "res/amr/splitting.yml"), "r") as stream:
            try:
                self.yaml_root = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
                sys.exit(1)

    # Splitting may push particles out of the domain area, so we discount
    # the particles in the border cells of each patch.
    def _less_per_dim(self, dim, refined_particle_nbr, patch):
        if dim == 1:
            return refined_particle_nbr * 2
        cellNbr = patch.upper - patch.lower + 1
        if dim == 2:
            return refined_particle_nbr * (cellNbr[0] * 2 + cellNbr[1] * 2)
        raise ValueError("Unhandled dimension for function")

    def _check_deltas_and_weights(self, dim, interp, refined_particle_nbr):
        yaml_dim = self.yaml_root["dimension_" + str(dim)]
        yaml_interp = yaml_dim["interp_" + str(interp)]
        yaml_n_particles = yaml_interp["N_particles_" + str(refined_particle_nbr)]

        yaml_delta = [float(s) for s in str(yaml_n_particles["delta"]).split(" ")]
        yaml_weight = [float(s) for s in str(yaml_n_particles["weight"]).split(" ")]

        splitter_t = splitter_type(dim, interp, refined_particle_nbr)
        np.testing.assert_allclose(yaml_delta, splitter_t.delta)
        np.testing.assert_allclose(yaml_weight, splitter_t.weight)

    def _do_dim(self, dim, min_diff, max_diff):
        from pyphare.pharein.simulation import valid_refined_particle_nbr

        for interp in range(1, 4):
            prev_split_particle_max = 0
            for refined_particle_nbr in valid_refined_particle_nbr[dim][interp]:
                self._check_deltas_and_weights(dim, interp, refined_particle_nbr)

                simInput = NoOverwriteDict({"refined_particle_nbr": refined_particle_nbr})
                self.simulator = Simulator(populate_simulation(dim, interp, **simInput))
                self.simulator.initialize()

                dw = self.simulator.data_wrangler()
                max_per_pop = 0
                leaving_particles = 0
                for pop, particles in dw.getPatchLevel(1).getParticles().items():
                    per_pop = 0
                    for key, patches in particles.items():
                        for patch in patches:
                            leaving_particles += self._less_per_dim(dim, refined_particle_nbr, patch)
                            per_pop += patch.data.size()
                    max_per_pop = max(max_per_pop, per_pop)

                prev_min_diff = prev_split_particle_max * min_diff

                # Splitting may push particles out of the domain area, so we
                # discount the particles in the border cells of each patch.
                self.assertTrue(max_per_pop > prev_min_diff - leaving_particles)
                if prev_split_particle_max > 0:
                    prev_max_diff = prev_min_diff * dim * max_diff
                    self.assertTrue(max_per_pop < prev_max_diff)

                prev_split_particle_max = max_per_pop
                self.simulator = None

    """
    1d
      refine 10 cells in 1d, ppc 100
      10 * 2 * ppc = 200
      10 * 3 * ppc = 300 ; 300 / 200 = 1.5
      10 * 4 * ppc = 400 ; 400 / 300 = 1.33
      10 * 5 * ppc = 500 ; 500 / 400 = 1.25
      Taking the minimum diff across permutations, the current count should
      be at least this many times bigger than the previous one.
    """
    PREVIOUS_ITERATION_MIN_DIFF_1d = 1.25
    PREVIOUS_ITERATION_MAX_DIFF_1d = 1.50

    def test_1d(self):
        This = type(self)
        self._do_dim(1, This.PREVIOUS_ITERATION_MIN_DIFF_1d,
                     This.PREVIOUS_ITERATION_MAX_DIFF_1d)

    """
    2d
      refine 10x10 cells in 2d, ppc 100
      10 * 10 * 4 * ppc = 400
      10 * 10 * 8 * ppc = 800 ; 800 / 400 = 2.0
      10 * 10 * 9 * ppc = 900 ; 900 / 800 = 1.125
    """
    PREVIOUS_ITERATION_MIN_DIFF_2d = 1.125
    PREVIOUS_ITERATION_MAX_DIFF_2d = 1.50

    def test_2d(self):
        This = type(self)
        self._do_dim(2, This.PREVIOUS_ITERATION_MIN_DIFF_2d,
                     This.PREVIOUS_ITERATION_MAX_DIFF_2d)

    def tearDown(self):
        # needed in case an exception is raised in a test and the Simulator
        # is not reset properly
        if self.simulator is not None:
            self.simulator.reset()
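# The MIN/MAX_DIFF bounds above come straight from the ratios of successive
# refined-particle counts spelled out in the class docstrings; a standalone
# check of that arithmetic (counts in units of ppc, cell numbers as above):

counts_1d = [10 * n for n in (2, 3, 4, 5)]                # 1d: 10 refined cells
print([b / a for a, b in zip(counts_1d, counts_1d[1:])])  # [1.5, 1.33.., 1.25] -> min 1.25

counts_2d = [10 * 10 * n for n in (4, 8, 9)]              # 2d: 10x10 refined cells
print([b / a for a, b in zip(counts_2d, counts_2d[1:])])  # [2.0, 1.125] -> min 1.125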
        resistivity=0.001,
        hyper_resistivity=0.001,
        diag_options={"format": "phareh5",
                      "options": {"dir": diag_outputs, "mode": "overwrite"}},
        refinement_boxes={},
    )

    ph.MaxwellianFluidModel(
        bx=bx, by=by, bz=bz,
        protons={
            "charge": 1,
            "density": density,
            "init": {"seed": seed},
            **{
                "nbr_part_per_cell": 100,
                "vbulkx": vxyz, "vbulky": vxyz, "vbulkz": vxyz,
                "vthx": vthxyz, "vthy": vthxyz, "vthz": vthxyz,
            }
        },
    )

    ph.ElectronModel(closure="isothermal", Te=0.0)

    from tests.diagnostic import all_timestamps
    timestamps = all_timestamps(ph.global_vars.sim)
    timestamps = np.asarray([timestamps[0], timestamps[-1]])
    for quantity in ["E", "B"]:
        ph.ElectromagDiagnostics(
            quantity=quantity,
            write_timestamps=timestamps,
            compute_timestamps=timestamps,
        )


if ph.PHARE_EXE or __name__ == "__main__":
    config()

if __name__ == "__main__":
    from pyphare.simulator.simulator import Simulator
    Simulator(ph.global_vars.sim).run()
def main():
    config()
    s = Simulator(gv.sim, post_advance=post_advance)
    s.initialize()
    post_advance(0)
    s.run()
def main():
    fromNoise()
    simulator = Simulator(gv.sim)
    simulator.initialize()
    simulator.run()
class SimulatorValidation(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(SimulatorValidation, self).__init__(*args, **kwargs)
        self.simulator = None

    def tearDown(self):
        if self.simulator is not None:
            self.simulator.reset()

    def _do_dim(self, dim, input, valid: bool = False):
        for interp in range(1, 4):
            try:
                self.simulator = Simulator(populate_simulation(dim, interp, **input))
                self.simulator.initialize()
                self.assertTrue(valid)
                self.simulator = None
            except ValueError:
                self.assertTrue(not valid)

    """
    The first set of boxes, "B0": [(10,), (14,)], is configured to force a
    single patch on L0. With MPI this creates a case where the number of
    patches differs across MPI domains. This case must be handled and must
    not hang because of mismatched collective calls.
    """

    valid1D = [
        dup({"cells": [65], "refinement_boxes": {"L0": {"B0": [(10,), (14,)]}}}),
        dup({"cells": [65], "refinement_boxes": {"L0": {"B0": [(5,), (55,)]}}}),
        dup({"cells": [65], "refinement_boxes": {"L0": {"B0": Box(5, 55)}}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 55)]}}),
        dup({"cells": [65], "refinement_boxes": {0: [Box(5, 55)]}}),
        dup({"cells": [65], "refinement_boxes": {0: [Box(0, 55)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 14), Box(15, 25)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(12, 48)], "L2": [Box(60, 64)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(12, 48)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(20, 30)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(11, 49)]}, "nesting_buffer": 1}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(10, 50)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(15, 49)]}}),
        dup({"cells": [65], "refinement_boxes": None, "smallest_patch_size": 20,
             "largest_patch_size": 20, "nesting_buffer": 10}),
        # finer box is within set of coarser boxes
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 9), Box(10, 15)], "L1": [Box(11, 29)]}}),
    ]

    invalid1D = [
        # finer box outside lower
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 24)], "L1": [Box(9, 30)]}}),
        # finer box outside upper
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 24)], "L1": [Box(15, 50)]}}),
        # overlapping boxes
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 15), Box(15, 25)]}}),
        # box.upper outside domain
        dup({"cells": [55], "refinement_boxes": {"L0": {"B0": [(5,), (65,)]}}}),
        # smallest_patch_size > largest_patch_size
        dup({"smallest_patch_size": 100, "largest_patch_size": 64}),
        # refined_particle_nbr doesn't exist
        dup({"refined_particle_nbr": 1}),
        # L2 box incompatible with L1 box due to nesting buffer
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(11, 49)]}, "nesting_buffer": 2}),
        # negative nesting buffer
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(11, 49)]}, "nesting_buffer": -1}),
        # too large nesting buffer
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(11, 49)]}, "nesting_buffer": 33}),
        dup({"cells": [65], "refinement_boxes": None, "largest_patch_size": 20, "nesting_buffer": 46}),
        # finer box is not within set of coarser boxes
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 9), Box(11, 15)], "L1": [Box(11, 29)]}}),
    ]

    @data(*valid1D)
    def test_1d_valid(self, input):
        self._do_dim(1, input, True)

    @data(*invalid1D)
    def test_1d_invalid(self, input):
        self._do_dim(1, input)

    valid2D = [
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 55)]}}),
        dup({"smallest_patch_size": None, "largest_patch_size": None}),
        dup({"smallest_patch_size": (10, 10), "largest_patch_size": (20, 20)}),
        dup({"smallest_patch_size": [10, 10], "largest_patch_size": [20, 20]}),
        dup({"smallest_patch_size": (10, 10), "largest_patch_size": None}),
        dup({"smallest_patch_size": None, "largest_patch_size": (20, 20)}),
        dup({"smallest_patch_size": (10, 10)}),
        dup({"largest_patch_size": (20, 20)}),
        dup({"smallest_patch_size": 10, "largest_patch_size": (20, 20)}),
        dup({"smallest_patch_size": (10, 10), "largest_patch_size": 20}),
        dup({"smallest_patch_size": [10, 10], "largest_patch_size": (20, 20)}),
        dup({"smallest_patch_size": (10, 10), "largest_patch_size": [20, 20]}),
        dup({"cells": [65, 65], "refinement_boxes": None, "smallest_patch_size": 20,
             "largest_patch_size": 20, "nesting_buffer": 10}),
        dup({"cells": [65, 65], "refinement_boxes": {"L0": {"B0": Box2D(5, 55)}}}),
        dup({"cells": [65, 65], "refinement_boxes": {0: [Box2D(5, 55)]}}),
        dup({"cells": [65, 65], "refinement_boxes": {0: [Box2D(0, 55)]}}),
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 14), Box2D(15, 25)]}}),
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 25)], "L1": [Box2D(12, 48)], "L2": [Box2D(60, 64)]}}),
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 25)], "L1": [Box2D(12, 48)]}}),
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 25)], "L1": [Box2D(20, 30)]}}),
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 25)], "L1": [Box2D(11, 49)]}, "nesting_buffer": 1}),
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 25)], "L1": [Box2D(10, 50)]}}),
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 25)], "L1": [Box2D(15, 49)]}}),
    ]

    invalid2D = [
        # finer box outside lower
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 24)], "L1": [Box2D(9, 30)]}}),
        # finer box outside upper
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 24)], "L1": [Box2D(15, 50)]}}),
        # overlapping boxes
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 15), Box2D(15, 25)]}}),
        # box.upper outside domain
        dup({"cells": [55, 55], "refinement_boxes": {"L0": {"B0": Box2D(5, 65)}}}),
        # smallest_patch_size > largest_patch_size
        dup({"smallest_patch_size": 100, "largest_patch_size": 64}),
        # refined_particle_nbr doesn't exist
        dup({"refined_particle_nbr": 1}),
        # L2 box incompatible with L1 box due to nesting buffer
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 25)], "L1": [Box2D(11, 49)]}, "nesting_buffer": 2}),
        # negative nesting buffer
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 25)], "L1": [Box2D(11, 49)]}, "nesting_buffer": -1}),
        # too large nesting buffer
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 25)], "L1": [Box2D(11, 49)]}, "nesting_buffer": 33}),
        dup({"cells": [65, 65], "refinement_boxes": None, "largest_patch_size": 20, "nesting_buffer": 46}),
        # finer box is not within set of coarser boxes
        dup({"cells": [65, 65], "refinement_boxes": {"L0": [Box2D(5, 9), Box2D(11, 15)], "L1": [Box2D(11, 29)]}}),
    ]

    @data(*valid2D)
    def test_2d_valid(self, input):
        self._do_dim(2, input, True)

    @data(*invalid2D)
    def test_2d_invalid(self, input):
        self._do_dim(2, input)
def getHierarchy(self, interp_order, refinement_boxes, qty,
                 nbr_part_per_cell=100,
                 diag_outputs="phare_outputs",
                 density=lambda x: 0.3 + 1. / np.cosh((x - 6) / 4.)**2,
                 beam=False,
                 smallest_patch_size=10, largest_patch_size=10,
                 cells=120, dl=0.1):
    from pyphare.pharein import global_vars
    global_vars.sim = None
    startMPI()

    Simulation(smallest_patch_size=smallest_patch_size,
               largest_patch_size=largest_patch_size,
               time_step_nbr=30000,
               final_time=30.,
               boundary_types="periodic",
               cells=cells,
               dl=dl,
               interp_order=interp_order,
               refinement_boxes=refinement_boxes,
               diag_options={"format": "phareh5",
                             "options": {"dir": diag_outputs, "mode": "overwrite"}})

    def beam_density(x):
        return np.zeros_like(x) + 0.3

    def by(x):
        from pyphare.pharein.global_vars import sim
        L = sim.simulation_domain()
        return 0.1 * np.cos(2 * np.pi * x / L[0])

    def bz(x):
        from pyphare.pharein.global_vars import sim
        L = sim.simulation_domain()
        return 0.1 * np.sin(2 * np.pi * x / L[0])

    def bx(x):
        return 1.

    def vx(x):
        from pyphare.pharein.global_vars import sim
        L = sim.simulation_domain()
        return 0.1 * np.cos(2 * np.pi * x / L[0]) + 0.2

    def vy(x):
        from pyphare.pharein.global_vars import sim
        L = sim.simulation_domain()
        return 0.1 * np.cos(2 * np.pi * x / L[0])

    def vz(x):
        from pyphare.pharein.global_vars import sim
        L = sim.simulation_domain()
        return 0.1 * np.sin(2 * np.pi * x / L[0])

    def vthx(x):
        return 0.01 + np.zeros_like(x)

    def vthy(x):
        return 0.01 + np.zeros_like(x)

    def vthz(x):
        return 0.01 + np.zeros_like(x)

    if beam:
        MaxwellianFluidModel(
            bx=bx, by=by, bz=bz,
            protons={"charge": 1, "density": density,
                     "vbulkx": vx, "vbulky": vy, "vbulkz": vz,
                     "vthx": vthx, "vthy": vthy, "vthz": vthz,
                     "nbr_part_per_cell": nbr_part_per_cell,
                     "init": {"seed": 1337}},
            beam={"charge": 1, "density": beam_density,
                  "vbulkx": vx, "vbulky": vy, "vbulkz": vz,
                  "vthx": vthx, "vthy": vthy, "vthz": vthz,
                  "nbr_part_per_cell": nbr_part_per_cell,
                  "init": {"seed": 1337}})
    else:
        MaxwellianFluidModel(
            bx=bx, by=by, bz=bz,
            protons={"charge": 1, "density": density,
                     "vbulkx": vx, "vbulky": vy, "vbulkz": vz,
                     "vthx": vthx, "vthy": vthy, "vthz": vthz,
                     "nbr_part_per_cell": nbr_part_per_cell,
                     "init": {"seed": 1337}})

    ElectronModel(closure="isothermal", Te=0.12)

    for quantity in ["E", "B"]:
        ElectromagDiagnostics(quantity=quantity,
                              write_timestamps=np.zeros(1),
                              compute_timestamps=np.zeros(1))

    for quantity in ["density", "bulkVelocity"]:
        FluidDiagnostics(quantity=quantity,
                         write_timestamps=np.zeros(1),
                         compute_timestamps=np.zeros(1))

    poplist = ["protons", "beam"] if beam else ["protons"]
    for pop in poplist:
        for quantity in ["density", "flux"]:
            FluidDiagnostics(quantity=quantity,
                             write_timestamps=np.zeros(1),
                             compute_timestamps=np.zeros(1),
                             population_name=pop)

        for quantity in ["domain", "levelGhost", "patchGhost"]:
            ParticleDiagnostics(quantity=quantity,
                                compute_timestamps=np.zeros(1),
                                write_timestamps=np.zeros(1),
                                population_name=pop)

    simulator = Simulator(global_vars.sim)
    simulator.initialize()

    if qty == "b":
        b_hier = hierarchy_from(h5_filename=diag_outputs + "/EM_B.h5")
        return b_hier

    is_particle_type = qty == "particles" or qty == "particles_patch_ghost"
    if is_particle_type:
        particle_hier = None

    if qty == "particles":
        particle_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_domain.h5")
        particle_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_levelGhost.h5",
                                       hier=particle_hier)

    if is_particle_type:
        particle_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_patchGhost.h5",
                                       hier=particle_hier)

    if qty == "particles":
        merge_particles(particle_hier)

    if is_particle_type:
        return particle_hier

    if qty == "moments":
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_density.h5")
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_bulkVelocity.h5", hier=mom_hier)
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_density.h5", hier=mom_hier)
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_flux.h5", hier=mom_hier)
        if beam:
            mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_beam_density.h5", hier=mom_hier)
            mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_beam_flux.h5", hier=mom_hier)
        return mom_hier
def main():
    config()
    simulator = Simulator(gv.sim)
    simulator.initialize()
    simulator.run()
def getHierarchy(self, interp_order, refinement_boxes, qty, diag_outputs,
                 nbr_part_per_cell=100,
                 density=_density,
                 smallest_patch_size=None, largest_patch_size=20,
                 cells=120, time_step=0.001,
                 model_init={},
                 dl=0.2, extra_diag_options={}, time_step_nbr=1,
                 timestamps=None, ndim=1,
                 block_merging_particles=False):
    diag_outputs = f"phare_outputs/advance/{diag_outputs}"
    from pyphare.pharein import global_vars
    global_vars.sim = None

    if smallest_patch_size is None:
        from pyphare.pharein.simulation import check_patch_size
        _, smallest_patch_size = check_patch_size(ndim, interp_order=interp_order, cells=cells)

    extra_diag_options["mode"] = "overwrite"
    extra_diag_options["dir"] = diag_outputs
    self.register_diag_dir_for_cleanup(diag_outputs)

    Simulation(
        smallest_patch_size=smallest_patch_size,
        largest_patch_size=largest_patch_size,
        time_step_nbr=time_step_nbr,
        time_step=time_step,
        boundary_types=["periodic"] * ndim,
        cells=np_array_ify(cells, ndim),
        dl=np_array_ify(dl, ndim),
        interp_order=interp_order,
        refinement_boxes=refinement_boxes,
        diag_options={"format": "phareh5", "options": extra_diag_options},
        strict=True,
    )

    def S(x, x0, l):
        return 0.5 * (1 + np.tanh((x - x0) / l))

    def bx(*xyz):
        return 1.

    def _cos_product(*xyz):
        from pyphare.pharein.global_vars import sim
        L = sim.simulation_domain()
        _ = lambda i: 0.1 * np.cos(2 * np.pi * xyz[i] / L[i])
        return np.asarray([_(i) for i, v in enumerate(xyz)]).prod(axis=0)

    # by, bz and all bulk velocity components share the same cosine profile here
    by = bz = vx = vy = vz = _cos_product

    def vth(*xyz):
        return 0.01 + np.zeros_like(xyz[0])

    def vthx(*xyz):
        return vth(*xyz)

    def vthy(*xyz):
        return vth(*xyz)

    def vthz(*xyz):
        return vth(*xyz)

    MaxwellianFluidModel(
        bx=bx, by=by, bz=bz,
        protons={"charge": 1, "density": density,
                 "vbulkx": vx, "vbulky": vy, "vbulkz": vz,
                 "vthx": vthx, "vthy": vthy, "vthz": vthz,
                 "nbr_part_per_cell": nbr_part_per_cell,
                 "init": model_init})

    ElectronModel(closure="isothermal", Te=0.12)

    if timestamps is None:
        timestamps = all_timestamps(global_vars.sim)

    for quantity in ["E", "B"]:
        ElectromagDiagnostics(quantity=quantity,
                              write_timestamps=timestamps,
                              compute_timestamps=timestamps)

    for quantity in ["density", "bulkVelocity"]:
        FluidDiagnostics(quantity=quantity,
                         write_timestamps=timestamps,
                         compute_timestamps=timestamps)

    poplist = ["protons"]
    for pop in poplist:
        for quantity in ["density", "flux"]:
            FluidDiagnostics(quantity=quantity,
                             write_timestamps=timestamps,
                             compute_timestamps=timestamps,
                             population_name=pop)

        for quantity in ["domain", "levelGhost", "patchGhost"]:
            ParticleDiagnostics(quantity=quantity,
                                compute_timestamps=timestamps,
                                write_timestamps=timestamps,
                                population_name=pop)

    Simulator(global_vars.sim).run()

    eb_hier = None
    if qty in ["e", "eb", "fields"]:
        eb_hier = hierarchy_from(h5_filename=diag_outputs + "/EM_E.h5", hier=eb_hier)
    if qty in ["b", "eb", "fields"]:
        eb_hier = hierarchy_from(h5_filename=diag_outputs + "/EM_B.h5", hier=eb_hier)
    if qty in ["e", "b", "eb"]:
        return eb_hier

    is_particle_type = qty == "particles" or qty == "particles_patch_ghost"
    if is_particle_type:
        particle_hier = None

    if qty == "particles":
        particle_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_domain.h5")
        particle_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_levelGhost.h5",
                                       hier=particle_hier)

    if is_particle_type:
        particle_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_patchGhost.h5",
                                       hier=particle_hier)

    if not block_merging_particles and qty == "particles":
        merge_particles(particle_hier)

    if is_particle_type:
        return particle_hier

    if qty == "moments" or qty == "fields":
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_density.h5", hier=eb_hier)
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_bulkVelocity.h5", hier=mom_hier)
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_density.h5", hier=mom_hier)
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_flux.h5", hier=mom_hier)
        return mom_hier
def main():
    cases = [0.01, 0.05, 0.1, 0.3, 0.5, 0.75, 1, 2]
    dls = [0.2, 0.1]
    nbrcells = [100, 200]
    nbrdts = [25000, 100000]

    for vth in cases:
        for dl, nbrcell, nbrdt in zip(dls, nbrcells, nbrdts):
            uniform(vth, dl, nbrcell, nbrdt)
            simulator = Simulator(gv.sim)
            simulator.initialize()
            simulator.run()
            gv.sim = None

    paths = glob("*vth*")
    runs_vth = {}
    Bnrj_vth = {}
    K_vth = {}
    times_vth = {}

    # extract vth and dx from the name of each directory
    vthdx = np.asarray(sorted([[float(x) for x in path.split("/")[-1].strip("vth").split("dx")]
                               for path in paths],
                              key=lambda x: x[1]))
    # note: the sort key must use each path, not paths[0], or the key is constant
    paths = sorted(paths, key=lambda path: float(path.split("/")[-1].strip("vth").split("dx")[1]))

    # now for each directory, extract magnetic and kinetic energies
    for path in paths:
        runs_vth[path], Bnrj_vth[path], K_vth[path], times_vth[path] = energies(path)

    # we want to plot things as a function of the thermal velocity
    # for dx=0.1 and dx=0.2, so extract their values
    vth0p2 = np.asarray([x[0] for x in vthdx if x[1] == 0.2])
    vth0p1 = np.asarray([x[0] for x in vthdx if x[1] == 0.1])

    # We will plot the variation of the kinetic energy relative to its
    # "initial value"; by "initial" we mean its average over some time
    # interval at the start of the run. We take [3, 4] here because before
    # that there is some kind of irrelevant transient.
    K0 = {}
    for path, K in K_vth.items():
        it1, it2 = avg_interval(3, 4, times_vth[path])
        K0[path] = np.mean(K[it1:it2 + 1])

    # calculate the relative variation of kinetic energy for both cases
    rel_K0p2 = np.asarray([np.abs(K[-1] - K0[path]) / K0[path] * 100
                           for path, K in K_vth.items() if "dx0.2" in path])
    rel_K0p1 = np.asarray([np.abs(K[-1] - K0[path]) / K0[path] * 100
                           for path, K in K_vth.items() if "dx0.1" in path])

    fig, ax = plt.subplots()
    id2 = np.argsort(vth0p2)
    id1 = np.argsort(vth0p1)
    ax.plot(vth0p2[id2], rel_K0p2[id2], marker="o", label="dx = 0.2 (dt=0.002, 25k steps)")
    ax.plot(vth0p1[id1], rel_K0p1[id1], marker="o", label="dx = 0.1 (dt=5e-4, 100k steps)")
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.set_ylabel(r"$\Delta K$ (%)")
    ax.set_xlabel("Vth")
    ax.set_title("kinetic energy evolution as a function of Vth")
    ax.legend()
    fig.tight_layout()
    fig.savefig("K.png")
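# Note on the directory-name parsing above: str.strip("vth") strips any of
# the characters v/t/h from both ends, not the literal "vth" prefix; it
# works here only because names produced by uniform() start with "vth" and
# end with a digit. A quick standalone check (the "vth0.3dx0.2" name below
# assumes that naming scheme):

name = "vth0.3dx0.2"
vth, dx = (float(x) for x in name.strip("vth").split("dx"))
print(vth, dx)  # 0.3 0.2
# beware: a name legitimately ending in "v", "t" or "h" would be mangled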
def getHierarchy(self, interp_order, refinement_boxes, qty, diag_outputs,
                 nbr_part_per_cell=100,
                 density=_density,
                 extra_diag_options={},
                 beam=False,
                 time_step_nbr=1,
                 smallest_patch_size=None, largest_patch_size=10,
                 cells=120, dl=0.1, ndim=1):
    diag_outputs = f"phare_outputs/init/{diag_outputs}"
    from pyphare.pharein import global_vars
    global_vars.sim = None

    if smallest_patch_size is None:
        from pyphare.pharein.simulation import check_patch_size
        _, smallest_patch_size = check_patch_size(ndim, interp_order=interp_order, cells=cells)

    extra_diag_options["mode"] = "overwrite"
    extra_diag_options["dir"] = diag_outputs
    self.register_diag_dir_for_cleanup(diag_outputs)

    Simulation(
        smallest_patch_size=smallest_patch_size,
        largest_patch_size=largest_patch_size,
        time_step_nbr=time_step_nbr,
        final_time=30.,
        boundary_types=["periodic"] * ndim,
        cells=[cells] * ndim,
        dl=[dl] * ndim,
        interp_order=interp_order,
        refinement_boxes=refinement_boxes,
        diag_options={"format": "phareh5", "options": extra_diag_options},
        strict=True,
    )

    def beam_density(*xyz):
        return np.zeros_like(xyz[0]) + 0.3

    def bx(*xyz):
        return 1.

    def _cos_product(*xyz):
        from pyphare.pharein.global_vars import sim
        L = sim.simulation_domain()
        _ = lambda i: 0.1 * np.cos(2 * np.pi * xyz[i] / L[i])
        return np.asarray([_(i) for i, v in enumerate(xyz)]).prod(axis=0)

    def _sin_product(*xyz):
        from pyphare.pharein.global_vars import sim
        L = sim.simulation_domain()
        _ = lambda i: 0.1 * np.sin(2 * np.pi * xyz[i] / L[i])
        return np.asarray([_(i) for i, v in enumerate(xyz)]).prod(axis=0)

    # by, vx and vy share the cosine profile; bz and vz the sine profile
    by = vx = vy = _cos_product
    bz = vz = _sin_product

    def vth(*xyz):
        return 0.01 + np.zeros_like(xyz[0])

    def vthx(*xyz):
        return vth(*xyz)

    def vthy(*xyz):
        return vth(*xyz)

    def vthz(*xyz):
        return vth(*xyz)

    if beam:
        MaxwellianFluidModel(
            bx=bx, by=by, bz=bz,
            protons={"charge": 1, "density": density,
                     "vbulkx": vx, "vbulky": vy, "vbulkz": vz,
                     "vthx": vthx, "vthy": vthy, "vthz": vthz,
                     "nbr_part_per_cell": nbr_part_per_cell,
                     "init": {"seed": 1337}},
            beam={"charge": 1, "density": beam_density,
                  "vbulkx": vx, "vbulky": vy, "vbulkz": vz,
                  "vthx": vthx, "vthy": vthy, "vthz": vthz,
                  "nbr_part_per_cell": nbr_part_per_cell,
                  "init": {"seed": 1337}})
    else:
        MaxwellianFluidModel(
            bx=bx, by=by, bz=bz,
            protons={"charge": 1, "density": density,
                     "vbulkx": vx, "vbulky": vy, "vbulkz": vz,
                     "vthx": vthx, "vthy": vthy, "vthz": vthz,
                     "nbr_part_per_cell": nbr_part_per_cell,
                     "init": {"seed": 1337}})

    ElectronModel(closure="isothermal", Te=0.12)

    for quantity in ["E", "B"]:
        ElectromagDiagnostics(quantity=quantity,
                              write_timestamps=np.zeros(time_step_nbr),
                              compute_timestamps=np.zeros(time_step_nbr))

    for quantity in ["density", "bulkVelocity"]:
        FluidDiagnostics(quantity=quantity,
                         write_timestamps=np.zeros(time_step_nbr),
                         compute_timestamps=np.zeros(time_step_nbr))

    poplist = ["protons", "beam"] if beam else ["protons"]
    for pop in poplist:
        for quantity in ["density", "flux"]:
            FluidDiagnostics(quantity=quantity,
                             write_timestamps=np.zeros(time_step_nbr),
                             compute_timestamps=np.zeros(time_step_nbr),
                             population_name=pop)

        for quantity in ["domain", "levelGhost", "patchGhost"]:
            ParticleDiagnostics(quantity=quantity,
                                compute_timestamps=np.zeros(time_step_nbr),
                                write_timestamps=np.zeros(time_step_nbr),
                                population_name=pop)

    Simulator(global_vars.sim).initialize().reset()

    eb_hier = None
    if qty in ["e", "eb"]:
        eb_hier = hierarchy_from(h5_filename=diag_outputs + "/EM_E.h5", hier=eb_hier)
    if qty in ["b", "eb"]:
        eb_hier = hierarchy_from(h5_filename=diag_outputs + "/EM_B.h5", hier=eb_hier)
    if qty in ["e", "b", "eb"]:
        return eb_hier

    is_particle_type = qty == "particles" or qty == "particles_patch_ghost"
    if is_particle_type:
        particle_hier = None

    if qty == "particles":
        particle_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_domain.h5")
        particle_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_levelGhost.h5",
                                       hier=particle_hier)

    if is_particle_type:
        particle_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_patchGhost.h5",
                                       hier=particle_hier)

    if qty == "particles":
        merge_particles(particle_hier)

    if is_particle_type:
        return particle_hier

    if qty == "moments":
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_density.h5")
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_bulkVelocity.h5", hier=mom_hier)
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_density.h5", hier=mom_hier)
        mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_protons_flux.h5", hier=mom_hier)
        if beam:
            mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_beam_density.h5", hier=mom_hier)
            mom_hier = hierarchy_from(h5_filename=diag_outputs + "/ions_pop_beam_flux.h5", hier=mom_hier)
        return mom_hier
class SimulatorRefineBoxInputs(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(SimulatorRefineBoxInputs, self).__init__(*args, **kwargs)
        self.simulator = None

    def dup(dic):
        dic = NoOverwriteDict(dic)
        dic.update(diags.copy())
        dic.update({"diags_fn": lambda model: dump_all_diags(model.populations)})
        return dic

    """
    The first set of boxes, "B0": [(10,), (14,)], is configured to force a
    single patch on L0. With MPI this creates a case where the number of
    patches differs across MPI domains. This case must be handled and must
    not hang because of mismatched collective calls.
    """

    valid1D = [
        dup({"cells": [65], "refinement_boxes": {"L0": {"B0": [(10,), (14,)]}}}),
        dup({"cells": [65], "refinement_boxes": {"L0": {"B0": [(5,), (55,)]}}}),
        dup({"cells": [65], "refinement_boxes": {"L0": {"B0": Box(5, 55)}}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 55)]}}),
        dup({"cells": [65], "refinement_boxes": {0: [Box(5, 55)]}}),
        dup({"cells": [65], "refinement_boxes": {0: [Box(0, 55)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 14), Box(15, 25)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(12, 48)], "L2": [Box(60, 64)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(12, 48)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(20, 30)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(11, 49)]}, "nesting_buffer": 1}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(10, 50)]}}),
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(15, 49)]}}),
        dup({"cells": [65], "refinement_boxes": None, "smallest_patch_size": 20,
             "largest_patch_size": 20, "nesting_buffer": 10}),
    ]

    invalid1D = [
        # finer box outside lower
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 24)], "L1": [Box(9, 30)]}}),
        # finer box outside upper
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 24)], "L1": [Box(15, 50)]}}),
        # overlapping boxes
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 15), Box(15, 25)]}}),
        # box.upper outside domain
        dup({"cells": [55], "refinement_boxes": {"L0": {"B0": [(5,), (65,)]}}}),
        # smallest_patch_size > largest_patch_size
        dup({"smallest_patch_size": 100, "largest_patch_size": 64}),
        # refined_particle_nbr doesn't exist
        dup({"refined_particle_nbr": 1}),
        # L2 box incompatible with L1 box due to nesting buffer
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(11, 49)]}, "nesting_buffer": 2}),
        # negative nesting buffer
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(11, 49)]}, "nesting_buffer": -1}),
        # too large nesting buffer
        dup({"cells": [65], "refinement_boxes": {"L0": [Box(5, 25)], "L1": [Box(11, 49)]}, "nesting_buffer": 33}),
        dup({"cells": [65], "refinement_boxes": None, "largest_patch_size": 20, "nesting_buffer": 46}),
    ]

    def tearDown(self):
        if self.simulator is not None:
            self.simulator.reset()

    def _do_dim(self, dim, input, valid: bool = False):
        for interp in range(1, 4):
            try:
                self.simulator = Simulator(populate_simulation(dim, interp, **input))
                self.simulator.initialize()
                self.assertTrue(valid)
                self.simulator.dump(self.simulator.currentTime(), self.simulator.timeStep())
                self.simulator = None
            except ValueError:
                self.assertTrue(not valid)

    @data(*valid1D)
    def test_1d_valid(self, input):
        self._do_dim(1, input, True)

    @data(*invalid1D)
    def test_1d_invalid(self, input):
        self._do_dim(1, input)
def test_restarts(self, dim, interp, simInput):
    print(f"test_restarts dim/interp:{dim}/{interp}")

    simput = copy.deepcopy(simInput)

    for key in ["cells", "dl", "boundary_types"]:
        simput[key] = [simput[key]] * dim

    if "refinement" not in simput:
        b0 = [[10 for i in range(dim)], [19 for i in range(dim)]]
        simput["refinement_boxes"] = {"L0": {"B0": b0}}
    else:
        # https://github.com/LLNL/SAMRAI/issues/199
        # tagging can handle more than one timestep as it does not appear
        # subject to regridding issues, so we run more timesteps to confirm
        # the simulations remain equivalent
        simput["time_step_nbr"] = 10

    # if a restart time exists the simulation "loads" from the restart file,
    # otherwise it just saves restart files at the given timestamps
    assert "restart_time" not in simput["restart_options"]

    simput["interp_order"] = interp
    time_step = simput["time_step"]
    time_step_nbr = simput["time_step_nbr"]

    restart_idx = 4
    restart_time = time_step * restart_idx
    timestamps = [time_step * restart_idx, time_step * time_step_nbr]

    # first simulation
    local_out = f"{out}/test/{dim}/{interp}/mpi_n/{cpp.mpi_size()}/id{self.ddt_test_id()}"
    simput["restart_options"]["dir"] = local_out
    simput["diag_options"]["options"]["dir"] = local_out
    ph.global_vars.sim = None
    ph.global_vars.sim = ph.Simulation(**simput)
    assert "restart_time" not in ph.global_vars.sim.restart_options
    model = setup_model()
    dump_all_diags(model.populations, timestamps=np.array(timestamps))
    Simulator(ph.global_vars.sim).run().reset()
    self.register_diag_dir_for_cleanup(local_out)
    diag_dir0 = local_out

    # second simulation, restarted from the first at restart_time
    local_out = f"{local_out}_n2"
    simput["diag_options"]["options"]["dir"] = local_out
    simput["restart_options"]["restart_time"] = restart_time
    ph.global_vars.sim = None
    ph.global_vars.sim = ph.Simulation(**simput)
    assert "restart_time" in ph.global_vars.sim.restart_options
    model = setup_model()
    dump_all_diags(model.populations, timestamps=np.array(timestamps))
    Simulator(ph.global_vars.sim).run().reset()
    self.register_diag_dir_for_cleanup(local_out)
    diag_dir1 = local_out

    def check(qty0, qty1, checker):
        checks = 0
        for ilvl, lvl0 in qty0.patch_levels.items():
            patch_level1 = qty1.patch_levels[ilvl]
            for p_idx, patch0 in enumerate(lvl0):
                patch1 = patch_level1.patches[p_idx]
                for pd_key, pd0 in patch0.patch_datas.items():
                    pd1 = patch1.patch_datas[pd_key]
                    self.assertNotEqual(id(pd0), id(pd1))
                    checker(pd0, pd1)
                    checks += 1
        return checks

    def check_particles(qty0, qty1):
        return check(qty0, qty1,
                     lambda pd0, pd1: self.assertEqual(pd0.dataset, pd1.dataset))

    def check_field(qty0, qty1):
        return check(qty0, qty1,
                     lambda pd0, pd1: np.testing.assert_equal(pd0.dataset[:], pd1.dataset[:]))

    def count_levels_and_patches(qty):
        n_levels = len(qty.patch_levels)
        n_patches = 0
        for ilvl, lvl in qty.patch_levels.items():
            n_patches += len(qty.patch_levels[ilvl].patches)
        return n_levels, n_patches

    n_quantities_per_patch = 20
    pops = model.populations

    for time in timestamps:
        checks = 0
        run0 = Run(diag_dir0)
        run1 = Run(diag_dir1)

        checks += check_particles(run0.GetParticles(time, pops), run1.GetParticles(time, pops))
        checks += check_field(run0.GetB(time), run1.GetB(time))
        checks += check_field(run0.GetE(time), run1.GetE(time))
        checks += check_field(run0.GetNi(time), run1.GetNi(time))
        checks += check_field(run0.GetVi(time), run1.GetVi(time))

        for pop in pops:
            checks += check_field(run0.GetFlux(time, pop), run1.GetFlux(time, pop))
            checks += check_field(run0.GetN(time, pop), run1.GetN(time, pop))

        n_levels, n_patches = count_levels_and_patches(run0.GetB(time))
        self.assertEqual(n_levels, 2)  # L0 plus one refined level
        self.assertGreaterEqual(n_patches, n_levels)  # at least one patch per level
        self.assertEqual(checks, n_quantities_per_patch * n_patches)
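# Where n_quantities_per_patch = 20 could come from, assuming setup_model()
# creates two populations (protons and alpha, as the pop_mass checks
# elsewhere in these tests suggest); this tally is an inference, not
# project documentation:

fields = 3 + 3 + 1 + 3        # B(x,y,z) + E(x,y,z) + Ni + Vi(x,y,z) = 10
per_pop = 3 + 1 + 1           # Flux(x,y,z) + N + domain particles = 5
print(fields + 2 * per_pop)   # 20, matching n_quantities_per_patch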