def checkpoint_manager(self):
    """Function executed on checkpoint steps to perform various tasks
    related to checkpoint management. These include copying the flux
    diagnostic data needed for a restart as well as deleting old
    checkpoints.
    """
    # only the root processor should execute this function, and not on the
    # first step
    if not self.check_timestep() or mwxrun.me != 0 or mwxrun.get_it() == 1:
        return

    # Save a copy of flux diagnostics, if present, to load when restarting.
    if (
        self.flux_diag is not None
        and self.flux_diag.last_run_step == mwxrun.get_it() - 1
    ):
        # We use the unorthodox file extension .ckpt (for checkpoint) so
        # that we can continue to blindly move all .dpkl files from EFS
        # to S3 when running on AWS
        dst = os.path.join(
            self.write_dir,
            f"{self.name}{(mwxrun.get_it() - 1):05d}",
            "fluxdata.ckpt"
        )
        self.flux_diag.save(filepath=dst)
    else:
        logger.warning(
            "Flux diagnostic data will not be saved with checkpoint and "
            "therefore won't be available at restart."
        )

    if self.clear_old_checkpoints:
        init_restart_util.clean_old_checkpoints(
            checkpoint_prefix=self.name, num_to_keep=1
        )
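# Illustrative sketch (standalone, not part of the class above): the flux
# checkpoint destination built in checkpoint_manager() resolves to
# "<write_dir>/<name><step-1, zero-padded to 5 digits>/fluxdata.ckpt". The
# helper name and example values below are made up for illustration.
import os

def _example_flux_ckpt_path(write_dir, name, it):
    """Return the path checkpoint_manager() would write flux data to."""
    return os.path.join(write_dir, f"{name}{(it - 1):05d}", "fluxdata.ckpt")

# _example_flux_ckpt_path("diags", "checkpoint", 100)
# -> "diags/checkpoint00099/fluxdata.ckpt"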
def text_diag(self):
    """Write requested information to output."""
    if self.check_timestep():
        live_parts, parts_per_species_str = self._get_part_nums()

        # Wrap everything else in a try/except so the run doesn't crash on
        # a diag error
        try:
            wall_time = time.time() - self.prev_time
            steps = mwxrun.get_it() - self.prev_step
            # This isn't perfectly accurate, but it's a good approximation
            self.particle_steps_total += live_parts * steps

            if wall_time > 0:
                particle_step_rate = (
                    self.particle_steps_total
                    - self.previous_particle_steps_total
                ) / wall_time
                step_rate = steps / wall_time
            else:
                step_rate = 0
                particle_step_rate = 0

            total_elapsed_time = time.time() - self.start_time
            particle_step_rate_total = (
                self.particle_steps_total / total_elapsed_time
            )

            self.status_dict = {
                'step': mwxrun.get_it(),
                'nplive': live_parts,
                'npperspecies': parts_per_species_str,
                'wall_time': wall_time,
                'step_rate': step_rate,
                'particle_step_rate': particle_step_rate,
                'diag_steps': self.diag_steps,
                'particle_step_rate_total': particle_step_rate_total,
                # TODO: Reimplement when we have new parallel comm hook.
                # 'iproc': mwxutil.iproc,
                'iproc': None,
            }

            # If memory usage is requested, compute it.
            if (("system_memory" in self.diag_string)
                    or ("memory_usage" in self.diag_string)):
                self.update_memory()

            # Support child objects by having arbitrary updates
            self._update_status_dict()

            # Print the string with everything that ended up in the
            # dictionary.
            logger.info(self.diag_string.format(**self.status_dict))

            self.previous_particle_steps_total = self.particle_steps_total

        except Exception as err:
            logger.error(
                f"Failed to output diag_string {self.diag_string} "
                f"with error {err}"
            )

        self.prev_time = time.time()
        self.prev_step = mwxrun.get_it()
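# Illustrative sketch (standalone): the rates logged by text_diag() reduce to
# simple ratios over one diagnostic period. The function and argument names
# below are example placeholders, not module API; live_parts is assumed
# roughly constant over the period, as noted in the comment above.
def _example_step_rates(live_parts, steps, wall_time, prev_particle_steps):
    """Return (step_rate, particle_step_rate) for one diagnostic period."""
    particle_steps = prev_particle_steps + live_parts * steps
    if wall_time <= 0:
        return 0.0, 0.0
    return steps / wall_time, (particle_steps - prev_particle_steps) / wall_time

# e.g. _example_step_rates(1000, 50, 10.0, 0) -> (5.0, 5000.0)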
def test_restart_from_checkpoint(caplog, force, files_exist):
    caplog.set_level(logging.WARNING)
    testing_util.initialize_testingdir(
        f"test_restart_from_checkpoint_{force}_{files_exist}")

    run = get_run()

    if force:
        restart = True
    else:
        restart = None

    if not files_exist:
        prefix = "nonexistent_prefix"
    else:
        prefix = "checkpoint"

    try:
        mwxrun.init_run(
            restart=restart,
            checkpoint_dir=os.path.join(testing_util.test_dir, "checkpoint"),
            checkpoint_prefix=prefix
        )
    except RuntimeError as e:
        if ("There were no checkpoint directories starting with "
                "nonexistent_prefix!" in str(e)):
            # There should only be an exception if this restart was forced
            assert force
            # nothing else to check if the forced restart failed
            return
        raise

    # if the files didn't exist then we didn't restart, so there's no need
    # to verify the correct number of steps passed
    if files_exist:
        new_max_steps = mwxrun.simulation.max_steps
        start_step = mwxrun.get_it()

    if not force and not files_exist:
        log_lines = [r.msg for r in caplog.records]
        assert any([
            f"There were no checkpoint directories starting with {prefix}!"
            in l for l in log_lines
        ])

    mwxrun.simulation.step()
    end_step = mwxrun.get_it()

    if files_exist:
        assert end_step - start_step == new_max_steps
def update_ts_dict(self):
    """Run early in flux analysis to get this diagnostic period's
    timeseries.
    """
    self.ts_dict = collections.OrderedDict()

    for (keytype, key), diaglist in self.diags_dict.items():
        for diagobj in diaglist:
            df = diagobj.charge_accum_diag()

            # only need to hold a copy of the timeseries on root
            if mwxrun.me == 0:
                species_list = diagobj.get_species_list()
                for sp in species_list:
                    subdf = df[df['species_id'] == sp]
                    ts = FluxCalcDataframe(
                        df=subdf, area=self.runinfo.area,
                        step_begin=self.last_run_step + 1,
                        step_end=mwxrun.get_it() + 1
                    )
                    sp_name = mwxrun.simulation.species[sp].name
                    if (keytype, key, sp_name) in self.ts_dict:
                        self.ts_dict[(keytype, key, sp_name)] = (
                            timeseries.concat_crop_timeseries(
                                [self.ts_dict[(keytype, key, sp_name)], ts]
                            )
                        )
                    else:
                        self.ts_dict[(keytype, key, sp_name)] = ts
def _load_checkpoint_flux(self):
    """Function to load the flux data from a simulation checkpoint during
    a restart. The 'old' flux data will be recorded in the fullhist_dict
    so that the simulation output will contain the full flux history and
    not just the flux timeseries after the current restart.
    """
    # the current simulation step presumably matches the restart step
    restart_step = mwxrun.get_it()

    # get the flux diag file for the current step
    flux_diag_file = os.path.join(
        mwxrun.checkpoint_dir, mwxrun.checkpoint, "fluxdata.ckpt"
    )

    # throw an error if the flux diag file does not exist
    if not os.path.isfile(flux_diag_file):
        raise RuntimeError(
            f"{flux_diag_file} doesn't exist but is needed to restart."
        )

    logger.warning(
        "Loading old flux data at restart assuming names of conductors and "
        "species in the simulation have not changed"
    )

    # update the full history with the old history
    old_fluxdiag = FluxDiagFromFile(fluxdatafile=flux_diag_file)
    self.fullhist_dict = old_fluxdiag.fullhist_dict
    self.last_run_step = restart_step
    self.history_dt = list(self.fullhist_dict.values())[0].dt
def init_timers_and_counters(self):
    """Start timers."""
    # In warp we used a specific walltime counter it had
    # (warp.top.steptime). Not sure what issues we'll hit just using time
    # here.
    self.prev_time = time.time()
    self.start_time = self.prev_time
    self.prev_step = mwxrun.get_it()
    self.particle_steps_total = 0
    self.previous_particle_steps_total = 0
def test_extra_steps_after_restart():
    testing_util.initialize_testingdir("test_extra_steps_after_restart")

    # use a fixed random seed
    np.random.seed(47239475)

    run = get_run()

    additional_steps = 8

    # restart from checkpoint created by test_create_checkpoints
    mwxrun.init_run(
        restart=True,
        checkpoint_dir=os.path.join(testing_util.test_dir, "checkpoint"),
        additional_steps=additional_steps
    )

    start_step = mwxrun.get_it()
    mwxrun.simulation.step()
    end_step = mwxrun.get_it()

    assert start_step + additional_steps == end_step

    restart_net_charge_density = np.load(
        os.path.join(run.field_diag.write_dir,
                     "Net_charge_density_0000000008.npy"))
    # compare against data from test_create_checkpoints
    original_net_charge_density = np.load(
        os.path.join(testing_util.test_dir, "checkpoint",
                     "Net_charge_density_0000000008.npy"))

    assert np.allclose(restart_net_charge_density,
                       original_net_charge_density, rtol=0.1)
def print_performance_summary(self):
    total_time = time.time() - self.start_time
    total_timesteps = mwxrun.get_it()

    steps_per_second = total_timesteps / total_time
    steps_per_second_per_proc = steps_per_second / mwxrun.n_procs
    particle_steps_per_second = self.particle_steps_total / total_time
    particle_steps_per_second_per_proc = (
        particle_steps_per_second / mwxrun.n_procs
    )

    logger.info("### Run Summary ###")
    logger.info(f"steps / second : {steps_per_second:.4f}")
    logger.info(f"steps / second / proc : {steps_per_second_per_proc:.4f}")
    logger.info(f"particle * steps / second : {particle_steps_per_second:.4f}")
    logger.info(f"particle * steps / second / proc : "
                f"{particle_steps_per_second_per_proc:.4f}")
def check_timestep(self):
    """Check if the diagnostic should run on this timestep.

    Returns:
        l_execute (bool): If True, run on this timestep. If False, don't.
    """
    it = mwxrun.get_it()

    if self.manual_timesteps is not None:
        return (it % self.diag_steps) in self.manual_timesteps

    if (it % self.diag_steps) == self.diag_step_offset:
        return (
            (self.extended_interval_level is None)
            or self._comp_calcinterval(it)
        )

    return False
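# Illustrative sketch (standalone): with diag_steps=5 and diag_step_offset=0
# the periodic trigger in check_timestep() fires on steps 5, 10, 15, ... The
# helper and its arguments below are example names, not module API.
def _example_interval_check(it, diag_steps, diag_step_offset=0,
                            manual_timesteps=None):
    """Mirror the periodic trigger logic for a single step number `it`."""
    if manual_timesteps is not None:
        return (it % diag_steps) in manual_timesteps
    return (it % diag_steps) == diag_step_offset

# e.g. [it for it in range(1, 21) if _example_interval_check(it, 5)]
# -> [5, 10, 15, 20]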
def _check_charge_conservation(self):
    """Function to check net current flow into simulation during the last
    diagnostic period.
    """
    full_ts = timeseries.concat_crop_timeseries([
        val for key, val in self.ts_dict.items()])
    net_current = full_ts.get_averagevalue_by_key('J')

    if abs(net_current) > 0.5:
        if abs(net_current) > 1e3:
            raise RuntimeError(
                f"Step {mwxrun.get_it()}: Net current exceeds 1000 A/cm^2, "
                "which is almost definitely an error."
            )
        logger.warning(
            f"Step {mwxrun.get_it()}: Net current ({net_current:.3f} "
            "A/cm^2) exceeds 0.5 A/cm^2, which likely indicates a "
            "violation of the CFL condition."
        )
def _flux_ana(self):
    """Perform the calculation and processing of current data from the
    dataframes.

    The Timeseries object is used to store multiple keys for one
    collection of injectors and/or surfaces and one species. To facilitate
    that, we go by diagnostic object, create a timeseries by species, and
    concatenate timeseries by group of objects. We also keep a
    from-the-beginning tally of similar timeseries, so we can append the
    present timeseries to them, and then resample those if needed.
    """
    if self.check_timestep():
        self.update_ts_dict()

        if mwxrun.me == 0:
            self.update_fullhist_dict()

            if self.check_charge_conservation:
                self._check_charge_conservation()

            if self.print_per_diagnostic:
                logger.info(
                    "\nTHIS DIAGNOSTIC PERIOD:\n"
                    + self.print_fluxes(self.ts_dict)
                )

            if self.print_total:
                logger.info(
                    "\nTOTAL HISTORY:\n"
                    + self.print_fluxes(self.fullhist_dict)
                )

            if self.plot:
                self.plot_fluxes(self.fullhist_dict, save=True)

            self.save()

        self.last_run_step = mwxrun.get_it()
def run_scattering_method(self):
    """Function to execute the Langevin Coulomb collision operator
    scattering method."""
    if (mwxrun.get_it() - 1) % self.subcycling_steps == 0:
        self.get_grid_quantities()

    # Short-circuit if no particles present. This is both more efficient
    # and avoids crashes empty particle lists can cause.
    if mwxrun.sim_ext.get_particle_count(self.collider.name) == 0:
        return

    # collect electron particle positions (array-of-structs, one per tile)
    structs = mwxrun.sim_ext.get_particle_structs(self.collider.name, 0)
    # collect electron particle velocities (one array per tile)
    ux_arrays = mwxrun.sim_ext.get_particle_ux(self.collider.name)
    uy_arrays = mwxrun.sim_ext.get_particle_uy(self.collider.name)
    uz_arrays = mwxrun.sim_ext.get_particle_uz(self.collider.name)

    # loop over tiles and scatter the electrons appropriately
    for ii in range(len(structs)):
        ux = ux_arrays[ii]
        uy = uy_arrays[ii]
        uz = uz_arrays[ii]

        # create a new array of the velocity components for convenience
        v = np.array([ux, uy, uz])
        v_mag = np.sqrt(np.sum(v**2, axis=0))
        v_perp = np.sqrt(v[0]**2 + v[1]**2)

        # interpolate ion density to electron positions
        coords = np.zeros((mwxrun.dim, len(ux)))
        if mwxrun.geom_str == 'Z':
            coords[0] = structs[ii]['x']
        elif mwxrun.geom_str == 'XZ':
            coords[0] = structs[ii]['x']
            coords[1] = structs[ii]['y']
        density = mwxutil.interpolate_from_grid(
            coords, self.ion_density_grid)

        # calculate diffusion coefficient assuming infinitely massive ions
        coulomb_log = self.get_coulomb_log(coords)
        d11 = self.nu_coef * density * coulomb_log / v_mag

        # generate diffusion scattering vectors in the perpendicular plane
        sigma = np.sqrt(mwxrun.get_dt() * d11)
        Q1 = np.random.normal(0, sigma, len(v_mag))
        Q2 = np.random.normal(0, sigma, len(v_mag))

        # calculate rotation angles to parallel coordinates frame
        cos_theta = v[2] / v_mag
        sin_theta = v_perp / v_mag
        cos_phi = v[0] / v_perp
        sin_phi = v[1] / v_perp

        # enforce energy conservation
        dif = v_mag**2 - Q1**2 - Q2**2

        # pick out unphysical points - for these we use isotropic scattering
        idx = np.where(dif <= 0)[0]
        isotropized_vels = mwxutil.get_vel_vector(v_mag[idx])
        Q1[idx] = isotropized_vels[:, 0]
        Q2[idx] = isotropized_vels[:, 1]
        dif[idx] = isotropized_vels[:, 2]**2

        Q3 = np.sqrt(dif) - v_mag

        # add the dynamical friction component
        # F = self.nu_coef * density * coulomb_log / v_mag**2
        # Q3 -= F * warp.top.dt

        # transform Q from the parallel coordinates to the lab frame
        Q = np.array([
            Q1 * cos_theta * cos_phi - Q2 * sin_phi
            + Q3 * sin_theta * cos_phi,
            Q1 * cos_theta * sin_phi + Q2 * cos_phi
            + Q3 * sin_theta * sin_phi,
            -Q1 * sin_theta + Q3 * cos_theta
        ])

        # compare initial and final kinetic energy for sanity checking
        # E_init = 0.5 * constants.m_e / constants.eV_SI * (
        #     ux**2 + uy**2 + uz**2
        # )

        ux[:] += Q[0]
        uy[:] += Q[1]
        uz[:] += Q[2]
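# Illustrative sketch (standalone): why the "enforce energy conservation"
# step above preserves particle speed. In the frame aligned with the particle
# velocity, the pre-scatter velocity is (0, 0, v_mag) and the post-scatter
# velocity is (Q1, Q2, v_mag + Q3) with Q3 = sqrt(v_mag**2 - Q1**2 - Q2**2)
# - v_mag, so its magnitude is exactly v_mag. The names and scale factor here
# are local to this example, not module API.
import numpy as np

def _example_speed_preserving_scatter(v_mag, seed=0):
    """Check that one scatter of a particle with speed v_mag keeps |v|."""
    rng = np.random.default_rng(seed)
    Q1 = rng.normal(0, 0.1 * v_mag)
    Q2 = rng.normal(0, 0.1 * v_mag)
    Q3 = np.sqrt(v_mag**2 - Q1**2 - Q2**2) - v_mag
    new_speed = np.sqrt(Q1**2 + Q2**2 + (v_mag + Q3)**2)
    return bool(np.isclose(new_speed, v_mag))

# _example_speed_preserving_scatter(1.0e6)  # -> True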
def check_for_end_of_sim(self):
    """Run post-processing once the simulation reaches its final step."""
    if mwxrun.get_it() == mwxrun.simulation.max_steps:
        self.do_post_processing()
def eval_total_steps(self):
    """Evaluate whether the total timesteps have been reached."""
    if mwxrun.get_it() >= self.total_steps:
        logger.info("SimControl: Total steps reached")
        return False
    return True
def fields_diag(self):
    """Function to process (get, plot and save) field quantities.
    This function is called on every step, but only executes if
    check_timestep() evaluates to True.
    """
    if (self.post_processing
            and (mwxrun.get_it() == mwxrun.simulation.max_steps)):
        self.do_post_processing()

    if not self.check_timestep():
        return

    logger.info("Analyzing fields...")
    self.it = mwxrun.get_it()

    if self.process_phi:
        data = mwxrun.get_gathered_phi_grid(include_ghosts=False)
        self.process_field(
            data=data, titlestr='Electrostatic potential', plottype='phi',
            draw_image=True, default_ticks=True, draw_contourlines=False
        )
        # Optionally generate barrier index plot if requested
        if self.plot and (self.barrier_slices is not None):
            self.plot_barrier_slices(data, self.barrier_slices)

    if self.process_E:
        raise NotImplementedError("E-field processing not yet implemented.")
        self.process_field(
            data=None, titlestr='Electric field strength', plottype='E',
            draw_image=True, default_ticks=True, draw_contourlines=False
        )

    if self.process_rho:
        # assume that rho_fp still holds the net charge density
        data = mwxrun.get_gathered_rho_grid(include_ghosts=False) * 1e-6
        if mwxrun.dim == 1:
            data = data[:, 0]
        elif mwxrun.dim == 2:
            data = data[:, :, 0]
        self.process_field(
            data=data, titlestr='Net charge density', plottype='rho',
            draw_image=True, default_ticks=True, draw_contourlines=False
        )

        # deposit the charge density for each species
        for species in self.species_list:
            data = (
                mwxrun.get_gathered_rho_grid(
                    species_name=species.name, include_ghosts=False)
                / species.sq * 1e-6
            )
            if mwxrun.dim == 1:
                data = data[:, 0]
            elif mwxrun.dim == 2:
                data = data[:, :, 0]
            self.process_field(
                data=data, titlestr=f'{species.name} particle density',
                plottype='n', draw_image=True, default_ticks=True,
                draw_contourlines=False
            )

    logger.info("Finished analyzing fields")
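# Illustrative sketch (standalone): the 1e-6 factors in fields_diag() convert
# gathered SI grid data from per-m^3 to per-cm^3, and dividing a per-species
# charge density by the species charge gives a particle number density. The
# helper below is an example, not module API; species_charge is in coulombs.
def _example_density_conversions(rho_si, species_charge):
    """Return (charge density in C/cm^3, number density in cm^-3)."""
    rho_per_cm3 = rho_si * 1e-6                   # C/m^3 -> C/cm^3
    n_per_cm3 = rho_si / species_charge * 1e-6    # 1/m^3 -> 1/cm^3
    return rho_per_cm3, n_per_cm3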
def record_scrapedparticles(self):
    """Handles transforming raw particle information from the WarpX
    scraped particle buffer to the information used to record particles as
    a function of time.

    Note:
        Assumes the fixed form of fields given in Assembly(). Doesn't
        check since this is called many times.

    Note:
        The total charge scraped and energy of the particles scraped is
        multiplied by -1 since these quantities are leaving the system.
    """
    # skip conductors that don't have a label to get scraped particles with
    if not hasattr(self, 'scraper_label'):
        logger.warning(f"Assembly {self.name} doesn't have a scraper label")
        return

    # loop over species and get the scraped particle data from the buffer
    for species in mwxrun.simulation.species:
        data = np.zeros(7)
        data[0] = mwxrun.get_t()
        data[1] = mwxrun.get_it()
        data[2] = species.species_number
        data[3] = self.getvoltage_e()

        # When pre-seeding a simulation with plasma we inject particles
        # over embedded boundaries which causes the first scraping step to
        # show very large currents. For this reason we skip that first
        # step, but note that we inject after the first step so we need to
        # skip scraping step 2.
        if data[1] == 2:
            self.append_scrapedparticles(data)
            continue

        empty = True
        idx_list = []

        # get the number of particles in the buffer - this is primarily
        # to avoid trying to access the buffer if it is not defined,
        # which causes a segfault
        buffer_count = mwxrun.sim_ext.get_particle_boundary_buffer_size(
            species.name, self.scraper_label
        )
        # logger.info(f"{self.name} scraped {buffer_count} {species.name}")

        if buffer_count > 0:
            # get the timesteps at which particles were scraped
            comp_steps = mwxrun.sim_ext.get_particle_boundary_buffer(
                species.name, self.scraper_label, "step_scraped", mwxrun.lev
            )
            # get the particles that were scraped in this timestep
            for arr in comp_steps:
                idx_list.append(np.where(arr == mwxrun.get_it())[0])
                if len(idx_list[-1]) != 0:
                    empty = False

            # sort the particles appropriately if this is an eb
            if not empty and self.scraper_label == 'eb':
                temp_idx_list = []
                structs = mwxrun.sim_ext.get_particle_boundary_buffer_structs(
                    species.name, self.scraper_label, mwxrun.lev
                )

                if mwxrun.geom_str == 'XZ':
                    # the 'y' struct field holds z in XZ geometry
                    xpos = [struct['x'] for struct in structs]
                    ypos = [np.zeros(len(struct['y'])) for struct in structs]
                    zpos = [struct['y'] for struct in structs]
                elif mwxrun.geom_str == 'XYZ':
                    xpos = [struct['x'] for struct in structs]
                    ypos = [struct['y'] for struct in structs]
                    zpos = [struct['z'] for struct in structs]
                elif mwxrun.geom_str == 'RZ':
                    xpos = [struct['x'] for struct in structs]
                    ypos = [np.zeros(len(struct['y'])) for struct in structs]
                    zpos = [struct['y'] for struct in structs]
                else:
                    raise NotImplementedError(
                        f"Scraping not implemented for {mwxrun.geom_str}."
                    )

                for i in range(len(idx_list)):
                    is_inside = self.isinside(
                        xpos[i][idx_list[i]], ypos[i][idx_list[i]],
                        zpos[i][idx_list[i]]
                    )
                    temp_idx_list.append(idx_list[i][np.where(is_inside)])

                    # set the scraped timestep to -1 for particles in this
                    # EB so that they are not considered again
                    comp_steps[i][idx_list[i][np.where(is_inside)]] = -1

                idx_list = temp_idx_list

        if not empty:
            data[4] = sum(np.size(idx) for idx in idx_list)
            w_arrays = mwxrun.sim_ext.get_particle_boundary_buffer(
                species.name, self.scraper_label, "w", mwxrun.lev
            )
            data[5] = -species.sq * sum(
                np.sum(w_arrays[i][idx_list[i]])
                for i in range(len(idx_list))
            )
            E_arrays = mwxrun.sim_ext.get_particle_boundary_buffer(
                species.name, self.scraper_label, "E_total", mwxrun.lev
            )
            data[6] = -sum(
                np.sum(E_arrays[i][idx_list[i]])
                for i in range(len(idx_list))
            )

        self.append_scrapedparticles(data)
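# Illustrative sketch (standalone): the 7-element row built in
# record_scrapedparticles() can be read back with labels like these. The
# field names are descriptive guesses based on the assignments above, not a
# schema defined elsewhere in the module.
from collections import namedtuple

ScrapedRow = namedtuple(
    "ScrapedRow",
    ["time", "step", "species_number", "voltage",
     "n_scraped", "charge_scraped", "energy_scraped"]
)

# e.g. row = ScrapedRow(*data) once the array is filled; charge_scraped and
# energy_scraped carry a minus sign because those quantities leave the system.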
def plot_fluxes(self, ts_dict, save=False):
    """
    Arguments:
        save (bool): If True, save and close figure. write_dir must be
            defined. If False, leave figure open and return it.
    """
    fig, axlist = plt.subplots(2, 2, figsize=(14, 8.5))
    # flatten the 2x2 array of axes into a single list
    axlist = [x for y in axlist for x in y]

    # List of axes properties
    qty_list = [
        {'key': 'J', 'ylabel': r'Current (A/$\mathrm{cm}^2$)',
         'title': 'Simulation currents into system'},
        {'key': 'P', 'ylabel': r'Power (W/$\mathrm{cm}^2$)',
         'title': 'Component power production'},
        {'key': 'dQ', 'ylabel': r'Heat (W/$\mathrm{cm}^2$)',
         'title': 'Component heat transfer into system'},
        {'key': 'n', 'ylabel': 'Macroparticles',
         'title': 'Simulation particles handled'},
    ]

    try:
        label_list = [
            '_'.join([
                x[0].title(), x[1].title(), x[2].title()
            ])
            for x in list(ts_dict.keys())
        ]
    except KeyError:
        logger.error(
            "NOTE: KeyErrors in flux plotting can be caused by creating "
            "species after RunInfo is saved (eg by initiating a "
            "TraceParticleInjector after RunInfo). All species should be "
            "initiated before RunInfo."
        )
        raise

    if mwxrun.get_it() * mwxrun.get_dt() < 0.5e-6:
        xlabel = 'Time (ns)'
    else:
        xlabel = r'Time ($\mu$s)'

    for ax, qtydict in zip(axlist, qty_list):
        typekey = qtydict['key']
        timeseries.TimeseriesPlot(
            array_list=[
                (
                    label,
                    ts_dict[x].get_timeseries_by_key(typekey)
                )
                for (label, x) in zip(label_list, list(ts_dict.keys()))
            ],
            ax=ax, xlabel=xlabel, ylabel=qtydict['ylabel'],
            title=qtydict['title'], titlesize=18, labelsize=16, alpha=0.7,
            legend=False
        )

    fig.legend(ax.get_lines(), label_list, 'lower center', fontsize=16,
               frameon=True, ncol=3)
    fig.tight_layout()
    fig.subplots_adjust(
        bottom=0.13 + int((len(label_list) - 1) / 3) * 0.04,
        hspace=0.33
    )

    if save:
        fig.savefig(
            os.path.join(
                self.write_dir,
                'flux_plots_{:010d}.png'.format(mwxrun.get_it())
            ),
            dpi=300
        )
        plt.close(fig)

    return fig