def execute(self, pset, endtime, dt, recovery=None, output_file=None, execute_once=False):
    """Execute this Kernel over a ParticleSet for several timesteps.

    :param pset: ParticleSet to execute this kernel on
    :param endtime: end time of the integration window
    :param dt: timestep (signed; sign gives integration direction)
    :param recovery: optional dict mapping error codes to recovery kernels,
        overriding/extending ``recovery_base_map``
    :param output_file: optional output file; particles that signalled
        deletion are written to it before being removed
    :param execute_once: if True, suppress the small-``dt`` accuracy warning
    """
    particles = pset.data_accessor()

    # Reset every particle to Evaluate so the core loop picks all of them up
    for p in range(pset.size):
        particles.set_index(p)
        particles.set_state(StateCode.Evaluate)

    if abs(dt) < 1e-6 and not execute_once:
        logger.warning_once("'dt' is too small, causing numerical accuracy limit problems. Please chose a higher 'dt' and rather scale the 'time' axis of the field accordingly. (related issue #762)")

    def remove_deleted(pset):
        """Utility to remove all particles that signalled deletion"""
        indices = pset.particle_data['state'] == OperationCode.Delete
        if np.count_nonzero(indices) > 0 and output_file is not None:
            # Persist the deleted particles before dropping them
            output_file.write(pset, endtime, deleted_only=indices)
        pset.remove_booleanvector(indices)

    if recovery is None:
        recovery = {}
    elif ErrorCode.ErrorOutOfBounds in recovery and ErrorCode.ErrorThroughSurface not in recovery:
        # A custom OutOfBounds handler also covers ThroughSurface unless the
        # user supplied a dedicated one
        recovery[ErrorCode.ErrorThroughSurface] = recovery[ErrorCode.ErrorOutOfBounds]
    recovery_map = recovery_base_map.copy()
    recovery_map.update(recovery)

    if pset.fieldset is not None:
        for g in pset.fieldset.gridset.grids:
            if len(g.load_chunk) > 0:  # not the case if a field is not called in the kernel
                # Mark previously-touched chunks (2) as deprecated (3) before this pass
                g.load_chunk = np.where(g.load_chunk == 2, 3, g.load_chunk)

    # Execute the kernel over the particle set
    if self.ptype.uses_jit:
        self.execute_jit(pset, endtime, dt)
    else:
        self.execute_python(pset, endtime, dt)

    # Remove all particles that signalled deletion
    remove_deleted(pset)

    # Identify particles that threw errors
    error_particles = np.isin(pset.particle_data['state'], [StateCode.Success, StateCode.Evaluate], invert=True)
    while np.any(error_particles):
        # Apply recovery kernel
        for p in np.where(error_particles)[0]:
            particles.set_index(p)
            if particles.state == OperationCode.StopExecution:
                return
            if particles.state == OperationCode.Repeat:
                particles.set_state(StateCode.Evaluate)
            elif particles.state == OperationCode.Delete:
                # FIX: a particle already flagged for deletion is not an
                # error — leave it for remove_deleted() below instead of
                # falling through to the warning/delete branch (matches the
                # SOA variant of this method)
                pass
            elif particles.state in recovery_map:
                recovery_kernel = recovery_map[particles.state]
                particles.set_state(StateCode.Success)
                recovery_kernel(particles, self.fieldset, particles.time)
                # Recovery may itself flag a new error; only resume on Success
                if particles.state == StateCode.Success:
                    particles.set_state(StateCode.Evaluate)
            else:
                logger.warning_once('Deleting particle because of bug in #749 and #737')
                particles.delete()

        # Remove all particles that signalled deletion
        remove_deleted(pset)

        # Execute core loop again to continue interrupted particles
        if self.ptype.uses_jit:
            self.execute_jit(pset, endtime, dt)
        else:
            self.execute_python(pset, endtime, dt)

        error_particles = np.isin(pset.particle_data['state'], [StateCode.Success, StateCode.Evaluate], invert=True)
def execute(self, pset, endtime, dt, recovery=None, output_file=None, execute_once=False):
    """Execute this Kernel over a ParticleSet for several timesteps.

    Runs the core kernel (JIT or pure Python), removes particles that
    signalled deletion, then repeatedly applies recovery kernels and
    re-runs the core loop until no particle is left in an error state.

    :param pset: ParticleSet to execute this kernel on
    :param endtime: end time of the integration window
    :param dt: timestep (signed; sign gives integration direction)
    :param recovery: optional dict mapping error codes to recovery kernels,
        overriding/extending ``recovery_base_map``
    :param output_file: optional output file; particles that signalled
        deletion are written to it before being removed
    :param execute_once: if True, suppress the small-``dt`` accuracy warning
    """
    # Reset every particle to Evaluate so the core loop picks all of them up
    pset.collection.state[:] = StateCode.Evaluate

    if abs(dt) < 1e-6 and not execute_once:
        logger.warning_once("'dt' is too small, causing numerical accuracy limit problems. Please chose a higher 'dt' and rather scale the 'time' axis of the field accordingly. (related issue #762)")

    if recovery is None:
        recovery = {}
    elif ErrorCode.ErrorOutOfBounds in recovery and ErrorCode.ErrorThroughSurface not in recovery:
        # A custom OutOfBounds handler also covers ThroughSurface unless the
        # user supplied a dedicated one
        recovery[ErrorCode.ErrorThroughSurface] = recovery[ErrorCode.ErrorOutOfBounds]
    recovery_map = recovery_base_map.copy()
    recovery_map.update(recovery)

    if pset.fieldset is not None:
        for g in pset.fieldset.gridset.grids:
            if len(g.load_chunk) > g.chunk_not_loaded:  # not the case if a field is not called in the kernel
                # Mark previously-touched chunks as deprecated before this pass
                g.load_chunk = np.where(g.load_chunk == g.chunk_loaded_touched, g.chunk_deprecated, g.load_chunk)

    # Execute the kernel over the particle set
    if self.ptype.uses_jit:
        self.execute_jit(pset, endtime, dt)
    else:
        self.execute_python(pset, endtime, dt)

    # Remove all particles that signalled deletion
    if type(pset).__name__ in ['ParticleSetSOA', ]:
        self.remove_deleted_soa(pset, output_file=output_file, endtime=endtime)
    else:
        self.remove_deleted(pset, output_file=output_file, endtime=endtime)  # Generalizable version!

    # Identify particles that threw errors
    n_error = pset.num_error_particles

    while n_error > 0:
        error_pset = pset.error_particles
        # Apply recovery kernel
        for p in error_pset:
            if p.state == OperationCode.StopExecution:
                # Abort the whole execution immediately
                return
            if p.state == OperationCode.Repeat:
                p.set_state(StateCode.Evaluate)
            elif p.state == OperationCode.Delete:
                # Already flagged for deletion; handled by remove_deleted below
                pass
            elif p.state in recovery_map:
                recovery_kernel = recovery_map[p.state]
                p.set_state(StateCode.Success)
                recovery_kernel(p, self.fieldset, p.time)
                # Recovery may itself flag a new error; only resume on Success
                if p.state == StateCode.Success:
                    p.set_state(StateCode.Evaluate)
            else:
                logger.warning_once('Deleting particle {} because of non-recoverable error'.format(p.id))
                p.delete()

        # Remove all particles that signalled deletion
        if type(pset).__name__ in ['ParticleSetSOA', ]:
            self.remove_deleted_soa(pset, output_file=output_file, endtime=endtime)
        else:
            self.remove_deleted(pset, output_file=output_file, endtime=endtime)  # Generalizable version!

        # Execute core loop again to continue interrupted particles
        if self.ptype.uses_jit:
            self.execute_jit(pset, endtime, dt)
        else:
            self.execute_python(pset, endtime, dt)

        n_error = pset.num_error_particles