def __init__(self, exp, run_no, run_src, **kwargs):
    """ Parallel read using Legion """
    super(RunLegion, self).__init__(exp, run_no,
                                    max_events=kwargs['max_events'],
                                    batch_size=kwargs['batch_size'],
                                    filter_callback=kwargs['filter_callback'],
                                    prom_man=kwargs['prom_man'])
    xtc_files, smd_files, other_files = run_src

    # get Configure and BeginRun using SmdReader
    self.smd_fds = np.array(
        [os.open(smd_file, os.O_RDONLY) for smd_file in smd_files],
        dtype=np.int32)
    self.smdr_man = SmdReaderManager(self)
    self.configs = self.smdr_man.get_next_dgrams()
    self.beginruns = self.smdr_man.get_next_dgrams(configs=self.configs)
    self._get_runinfo()

    self.smd_dm = DgramManager(smd_files, configs=self.configs)
    self.dm = DgramManager(xtc_files, configs=self.smd_dm.configs)
    super()._set_configinfo()
    super()._set_calibconst()
    self.esm = EnvStoreManager(self.configs, 'epics', 'scan')

class RunLegion(Run):

    def __init__(self, exp, run_no, run_src, **kwargs):
        """ Parallel read using Legion """
        super(RunLegion, self).__init__(exp, run_no,
                                        max_events=kwargs['max_events'],
                                        batch_size=kwargs['batch_size'],
                                        filter_callback=kwargs['filter_callback'],
                                        prom_man=kwargs['prom_man'])
        xtc_files, smd_files, other_files = run_src

        # get Configure and BeginRun using SmdReader
        self.smd_fds = np.array(
            [os.open(smd_file, os.O_RDONLY) for smd_file in smd_files],
            dtype=np.int32)
        self.smdr_man = SmdReaderManager(self)
        self.configs = self.smdr_man.get_next_dgrams()
        self.beginruns = self.smdr_man.get_next_dgrams(configs=self.configs)
        self._get_runinfo()

        self.smd_dm = DgramManager(smd_files, configs=self.configs, fds=self.smd_fds)
        self.dm = DgramManager(xtc_files, configs=self.smd_dm.configs)
        super()._set_configinfo()
        super()._set_calibconst()
        self.esm = EnvStoreManager(self.configs, 'epics', 'scan')

    def _get_runinfo(self):
        if not self.beginruns:
            return

        beginrun_dgram = self.beginruns[0]
        if hasattr(beginrun_dgram, 'runinfo'):  # some xtc2 do not have BeginRun
            self.expt = beginrun_dgram.runinfo[0].runinfo.expt
            self.runnum = beginrun_dgram.runinfo[0].runinfo.runnum
            self.timestamp = beginrun_dgram.timestamp()

    def analyze(self, **kwargs):
        return legion_node.analyze(self, **kwargs)

def run_smd0_task(run):
    global_procs = legion.Tunable.select(legion.Tunable.GLOBAL_PYS).get()

    smdr_man = SmdReaderManager(run.smd_dm.fds, run.max_events)
    for i, (smd_chunk, update_chunk) in enumerate(smdr_man.chunks()):
        run_smd_task(smd_chunk, run, point=i)

    # Block before returning so that the caller can use this task's future
    # for synchronization.
    legion.execution_fence(block=True)

class RunSerial(Run):
    """ Yields list of events from multiple smd/bigdata files using single core."""

    def __init__(self, exp, run_no, run_src, **kwargs):
        super(RunSerial, self).__init__(exp, run_no,
                                        max_events=kwargs['max_events'],
                                        batch_size=kwargs['batch_size'],
                                        filter_callback=kwargs['filter_callback'],
                                        prom_man=kwargs['prom_man'])
        xtc_files, smd_files, other_files = run_src

        # get Configure and BeginRun using SmdReader
        self.smd_fds = np.array(
            [os.open(smd_file, os.O_RDONLY) for smd_file in smd_files],
            dtype=np.int32)
        self.smdr_man = SmdReaderManager(self)
        self.configs = self.smdr_man.get_next_dgrams()
        self.beginruns = self.smdr_man.get_next_dgrams(configs=self.configs)
        self._get_runinfo()

        self.smd_dm = DgramManager(smd_files, configs=self.configs)
        self.dm = DgramManager(xtc_files, configs=self.smd_dm.configs)
        super()._set_configinfo()
        super()._set_calibconst()
        self.esm = EnvStoreManager(self.smd_dm.configs, 'epics', 'scan')

    def _get_runinfo(self):
        if not self.beginruns:
            return

        beginrun_dgram = self.beginruns[0]
        if hasattr(beginrun_dgram, 'runinfo'):  # some xtc2 do not have BeginRun
            self.expt = beginrun_dgram.runinfo[0].runinfo.expt
            self.runnum = beginrun_dgram.runinfo[0].runinfo.runnum
            self.timestamp = beginrun_dgram.timestamp()

    def events(self):
        events = Events(self)
        for evt in events:
            if evt.service() == TransitionId.L1Accept:
                st = time.time()
                yield evt
                en = time.time()
                self.c_ana.labels('seconds', 'None').inc(en - st)
                self.c_ana.labels('batches', 'None').inc()

    def steps(self):
        """ Generates events between steps. """
        events = Events(self)
        for evt in events:
            if evt.service() == TransitionId.BeginStep:
                yield Step(evt, events)

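# Consumption sketch (not from the source): assuming `run` is an
# already-constructed RunSerial instance, its two generators can be
# used like this; the loop bodies are placeholders.
for evt in run.events():    # yields L1Accept events only
    pass                    # per-event analysis goes here

for step in run.steps():    # yields one Step per BeginStep transition
    pass                    # per-step analysis goes here
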
def __init__(self, run, get_smd=0):
    self.run = run
    self.get_smd = get_smd  # RunParallel
    if not self.get_smd:
        self._smdr_man = SmdReaderManager(run)  # RunSerial
    self._evt_man = iter([])
    self._batch_iter = iter([])
    self.flag_empty_smd_batch = False

def __init__(self, run, get_smd=0, dm=None):
    self.run = run
    self.get_smd = get_smd  # RunParallel
    self.dm = dm            # RunSingleFile, RunShmem
    if self.get_smd == 0 and self.dm is None:
        self._smdr_man = SmdReaderManager(run)  # RunSerial
    self._evt_man = iter([])
    self._batch_iter = iter([])
    self.flag_empty_smd_batch = False

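# A sketch (not from the source) of the three construction modes implied by
# the flags above; `run` is assumed to be an existing Run object and `run.dm`
# an existing DgramManager.
events_serial = Events(run)               # builds its own SmdReaderManager (serial read)
events_parallel = Events(run, get_smd=1)  # RunParallel path: no SmdReaderManager is created here
events_shmem = Events(run, dm=run.dm)     # RunSingleFile/RunShmem: use the given DgramManager
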
def steps(self):
    """ Generates events between steps. """
    current_step_pos = 0
    smdr_man = SmdReaderManager(self.smd_dm.fds, self.max_events)
    for i, (smd_chunk, step_chunk) in enumerate(smdr_man.chunks()):
        # Update step stores
        step_pf = PacketFooter(view=step_chunk)
        step_views = step_pf.split_packets()
        self.epics_store.update(step_views)
        self.step_store.update(step_views)

        eb_man = EventBuilderManager(smd_chunk, self.configs,
                                     batch_size=self.batch_size,
                                     filter_fn=self.filter_callback)

        for i, step_dgram in enumerate(
                self.step_store.dgrams(from_pos=current_step_pos + 1)):
            if step_dgram:
                limit_ts = step_dgram.seq.timestamp()
                current_step_pos += 1
            else:
                limit_ts = -1
            yield Step(self, eb_man=eb_man, limit_ts=limit_ts)

def events(self):
    ev_man = EventManager(self.configs, self.dm,
                          filter_fn=self.filter_callback)

    # get smd chunks
    smdr_man = SmdReaderManager(self.smd_dm.fds, self.max_events)
    for (smd_chunk, step_chunk) in smdr_man.chunks():
        # Update epics_store for each chunk
        step_pf = PacketFooter(view=step_chunk)
        step_views = step_pf.split_packets()
        self.epics_store.update(step_views)

        eb_man = EventBuilderManager(smd_chunk, self.configs,
                                     batch_size=self.batch_size,
                                     filter_fn=self.filter_callback)

        for batch_dict in eb_man.batches():
            batch, _ = batch_dict[0]  # there's only 1 dest_rank for serial run
            for evt in ev_man.events(batch):
                if evt._dgrams[0].seq.service() != 12:  # 12 = L1Accept
                    continue
                yield evt

class Smd0(object):
    """ Sends blocks of smds to smd_node
    Identifies limit timestamp of the slowest detector
    then sends all smds within that timestamp to an smd_node.
    """

    def __init__(self, run):
        self.smdr_man = SmdReaderManager(run)
        self.run = run
        self.step_hist = StepHistory(self.run.comms.smd_size,
                                     len(self.run.configs))
        self.run_mpi()

    def run_mpi(self):
        rankreq = np.empty(1, dtype='i')

        for (smd_chunk, step_chunk) in self.smdr_man.chunks():
            # Creates a chunk from smd and epics data to send to SmdNode
            # Anatomy of a chunk (pf=packet_footer):
            # [ [smd0][smd1][smd2][pf] ][ [epics0][epics1][epics2][pf] ][ pf ]
            #   ------- smd_chunk ------  --------- epics_chunk ---------
            # ----------------------------- chunk ---------------------------
            #
            # Read new epics data as available in the queue
            # then send only unseen portion of data to the evtbuilder rank.
            if not smd_chunk:
                break

            self.run.comms.smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)

            # Check missing steps for the current client
            missing_step_views = self.step_hist.get_buffer(rankreq[0])

            # Update step buffers (after getting the missing steps)
            step_pf = PacketFooter(view=step_chunk)
            step_views = step_pf.split_packets()
            self.step_hist.extend_buffers(step_views, rankreq[0])

            smd_extended = repack_for_eb(smd_chunk, missing_step_views,
                                         self.run.configs)
            self.run.comms.smd_comm.Send(smd_extended, dest=rankreq[0])

        for i in range(self.run.comms.n_smd_nodes):
            self.run.comms.smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
            self.run.comms.smd_comm.Send(bytearray(), dest=rankreq[0])

class Smd0(object):
    """ Sends blocks of smds to smd_node
    Identifies limit timestamp of the slowest detector
    then sends all smds within that timestamp to an smd_node.
    """

    def __init__(self, run):
        self.smdr_man = SmdReaderManager(run.smd_dm.fds, run.max_events)
        self.run = run
        self.epics_man = UpdateManager(smd_size, self.run.epics_store.n_files)
        self.run_mpi()

    def run_mpi(self):
        rankreq = np.empty(1, dtype='i')

        for (smd_chunk, update_chunk) in self.smdr_man.chunks():
            # Creates a chunk from smd and epics data to send to SmdNode
            # Anatomy of a chunk (pf=packet_footer):
            # [ [smd0][smd1][smd2][pf] ][ [epics0][epics1][epics2][pf] ][ pf ]
            #   ------- smd_chunk ------  --------- epics_chunk ---------
            # ----------------------------- chunk ---------------------------
            #
            # Read new epics data as available in the queue
            # then send only unseen portion of data to the evtbuilder rank.
            update_pf = PacketFooter(view=update_chunk)
            self.epics_man.extend_buffers(update_pf.split_packets())

            smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)

            epics_chunk = self.epics_man.get_buffer(rankreq[0])

            pf = PacketFooter(2)
            pf.set_size(0, memoryview(smd_chunk).shape[0])
            pf.set_size(1, memoryview(epics_chunk).shape[0])
            chunk = smd_chunk + epics_chunk + pf.footer

            smd_comm.Send(chunk, dest=rankreq[0])

        for i in range(PS_SMD_NODES):
            smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
            smd_comm.Send(bytearray(), dest=rankreq[0])

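# A minimal sketch (not from the source) of the chunk anatomy drawn above,
# using PacketFooter the same way Smd0 does: two payloads plus a 2-entry
# footer are concatenated, and the receiver splits them back out.  The
# import path and the byte strings are assumptions for illustration only.
from psana.psexp.packet_footer import PacketFooter  # assumed import path

smd_part = bytearray(b'smd-payload')      # stands in for a real smd_chunk
epics_part = bytearray(b'epics-payload')  # stands in for a real epics_chunk

pf = PacketFooter(2)
pf.set_size(0, memoryview(smd_part).shape[0])
pf.set_size(1, memoryview(epics_part).shape[0])
chunk = smd_part + epics_part + pf.footer

# Receiver side: parse the footer from the combined view and recover the
# two payloads as separate views (expected under the usage shown above).
views = PacketFooter(view=chunk).split_packets()
assert bytes(views[0]) == bytes(smd_part)
assert bytes(views[1]) == bytes(epics_part)
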
def __init__(self, comms, exp, run_no, run_src, **kwargs):
    """ Parallel read requires that rank 0 does the file system work.
    Configs and calib constants are sent to other ranks by MPI.

    Note that destination callback only works with RunParallel.
    """
    super(RunParallel, self).__init__(exp, run_no,
                                      max_events=kwargs['max_events'],
                                      batch_size=kwargs['batch_size'],
                                      filter_callback=kwargs['filter_callback'],
                                      destination=kwargs['destination'],
                                      prom_man=kwargs['prom_man'])
    xtc_files, smd_files, other_files = run_src
    self.comms = comms
    psana_comm = comms.psana_comm  # TODO tjl and cpo to review

    rank = psana_comm.Get_rank()
    size = psana_comm.Get_size()

    g_ts = self.prom_man.get_metric("psana_timestamp")

    if rank == 0:
        # get Configure and BeginRun using SmdReader
        self.smd_fds = np.array(
            [os.open(smd_file, os.O_RDONLY) for smd_file in smd_files],
            dtype=np.int32)
        self.smdr_man = SmdReaderManager(self)
        self.configs = self.smdr_man.get_next_dgrams()
        g_ts.labels("first_event").set(time.time())
        self.beginruns = self.smdr_man.get_next_dgrams(configs=self.configs)
        self._get_runinfo()
        self.smd_dm = DgramManager(smd_files, configs=self.configs,
                                   run=self, fds=self.smd_fds)
        self.dm = DgramManager(xtc_files, configs=self.smd_dm.configs, run=self)
        nbytes = np.array([memoryview(config).shape[0] for config in self.configs],
                          dtype='i')
        super()._set_configinfo()
        super()._set_calibconst()
        self.bcast_packets = {'calibconst': self.calibconst,
                              'expt': self.expt,
                              'runnum': self.runnum,
                              'timestamp': self.timestamp}
    else:
        self.smd_dm = None
        self.dm = None
        self.configs = None
        nbytes = np.empty(len(smd_files), dtype='i')
        self.bcast_packets = None

    # Send configs without pickling
    psana_comm.Bcast(nbytes, root=0)  # no. of bytes is required for mpich
    if rank > 0:
        self.configs = [np.empty(nbyte, dtype='b') for nbyte in nbytes]
    for i in range(len(self.configs)):
        psana_comm.Bcast([self.configs[i], nbytes[i], MPI.BYTE], root=0)

    # Send other small things using small-case bcast
    self.bcast_packets = psana_comm.bcast(self.bcast_packets, root=0)
    if rank > 0:
        self.configs = [dgram.Dgram(view=config, offset=0)
                        for config in self.configs]
        g_ts.labels("first_event").set(time.time())
        self.dm = DgramManager(xtc_files, configs=self.configs, run=self)
        super()._set_configinfo()  # after creating a dgrammanager, we can setup config info
        self.calibconst = self.bcast_packets['calibconst']
        self.expt = self.bcast_packets['expt']
        self.runnum = self.bcast_packets['runnum']
        self.timestamp = self.bcast_packets['timestamp']

    self.esm = EnvStoreManager(self.configs, 'epics', 'scan')

class RunParallel(Run):
    """ Yields list of events from multiple smd/bigdata files using > 3 cores."""

    def __init__(self, comms, exp, run_no, run_src, **kwargs):
        """ Parallel read requires that rank 0 does the file system work.
        Configs and calib constants are sent to other ranks by MPI.

        Note that destination callback only works with RunParallel.
        """
        super(RunParallel, self).__init__(exp, run_no,
                                          max_events=kwargs['max_events'],
                                          batch_size=kwargs['batch_size'],
                                          filter_callback=kwargs['filter_callback'],
                                          destination=kwargs['destination'],
                                          prom_man=kwargs['prom_man'])
        xtc_files, smd_files, other_files = run_src
        self.comms = comms
        psana_comm = comms.psana_comm  # TODO tjl and cpo to review

        rank = psana_comm.Get_rank()
        size = psana_comm.Get_size()

        g_ts = self.prom_man.get_metric("psana_timestamp")

        if rank == 0:
            # get Configure and BeginRun using SmdReader
            self.smd_fds = np.array(
                [os.open(smd_file, os.O_RDONLY) for smd_file in smd_files],
                dtype=np.int32)
            self.smdr_man = SmdReaderManager(self)
            self.configs = self.smdr_man.get_next_dgrams()
            g_ts.labels("first_event").set(time.time())
            self.beginruns = self.smdr_man.get_next_dgrams(configs=self.configs)
            self._get_runinfo()
            self.smd_dm = DgramManager(smd_files, configs=self.configs,
                                       run=self, fds=self.smd_fds)
            self.dm = DgramManager(xtc_files, configs=self.smd_dm.configs, run=self)
            nbytes = np.array([memoryview(config).shape[0] for config in self.configs],
                              dtype='i')
            super()._set_configinfo()
            super()._set_calibconst()
            self.bcast_packets = {'calibconst': self.calibconst,
                                  'expt': self.expt,
                                  'runnum': self.runnum,
                                  'timestamp': self.timestamp}
        else:
            self.smd_dm = None
            self.dm = None
            self.configs = None
            nbytes = np.empty(len(smd_files), dtype='i')
            self.bcast_packets = None

        # Send configs without pickling
        psana_comm.Bcast(nbytes, root=0)  # no. of bytes is required for mpich
        if rank > 0:
            self.configs = [np.empty(nbyte, dtype='b') for nbyte in nbytes]
        for i in range(len(self.configs)):
            psana_comm.Bcast([self.configs[i], nbytes[i], MPI.BYTE], root=0)

        # Send other small things using small-case bcast
        self.bcast_packets = psana_comm.bcast(self.bcast_packets, root=0)
        if rank > 0:
            self.configs = [dgram.Dgram(view=config, offset=0)
                            for config in self.configs]
            g_ts.labels("first_event").set(time.time())
            self.dm = DgramManager(xtc_files, configs=self.configs, run=self)
            super()._set_configinfo()  # after creating a dgrammanager, we can setup config info
            self.calibconst = self.bcast_packets['calibconst']
            self.expt = self.bcast_packets['expt']
            self.runnum = self.bcast_packets['runnum']
            self.timestamp = self.bcast_packets['timestamp']

        self.esm = EnvStoreManager(self.configs, 'epics', 'scan')

    def _get_runinfo(self):
        if not self.beginruns:
            return

        beginrun_dgram = self.beginruns[0]
        if hasattr(beginrun_dgram, 'runinfo'):  # some xtc2 do not have BeginRun
            self.expt = beginrun_dgram.runinfo[0].runinfo.expt
            self.runnum = beginrun_dgram.runinfo[0].runinfo.runnum
            self.timestamp = beginrun_dgram.timestamp()

    def events(self):
        for evt in self.run_node():
            if evt.service() != TransitionId.L1Accept:
                continue
            st = time.time()
            yield evt
            en = time.time()
            self.c_ana.labels('seconds', 'None').inc(en - st)
            self.c_ana.labels('batches', 'None').inc()
        self.close()

    def steps(self):
        self.scan = True
        for step in self.run_node():
            yield step
        self.close()

    def run_node(self):
        if self.comms._nodetype == 'smd0':
            Smd0(self)
        elif self.comms._nodetype == 'smd':
            smd_node = SmdNode(self)
            smd_node.run_mpi()
        elif self.comms._nodetype == 'bd':
            bd_node = BigDataNode(self)
            for result in bd_node.run_mpi():
                yield result
        elif self.comms._nodetype == 'srv':
            # tell the iterator to do nothing
            return

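# A standalone sketch (not from the source) of the two-phase broadcast used
# above: rank 0 first broadcasts the per-config byte counts so that other
# ranks can allocate receive buffers, then the raw bytes go out via the
# buffer-based Bcast (no pickling).  The payloads below are placeholders;
# run under MPI, e.g. `mpirun -n 2 python sketch.py`.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

if rank == 0:
    configs = [np.frombuffer(b'config-payload-0', dtype='b').copy(),
               np.frombuffer(b'config-payload-01', dtype='b').copy()]
    nbytes = np.array([c.shape[0] for c in configs], dtype='i')
else:
    configs = None
    nbytes = np.empty(2, dtype='i')

comm.Bcast(nbytes, root=0)  # phase 1: byte counts (required for mpich)
if rank > 0:
    configs = [np.empty(n, dtype='b') for n in nbytes]
for i in range(len(configs)):
    comm.Bcast([configs[i], nbytes[i], MPI.BYTE], root=0)  # phase 2: raw bytes
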
def smd_chunks(run):
    smdr_man = SmdReaderManager(run)
    for smd_chunk, update_chunk in smdr_man.chunks():
        yield smd_chunk

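# Usage sketch (not from the source): assuming `run` provides whatever
# SmdReaderManager needs (as in the snippets above), iterate the raw
# smalldata chunks and report their sizes.
for smd_chunk in smd_chunks(run):
    print(f'smd chunk: {memoryview(smd_chunk).nbytes} bytes')
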
import os, glob

from psana.psexp.smdreader_manager import SmdReaderManager


class Container(object):
    pass


class Run(object):
    def __init__(self):
        filenames = glob.glob(
            '/reg/neh/home/monarin/lcls2/psana/psana/tests/.tmp_smd0/.tmp/smalldata/*.xtc2')
        fds = [os.open(f, os.O_RDONLY) for f in filenames]
        self.smd_dm = Container()
        setattr(self.smd_dm, 'fds', fds)
        self.max_events = 1000


if __name__ == "__main__":
    run = Run()
    os.environ['PS_SMD_N_EVENTS'] = "1"
    smdr_man = SmdReaderManager(run)
    for chunk in smdr_man.chunks():
        smd_chunk, step_chunk = chunk
        print(f'{memoryview(smd_chunk).nbytes} {memoryview(step_chunk).nbytes}')