class RunLegion(Run):

    def __init__(self, exp, run_no, run_src, **kwargs):
        """ Parallel read using Legion """
        super(RunLegion, self).__init__(exp, run_no,
                max_events=kwargs['max_events'],
                batch_size=kwargs['batch_size'],
                filter_callback=kwargs['filter_callback'],
                prom_man=kwargs['prom_man'])
        xtc_files, smd_files, other_files = run_src

        # get Configure and BeginRun using SmdReader
        self.smd_fds = np.array(
            [os.open(smd_file, os.O_RDONLY) for smd_file in smd_files],
            dtype=np.int32)
        self.smdr_man = SmdReaderManager(self)
        self.configs = self.smdr_man.get_next_dgrams()
        self.beginruns = self.smdr_man.get_next_dgrams(configs=self.configs)
        self._get_runinfo()
        self.smd_dm = DgramManager(smd_files, configs=self.configs, fds=self.smd_fds)
        self.dm = DgramManager(xtc_files, configs=self.smd_dm.configs)
        super()._set_configinfo()
        super()._set_calibconst()
        self.esm = EnvStoreManager(self.configs, 'epics', 'scan')

    def _get_runinfo(self):
        if not self.beginruns:
            return

        beginrun_dgram = self.beginruns[0]
        if hasattr(beginrun_dgram, 'runinfo'):  # some xtc2 do not have BeginRun
            self.expt = beginrun_dgram.runinfo[0].runinfo.expt
            self.runnum = beginrun_dgram.runinfo[0].runinfo.runnum
            self.timestamp = beginrun_dgram.timestamp()

    def analyze(self, **kwargs):
        return legion_node.analyze(self, **kwargs)
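# A minimal usage sketch for RunLegion (assumptions flagged): a run object is
# normally obtained from the psana DataSource machinery rather than constructed
# directly, and the names `ds` and `user_kwargs` below are illustrative only.
# What is taken from this class is that analyze() forwards its keyword
# arguments verbatim to legion_node.analyze.
#
#     for run in ds.runs():              # ds: assumed Legion-mode DataSource
#         run.analyze(**user_kwargs)     # kwargs passed through to legion_node.analyze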
class RunSerial(Run):
    """ Yields events from multiple smd/bigdata files using a single core."""

    def __init__(self, exp, run_no, run_src, **kwargs):
        super(RunSerial, self).__init__(exp, run_no,
                max_events=kwargs['max_events'],
                batch_size=kwargs['batch_size'],
                filter_callback=kwargs['filter_callback'],
                prom_man=kwargs['prom_man'])
        xtc_files, smd_files, other_files = run_src

        # get Configure and BeginRun using SmdReader
        self.smd_fds = np.array(
            [os.open(smd_file, os.O_RDONLY) for smd_file in smd_files],
            dtype=np.int32)
        self.smdr_man = SmdReaderManager(self)
        self.configs = self.smdr_man.get_next_dgrams()
        self.beginruns = self.smdr_man.get_next_dgrams(configs=self.configs)
        self._get_runinfo()
        self.smd_dm = DgramManager(smd_files, configs=self.configs)
        self.dm = DgramManager(xtc_files, configs=self.smd_dm.configs)
        super()._set_configinfo()
        super()._set_calibconst()
        self.esm = EnvStoreManager(self.smd_dm.configs, 'epics', 'scan')

    def _get_runinfo(self):
        if not self.beginruns:
            return

        beginrun_dgram = self.beginruns[0]
        if hasattr(beginrun_dgram, 'runinfo'):  # some xtc2 do not have BeginRun
            self.expt = beginrun_dgram.runinfo[0].runinfo.expt
            self.runnum = beginrun_dgram.runinfo[0].runinfo.runnum
            self.timestamp = beginrun_dgram.timestamp()

    def events(self):
        events = Events(self)
        for evt in events:
            if evt.service() == TransitionId.L1Accept:
                st = time.time()
                yield evt
                en = time.time()
                self.c_ana.labels('seconds', 'None').inc(en - st)
                self.c_ana.labels('batches', 'None').inc()

    def steps(self):
        """ Yields a Step for each BeginStep transition, wrapping the events between steps. """
        events = Events(self)
        for evt in events:
            if evt.service() == TransitionId.BeginStep:
                yield Step(evt, events)
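# A minimal usage sketch for RunSerial (assumptions flagged): the DataSource
# entry point and the exp/run values are placeholders; only the events()/steps()
# iteration is taken from this class. In serial mode each yielded event is an
# L1Accept transition, and steps() yields one Step per BeginStep transition.
#
#     from psana import DataSource
#     ds = DataSource(exp='xpptut15', run=14)   # placeholder experiment/run
#     for run in ds.runs():                     # assumed to yield RunSerial when not under MPI
#         for evt in run.events():              # only L1Accept events are yielded
#             pass                              # user analysis goes here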
class RunParallel(Run):
    """ Yields events from multiple smd/bigdata files using more than 3 cores."""

    def __init__(self, comms, exp, run_no, run_src, **kwargs):
        """ Parallel read requires that rank 0 does the file-system work.
        Configs and calib constants are sent to the other ranks by MPI.

        Note that the destination callback only works with RunParallel.
        """
        super(RunParallel, self).__init__(exp, run_no,
                max_events=kwargs['max_events'],
                batch_size=kwargs['batch_size'],
                filter_callback=kwargs['filter_callback'],
                destination=kwargs['destination'],
                prom_man=kwargs['prom_man'])
        xtc_files, smd_files, other_files = run_src
        self.comms = comms
        psana_comm = comms.psana_comm  # TODO tjl and cpo to review

        rank = psana_comm.Get_rank()
        size = psana_comm.Get_size()

        g_ts = self.prom_man.get_metric("psana_timestamp")

        if rank == 0:
            # get Configure and BeginRun using SmdReader
            self.smd_fds = np.array(
                [os.open(smd_file, os.O_RDONLY) for smd_file in smd_files],
                dtype=np.int32)
            self.smdr_man = SmdReaderManager(self)
            self.configs = self.smdr_man.get_next_dgrams()
            g_ts.labels("first_event").set(time.time())
            self.beginruns = self.smdr_man.get_next_dgrams(
                configs=self.configs)

            self._get_runinfo()
            self.smd_dm = DgramManager(smd_files, configs=self.configs,
                                       run=self, fds=self.smd_fds)
            self.dm = DgramManager(xtc_files, configs=self.smd_dm.configs,
                                   run=self)
            nbytes = np.array([memoryview(config).shape[0] for config in self.configs],
                              dtype='i')
            super()._set_configinfo()
            super()._set_calibconst()
            self.bcast_packets = {'calibconst': self.calibconst,
                                  'expt': self.expt,
                                  'runnum': self.runnum,
                                  'timestamp': self.timestamp}
        else:
            self.smd_dm = None
            self.dm = None
            self.configs = None
            nbytes = np.empty(len(smd_files), dtype='i')
            self.bcast_packets = None

        # Send configs without pickling
        psana_comm.Bcast(nbytes, root=0)  # no. of bytes is required for mpich
        if rank > 0:
            self.configs = [np.empty(nbyte, dtype='b') for nbyte in nbytes]

        for i in range(len(self.configs)):
            psana_comm.Bcast([self.configs[i], nbytes[i], MPI.BYTE], root=0)

        # Send other small things using lowercase (pickle-based) bcast
        self.bcast_packets = psana_comm.bcast(self.bcast_packets, root=0)
        if rank > 0:
            self.configs = [dgram.Dgram(view=config, offset=0)
                            for config in self.configs]
            g_ts.labels("first_event").set(time.time())
            self.dm = DgramManager(xtc_files, configs=self.configs, run=self)
            super()._set_configinfo()  # after creating a dgrammanager, we can set up config info
            self.calibconst = self.bcast_packets['calibconst']
            self.expt = self.bcast_packets['expt']
            self.runnum = self.bcast_packets['runnum']
            self.timestamp = self.bcast_packets['timestamp']

        self.esm = EnvStoreManager(self.configs, 'epics', 'scan')

    def _get_runinfo(self):
        if not self.beginruns:
            return

        beginrun_dgram = self.beginruns[0]
        if hasattr(beginrun_dgram, 'runinfo'):  # some xtc2 do not have BeginRun
            self.expt = beginrun_dgram.runinfo[0].runinfo.expt
            self.runnum = beginrun_dgram.runinfo[0].runinfo.runnum
            self.timestamp = beginrun_dgram.timestamp()

    def events(self):
        for evt in self.run_node():
            if evt.service() != TransitionId.L1Accept:
                continue
            st = time.time()
            yield evt
            en = time.time()
            self.c_ana.labels('seconds', 'None').inc(en - st)
            self.c_ana.labels('batches', 'None').inc()
        self.close()

    def steps(self):
        self.scan = True
        for step in self.run_node():
            yield step
        self.close()

    def run_node(self):
        if self.comms._nodetype == 'smd0':
            Smd0(self)
        elif self.comms._nodetype == 'smd':
            smd_node = SmdNode(self)
            smd_node.run_mpi()
        elif self.comms._nodetype == 'bd':
            bd_node = BigDataNode(self)
            for result in bd_node.run_mpi():
                yield result
        elif self.comms._nodetype == 'srv':
            # tell the iterator to do nothing
            return
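# A minimal usage sketch for RunParallel (assumptions flagged): the mpirun
# command, DataSource entry point, and exp/run values are placeholders. What is
# taken from this class: rank 0 does the file-system work and broadcasts configs
# and calib constants; each rank then acts as an smd0/smd/bd/srv node as
# dispatched by run_node(), and events() yields only L1Accept transitions.
#
#     # mpirun -n 8 python my_analysis.py       # assumed launch command
#     from psana import DataSource
#     ds = DataSource(exp='xpptut15', run=14)   # placeholder experiment/run
#     for run in ds.runs():                     # assumed to yield RunParallel under MPI
#         for evt in run.events():
#             pass                              # user analysis; srv ranks simply return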