def __init__(self, fds):
    self.max_events      = 0
    self.prom_man        = PrometheusManager(0)
    self.batch_size      = 1000
    self.filter_callback = None
    self.destination     = None
    self.smd_fds         = fds
def __init__(self, **kwargs):
    """Initializes datasource base"""
    if kwargs is not None:
        self.smalldata_kwargs = {}
        keywords = ('exp', 'dir', 'files', 'shmem', 'filter', 'batch_size',
                    'max_events', 'detectors', 'det_name', 'destination',
                    'live', 'smalldata_kwargs', 'monitor')
        for k in keywords:
            if k in kwargs:
                setattr(self, k, kwargs[k])
        if self.destination != 0:
            self.batch_size = 1  # reset batch_size to prevent L1 transmitted before BeginRun (FIXME?: Mona)
        if 'run' in kwargs:
            setattr(self, 'runnum', kwargs['run'])
        if 'files' in kwargs:
            if isinstance(self.files, str):
                self.files = [self.files]

    max_retries = 0
    if self.live:
        max_retries = 3

    assert self.batch_size > 0
    self.prom_man = PrometheusManager(os.environ['PS_PROMETHEUS_JOBID'])
    self.dsparms = DsParms(self.batch_size, self.max_events, self.filter,
                           self.destination, self.prom_man, max_retries)
def __init__(self, **kwargs):
    """Initializes datasource base"""
    self.filter      = 0      # callback that takes an evt and returns True/False
    self.batch_size  = 1      # length of batched offsets
    self.max_events  = 0      # maximum no. of events
    self.detectors   = []     # user-selected detector names
    self.exp         = None   # experiment id (e.g. xpptut13)
    self.runnum      = None   # run no.
    self.live        = False  # turns live mode on/off
    self.dir         = None   # manual entry for path to xtc files
    self.files       = None   # xtc2 file path
    self.shmem       = None
    self.destination = 0      # callback that returns rank no. (used by EventBuilder)
    self.monitor     = False  # turns prometheus monitoring client on/off
    self.small_xtc   = []     # swap smd file(s) with bigdata files for these detectors

    if kwargs is not None:
        self.smalldata_kwargs = {}
        keywords = ('exp', 'dir', 'files', 'shmem', 'filter', 'batch_size',
                    'max_events', 'detectors', 'det_name', 'destination',
                    'live', 'smalldata_kwargs', 'monitor', 'small_xtc',)
        for k in keywords:
            if k in kwargs:
                setattr(self, k, kwargs[k])
        if self.destination != 0:
            self.batch_size = 1  # reset batch_size to prevent L1 transmitted before BeginRun (FIXME?: Mona)
        if 'run' in kwargs:
            setattr(self, 'runnum', kwargs['run'])
        if 'files' in kwargs:
            if isinstance(self.files, str):
                self.files = [self.files]

    max_retries = 0
    if self.live:
        max_retries = 3

    assert self.batch_size > 0
    self.prom_man = PrometheusManager(os.environ['PS_PROMETHEUS_JOBID'])
    self.dsparms = DsParms(self.batch_size, self.max_events, self.filter,
                           self.destination, self.prom_man, max_retries)
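# A minimal, self-contained sketch (not part of the source) of the
# keyword-forwarding pattern used above: only whitelisted keywords are copied
# onto the instance, and a string 'files' argument is promoted to a one-item
# list. The class name KwargsBase is hypothetical.
class KwargsBase:
    def __init__(self, **kwargs):
        self.files = None
        self.batch_size = 1
        keywords = ('files', 'batch_size')
        for k in keywords:
            if k in kwargs:
                setattr(self, k, kwargs[k])
        if isinstance(self.files, str):
            self.files = [self.files]  # a single path becomes a one-item list

if __name__ == '__main__':
    ds = KwargsBase(files='/tmp/data-r0001.smd.xtc2', batch_size=1000)
    assert ds.files == ['/tmp/data-r0001.smd.xtc2'] and ds.batch_size == 1000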
def __init__(self, view, configs, dsparms):
    self.configs     = configs
    self.batch_size  = dsparms.batch_size
    self.filter_fn   = dsparms.filter
    self.destination = dsparms.destination
    self.n_files     = len(self.configs)
    pf = PacketFooter(view=view)
    views = pf.split_packets()
    self.eb = EventBuilder(views, self.configs)
    self.c_filter = PrometheusManager.get_metric('psana_eb_filter')
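# A destination-callback sketch (an assumption based on the 'destination'
# comments above, which say the callback returns a rank no. used by
# EventBuilder): round-robin events over a hypothetical set of bigdata ranks.
# The rank count and the 1-based numbering are assumptions, not from the source.
import itertools

_rank_cycle = itertools.cycle(range(1, 5))  # 4 hypothetical bigdata ranks

def round_robin_destination(evt):
    # Return the rank number that should receive this event.
    return next(_rank_cycle)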
import os
import sys
import time
import numpy as np
from psana.psexp import PrometheusManager
# Import paths below are assumed from the surrounding snippets:
from psana.psexp.ds_base import DsParms
from psana.psexp.smdreader_manager import SmdReaderManager

def run_smd0():
    smd_dir = '/cds/data/drpsrcf/users/monarin/xtcdata/10M60n/xtcdata/smalldata'
    #smd_dir = '/cds/data/drpsrcf/users/monarin/tmoc00118/xtc/smalldata'
    filesize = 760057400
    max_events = 0  # 0 = no event limit (was undefined in the original snippet)
    n_files = int(sys.argv[1])
    filenames = [None] * n_files
    for i in range(n_files):
        filenames[i] = os.path.join(smd_dir, f'data-r0001-s{str(i).zfill(2)}.smd.xtc2')
        #filenames[i] = os.path.join(smd_dir, f'tmoc00118-r0463-s{str(i).zfill(3)}-c000.smd.xtc2')
        #filenames[i] = os.path.join(smd_dir, f'tmolv9418-r0175-s{str(i).zfill(3)}-c000.smd.xtc2')

    smd_fds = np.array([os.open(filename, os.O_DIRECT) for filename in filenames],
                       dtype=np.int32)

    st = time.time()
    prom_man = PrometheusManager(os.getpid())
    dsparms = DsParms(batch_size=1,  # bigdata batch size
                      max_events=max_events,
                      filter=0,
                      destination=0,
                      prom_man=prom_man,
                      max_retries=0,
                      live=False,
                      found_xtc2_callback=0,
                      timestamps=np.empty(0, dtype=np.uint64))
    smdr_man = SmdReaderManager(smd_fds[:n_files], dsparms)
    for i_chunk, chunk in enumerate(smdr_man.chunks()):
        if not smdr_man.got_events:
            break
        found_endrun = smdr_man.smdr.found_endrun()
        if found_endrun:
            print('found EndRun')
            break
    print(f'total search time: {smdr_man.smdr.total_time}')
    en = time.time()

    processed_events = smdr_man.processed_events
    print(f"#Smdfiles: {n_files} #Events: {processed_events} "
          f"Elapsed Time (s): {en-st:.2f} "
          f"Rate (MHz): {processed_events/((en-st)*1e6):.2f} "
          f"Bandwidth (GB/s): {filesize*n_files*1e-9/(en-st):.2f}")
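# Invocation sketch (an assumption; the original snippet defines run_smd0 but
# no entry point): the only command-line argument is the number of smalldata
# files to read, e.g. `python run_smd0.py 8`.
if __name__ == '__main__':
    run_smd0()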
import os
import time
import threading
from mpi4py import MPI
from psana.psexp import PrometheusManager

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

if __name__ == "__main__":
    # Use rank 0's process id as the jobid - we'll need to broadcast this.
    if rank == 0:
        jobid = os.getpid()
    else:
        jobid = None
    jobid = comm.bcast(jobid, root=0)
    if rank == 0:
        print(f'jobid={jobid}', flush=True)

    # Set up a Prometheus client thread that pushes data every 15 s.
    prom_man = PrometheusManager(jobid)
    e = threading.Event()
    t = threading.Thread(name='PrometheusThread%s' % (rank),
                         target=prom_man.push_metrics,
                         args=(e, rank),
                         daemon=True)
    t.start()

    # Set up a sample metric and accumulate elapsed time into it.
    bd_wait_eb = PrometheusManager.get_metric('psana_bd_wait_eb')
    n_loops = 30
    st_req = time.monotonic()
    for i in range(n_loops):
        time.sleep(1)
    en_req = time.monotonic()
    bd_wait_eb.labels('seconds', rank).inc(en_req - st_req)
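    # Shutdown sketch (an assumption, not in the original snippet): assuming
    # push_metrics exits its push loop once the shared Event is set, the
    # pusher thread can be stopped cleanly instead of relying on daemon
    # teardown at interpreter exit.
    e.set()   # signal push_metrics to stop pushing
    t.join()  # wait for the final push to complete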
from psana import dgram
from psana.event import Event
from psana.psexp import PacketFooter, TransitionId, PrometheusManager
import numpy as np
import os
import time
import logging

logger = logging.getLogger(__name__)

s_bd_just_read     = PrometheusManager.get_metric('psana_bd_just_read')
s_bd_gen_smd_batch = PrometheusManager.get_metric('psana_bd_gen_smd_batch')
s_bd_gen_evt       = PrometheusManager.get_metric('psana_bd_gen_evt')


class EventManager(object):
    """Returns an event from the received smalldata memoryview (view).

    1) If dm is empty (no bigdata), yield this smd event.
    2) If dm is not empty,
       - with a filter fn, fetch one bigdata event and yield it;
       - w/o a filter fn, fetch one big chunk of bigdata and replace the
         smalldata view with the read-out bigdata, then yield one bigdata
         event.
    """
    def __init__(self, view, smd_configs, dm, esm,
                 filter_fn=0, prometheus_counter=None,
                 max_retries=0, use_smds=[]):
        if view:
            pf = PacketFooter(view=view)
            self.n_events = pf.n_packets
        else:
def __init__(self, **kwargs):
    """Initializes datasource base"""
    self.filter           = 0      # callback that takes an evt and returns True/False
    self.batch_size       = 1000   # no. of events per batch sent to a bigdata core
    self.max_events       = 0      # maximum no. of events
    self.detectors        = []     # user-selected detector names
    self.exp              = None   # experiment id (e.g. xpptut13)
    self.runnum           = None   # run no.
    self.live             = False  # turns live mode on/off
    self.dir              = None   # manual entry for path to xtc files
    self.files            = None   # xtc2 file path
    self.shmem            = None
    self.destination      = 0      # callback that returns rank no. (used by EventBuilder)
    self.monitor          = False  # turns prometheus monitoring client on/off
    self.small_xtc        = []     # swap smd file(s) with bigdata files for these detectors
    self.timestamps       = np.empty(0, dtype=np.uint64)  # list of user-selected timestamps
    self.dbsuffix         = ''     # calibration database name extension for private constants
    self.intg_det         = ''     # integrating detector name (contains marker ts for a batch)
    self.current_retry_no = 0      # global counting var for no. of read attempts

    if kwargs is not None:
        self.smalldata_kwargs = {}
        keywords = ('exp', 'dir', 'files', 'shmem', 'filter', 'batch_size',
                    'max_events', 'detectors', 'det_name', 'destination',
                    'live', 'smalldata_kwargs', 'monitor', 'small_xtc',
                    'timestamps', 'dbsuffix', 'intg_det',)
        for k in keywords:
            if k in kwargs:
                if k == 'timestamps':
                    msg = 'Numpy array or .npy filename is required for timestamps argument'
                    assert isinstance(kwargs[k], (np.ndarray, str)), msg
                setattr(self, k, kwargs[k])
        if self.destination != 0:
            self.batch_size = 1  # reset batch_size to prevent L1 transmitted before BeginRun (FIXME?: Mona)
        if 'run' in kwargs:
            setattr(self, 'runnum', kwargs['run'])
        if 'files' in kwargs:
            if isinstance(self.files, str):
                self.files = [self.files]
        if 'dbsuffix' in kwargs:
            setattr(self, 'dbsuffix', kwargs['dbsuffix'])

    max_retries = 0
    if self.live:
        max_retries = int(os.environ.get('PS_R_MAX_RETRIES', '60'))

    assert self.batch_size > 0
    self.prom_man = PrometheusManager(os.environ['PS_PROMETHEUS_JOBID'])
    self.dsparms = DsParms(self.batch_size,
                           self.max_events,
                           self.filter,
                           self.destination,
                           self.prom_man,
                           max_retries,
                           self.live,
                           self.smd_inprogress_converted,
                           self.timestamps,
                           self.intg_det,)
    if 'mpi_ts' not in kwargs:
        self.dsparms.set_timestamps()
    else:
        if kwargs['mpi_ts'] == 0:
            self.dsparms.set_timestamps()
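# Usage sketch (not from the source) of the timestamps argument validated
# above: either a numpy uint64 array or the name of a .npy file containing
# one. The timestamp values and the MyDataSource class name are hypothetical.
import numpy as np

wanted = np.array([4611686018427387904, 4611686018427387905], dtype=np.uint64)
# ds = MyDataSource(exp='xpptut13', run=1, timestamps=wanted)
# ...or persisted to disk and passed by filename:
np.save('wanted_ts.npy', wanted)
# ds = MyDataSource(exp='xpptut13', run=1, timestamps='wanted_ts.npy')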
from psana import dgram
from psana.event import Event
from psana.psexp import PacketFooter, TransitionId, PrometheusManager
import numpy as np
import os
from psana.psexp.tools import Logging as logging
import time

s_bd_disk = PrometheusManager.get_metric('psana_bd_wait_disk')


class EventManager(object):
    """Returns an event from the received smalldata memoryview (view).

    1) If dm is empty (no bigdata), yield this smd event.
    2) If dm is not empty,
       - with a filter fn, fetch one bigdata event and yield it;
       - w/o a filter fn, fetch one big chunk of bigdata and replace the
         smalldata view with the read-out bigdata, then yield one bigdata
         event.
    """
    def __init__(self, view, smd_configs, dm,
                 filter_fn=0, prometheus_counter=None, max_retries=0):
        if view:
            pf = PacketFooter(view=view)
            self.smd_events = pf.split_packets()
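# A filter-callback sketch (an assumption based on the EventManager docstring
# and the 'filter' keyword of the datasource base above): the callback takes
# one smalldata event and returns True/False; only events that pass trigger a
# bigdata fetch.
def my_filter(evt):
    # Inspect the smalldata event here; returning False skips the bigdata read.
    return True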