def __init__(self, *args, **kwargs):
    """Build a serial (single-process) data source.

    Positional arguments are accepted for signature compatibility but
    are not used; all configuration travels through ``kwargs`` to the
    base class.
    """
    super().__init__(**kwargs)
    # Base class discovers which run numbers are available.
    super()._setup_runnum_list()
    # Start iterating from the first discovered run; no smd file
    # descriptors are open yet.
    self.runnum_list_index = 0
    self.smd_fds = None
    # Handle for per-event analysis output.
    self.smalldata_obj = SmallData(**self.smalldata_kwargs)
    self._setup_run()
    # Expose runtime metrics once setup is complete.
    super()._start_prometheus_client()
def __init__(self, *args, **kwargs):
    """Build a shared-memory data source.

    Positional arguments are accepted for signature compatibility but
    are not used; configuration is keyword-only.
    """
    super().__init__(**kwargs)
    # The shmem identifier from the base class doubles as the tag.
    self.tag = self.shmem
    # Shared-memory streams carry no real run list; a single
    # placeholder run number 0 stands in for it.
    self.runnum_list_index = 0
    self.runnum_list = [0]
    # Handle for per-event analysis output.
    self.smalldata_obj = SmallData(**self.smalldata_kwargs)
    self._setup_run()
    # Expose runtime metrics once setup is complete.
    super()._start_prometheus_client()
def __init__(self, comms, *args, **kwargs):
    """Set up an MPI-parallel data source (timestamp-aware variant).

    Args:
        comms: project communicator wrapper; ``comms.psana_comm`` is the
            underlying MPI communicator and ``comms.node_type()`` labels
            this rank ('smd0', 'eb', ...).
        *args: accepted for signature compatibility; unused here.
        **kwargs: forwarded to the base-class initializer. A
            ``timestamps`` entry that is a string (presumably a path to
            a numpy timestamp file — confirm with callers) switches on
            the ``mpi_ts`` mode consumed on EventBuilder ranks below.

    NOTE(review): runs MPI collectives (two ``bcast`` calls) — every
    rank in ``psana_comm`` must execute this constructor in the same
    order, or the program deadlocks.
    """
    # Check if an I/O-friendly numpy file storing timestamps is given by the user
    kwargs['mpi_ts'] = 0
    if 'timestamps' in kwargs:
        if isinstance(kwargs['timestamps'], str):
            kwargs['mpi_ts'] = 1
    # Initialize base class
    super(MPIDataSource, self).__init__(**kwargs)
    self.smd_fds = None
    # Set up the MPI communication
    self.comms = comms
    comm = self.comms.psana_comm  # todo could be better
    rank = comm.Get_rank()
    size = comm.Get_size()
    # Publish this rank's role at module scope; later branches and other
    # module code key off it.
    global nodetype
    nodetype = self.comms.node_type()
    # prepare comms for running SmallData: only created when srv nodes
    # were requested via the environment, otherwise left as None.
    PS_SRV_NODES = int(os.environ.get('PS_SRV_NODES', 0))
    if PS_SRV_NODES > 0:
        self.smalldata_obj = SmallData(**self.smalldata_kwargs)
    else:
        self.smalldata_obj = None
    # check if no. of ranks is enough: need smd0 + eb nodes + at least
    # one worker, i.e. size strictly greater than nsmds + 1. Aborts the
    # whole MPI job on violation.
    nsmds = int(os.environ.get('PS_EB_NODES', 1))  # No. of smd cores
    if not (size > (nsmds + 1)):
        msg = f"""ERROR Too few MPI processes. MPI size must be more than no. of all workers. \n\tTotal psana size:{size} \n\tPS_EB_NODES: {nsmds}"""
        safe_mpi_abort(msg)
    # can only have 1 EventBuilder when running with destination
    if self.destination and nsmds > 1:
        msg = 'ERROR Too many EventBuilder cores with destination callback'
        safe_mpi_abort(msg)
    # Load timestamp files on EventBuilder Node
    if kwargs['mpi_ts'] == 1 and nodetype == 'eb':
        self.dsparms.set_timestamps()
    # setup runnum list: only smd0 reads it from disk; everyone else
    # starts empty and receives it via the broadcasts below.
    if nodetype == 'smd0':
        super()._setup_runnum_list()
    else:
        self.runnum_list = None
        self.xtc_path = None
    # Collective: rank 0 (smd0) distributes the run list and xtc path.
    self.runnum_list = comm.bcast(self.runnum_list, root=0)
    self.xtc_path = comm.bcast(self.xtc_path, root=0)
    self.runnum_list_index = 0
    self._start_prometheus_client(mpi_rank=rank)
    self._setup_run()
def __init__(self, comms, *args, **kwargs):
    """Set up an MPI-parallel data source.

    Args:
        comms: project communicator wrapper; ``comms.psana_comm`` is the
            underlying MPI communicator and ``comms.node_type()`` labels
            this rank ('smd0', 'eb', ...).
        *args: accepted for signature compatibility; unused here.
        **kwargs: forwarded to the base-class initializer.

    NOTE(review): runs MPI collectives (two ``bcast`` calls) — every
    rank in ``psana_comm`` must execute this constructor in the same
    order, or the program deadlocks. Near-duplicate of the
    timestamp-aware variant elsewhere in this file, minus the
    ``timestamps``/``mpi_ts`` handling.
    """
    super(MPIDataSource, self).__init__(**kwargs)
    # Set up the MPI communication
    self.comms = comms
    comm = self.comms.psana_comm  # todo could be better
    rank = comm.Get_rank()
    size = comm.Get_size()
    # Publish this rank's role at module scope; later branches and other
    # module code key off it.
    global nodetype
    nodetype = self.comms.node_type()
    self.smd_fds = None
    # prepare comms for running SmallData: only created when srv nodes
    # were requested via the environment, otherwise left as None.
    PS_SRV_NODES = int(os.environ.get('PS_SRV_NODES', 0))
    if PS_SRV_NODES > 0:
        self.smalldata_obj = SmallData(**self.smalldata_kwargs)
    else:
        self.smalldata_obj = None
    # check if no. of ranks is enough: need smd0 + eb nodes + at least
    # one worker, i.e. size strictly greater than nsmds + 1. Aborts the
    # whole MPI job on violation.
    nsmds = int(os.environ.get('PS_EB_NODES', 1))  # No. of smd cores
    if not (size > (nsmds + 1)):
        msg = f"""ERROR Too few MPI processes. MPI size must be more than no. of all workers. \n\tTotal psana size:{size} \n\tPS_EB_NODES: {nsmds}"""
        safe_mpi_abort(msg)
    # can only have 1 EventBuilder when running with destination
    if self.destination and nsmds > 1:
        msg = 'ERROR Too many EventBuilder cores with destination callback'
        safe_mpi_abort(msg)
    # setup runnum list: only smd0 reads it from disk; everyone else
    # starts empty and receives it via the broadcasts below.
    if nodetype == 'smd0':
        super()._setup_runnum_list()
    else:
        self.runnum_list = None
        self.xtc_path = None
    # Collective: rank 0 (smd0) distributes the run list and xtc path.
    self.runnum_list = comm.bcast(self.runnum_list, root=0)
    self.xtc_path = comm.bcast(self.xtc_path, root=0)
    self.runnum_list_index = 0
    self._start_prometheus_client(mpi_rank=rank)
    self._setup_run()
def __init__(self, *args, **kwargs):
    """Build a null data source.

    Performs only base-class initialization; positional arguments are
    accepted for signature compatibility and ignored.
    """
    super().__init__(**kwargs)
    # prepare comms for running SmallData
    self.smalldata_obj = SmallData(**self.smalldata_kwargs)
def smalldata(self, **kwargs):
    """Create a fresh ``SmallData`` instance for this data source.

    The instance is configured from the source's stored smalldata
    settings plus any per-call keyword arguments. A key supplied both
    in the stored settings and in ``kwargs`` raises ``TypeError``
    (duplicate keyword argument), same as the double-unpack requires.
    """
    stored = self.smalldata_kwargs
    return SmallData(**stored, **kwargs)