Example #1
    def run_mpi(self):
        rankreq = np.empty(1, dtype='i')

        for (smd_chunk, step_chunk) in self.smdr_man.chunks():
            # Creates a chunk from smd and step data to send to an SmdNode
            # Anatomy of a chunk (pf=packet_footer):
            # [ [smd0][smd1][smd2][pf] ][ [step0][step1][step2][pf] ][ pf ]
            #   ----- smd_chunk ------    -------- step_chunk -------
            # -------------------------- chunk ------------------------------

            # Read new step data as it becomes available in the queue,
            # then send only the unseen portion to the evtbuilder rank.
            if not smd_chunk: break

            self.run.comms.smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)

            # Check missing steps for the current client
            missing_step_views = self.step_hist.get_buffer(rankreq[0])

            # Update this node's step buffers
            step_pf = PacketFooter(view=step_chunk)
            step_views = step_pf.split_packets()
            self.step_hist.extend_buffers(step_views, rankreq[0])

            smd_extended = repack_for_eb(smd_chunk, missing_step_views,
                                         self.run.configs)
            self.run.comms.smd_comm.Send(smd_extended, dest=rankreq[0])

        for i in range(self.run.comms.n_smd_nodes):
            self.run.comms.smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
            self.run.comms.smd_comm.Send(bytearray(), dest=rankreq[0])
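
A minimal sketch of the nested chunk layout described in the comment above, using only the PacketFooter calls that appear throughout these examples (the payload bytes are made up):

from psana.psexp.packet_footer import PacketFooter

def pack_packets(packets):
    # Append each packet, then a footer recording the per-packet sizes.
    pf = PacketFooter(len(packets))
    out = bytearray()
    for i, pkt in enumerate(packets):
        out.extend(pkt)
        pf.set_size(i, memoryview(pkt).nbytes)
    out.extend(pf.footer)
    return out

# Inner chunks: one packet per smd file, one per step/epics source
smd_chunk = pack_packets([b'smd0', b'smd1', b'smd2'])
step_chunk = pack_packets([b'step0', b'step1', b'step2'])

# Outer chunk: [smd_chunk][step_chunk][pf]
chunk = pack_packets([smd_chunk, step_chunk])

# Unpacking reverses the nesting
smd_view, step_view = PacketFooter(view=chunk).split_packets()
assert bytes(PacketFooter(view=smd_view).split_packets()[0]) == b'smd0'
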
Example #2
    def events(self, view):
        pf = PacketFooter(view=view)
        views = pf.split_packets()

        # Keeps offset, size, & timestamp for all events in the batch
        # for batch reading (if filter_fn is not given).
        ofsz_batch = np.zeros((pf.n_packets, self.n_smd_files, 2),
                              dtype=np.intp)
        has_offset = True
        for i, event_bytes in enumerate(views):
            if event_bytes:
                evt = Event._from_bytes(self.smd_configs, event_bytes)

                if not evt._has_offset:
                    has_offset = False
                    yield evt  # no offset info in the smalldata event
                else:
                    segment = 0  # smalldata detectors (d.info) have only one segment
                    ofsz = np.asarray([[d.info[segment].offsetAlg.intOffset, d.info[segment].offsetAlg.intDgramSize] \
                            for d in evt])
                    ofsz_batch[i, :, :] = ofsz

                    # With a filter on, fetch big data one event at a time
                    if self.filter_fn:
                        bd_evt = self.dm.jump(ofsz[:, 0], ofsz[:, 1])
                        yield bd_evt

        if not self.filter_fn and has_offset:
            # Read chunks of 'size' bytes and store them in views
            views = [None] * self.n_smd_files
            view_sizes = np.zeros(self.n_smd_files)
            for i in range(self.n_smd_files):
                # If no data were filtered, we can assume that all bigdata
                # dgrams starting from the first offset are stored consecutively
                # in the file. We read a chunk of sum(all dgram sizes) and
                # store in a view.
                offset = ofsz_batch[0, i, 0]
                size = np.sum(ofsz_batch[:, i, 1])
                view_sizes[i] = size

                os.lseek(self.dm.fds[i], offset, os.SEEK_SET)
                views[i] = os.read(self.dm.fds[i], size)

            # Build each event from these views
            dgrams = [None] * self.n_smd_files
            offsets = [0] * self.n_smd_files
            for i in range(pf.n_packets):
                for j in range(self.n_smd_files):
                    if offsets[j] >= view_sizes[j]:
                        continue

                    size = ofsz_batch[i, j, 1]
                    if size:
                        dgrams[j] = dgram.Dgram(view=views[j],
                                                config=self.dm.configs[j],
                                                offset=offsets[j])
                        offsets[j] += size

                bd_evt = Event(dgrams)
                yield bd_evt
Example #3
 def pack(self, *args):
     pf = PacketFooter(len(args))
     batch = bytearray()
     for i, arg in enumerate(args):
         pf.set_size(i, memoryview(arg).shape[0])
         batch += arg
     batch += pf.footer
     return batch
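
A possible round trip for pack, assuming split_packets yields one view per packed argument (as the other examples here suggest); obj stands for any hypothetical instance exposing this method:

batch = obj.pack(b'dgram-a', b'dgram-b')
views = PacketFooter(view=batch).split_packets()
assert [bytes(v) for v in views] == [b'dgram-a', b'dgram-b']
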
Example #4
    def __init__(self, view, run):
        self.configs = run.configs
        self.batch_size = run.batch_size
        self.filter_fn = run.filter_callback
        self.destination = run.destination
        self.n_files = len(self.configs)

        pf = PacketFooter(view=view)
        views = pf.split_packets()
        self.eb = EventBuilder(views)
Example #5
    def run_mpi(self):
        rankreq = np.empty(1, dtype='i')

        for (smd_chunk, update_chunk) in self.smdr_man.chunks():
            # Creates a chunk from smd and epics data to send to SmdNode
            # Anatomy of a chunk (pf=packet_footer):
            # [ [smd0][smd1][smd2][pf] ][ [epics0][epics1][epics2][pf] ][ pf ]
            #   ----- smd_chunk ------    ---------epics_chunk-------
            # -------------------------- chunk ------------------------------

            # Read new epics data as available in the queue
            # then send only unseen portion of data to the evtbuilder rank.
            update_pf = PacketFooter(view=update_chunk)
            self.epics_man.extend_buffers(update_pf.split_packets())
            smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
            epics_chunk = self.epics_man.get_buffer(rankreq[0])

            pf = PacketFooter(2)
            pf.set_size(0, memoryview(smd_chunk).shape[0])
            pf.set_size(1, memoryview(epics_chunk).shape[0])
            chunk = smd_chunk + epics_chunk + pf.footer

            smd_comm.Send(chunk, dest=rankreq[0])

        for i in range(PS_SMD_NODES):
            smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
            smd_comm.Send(bytearray(), dest=rankreq[0])
Example #6
    def __init__(self, view, run):
        self.configs = run.configs
        self.batch_size = run.batch_size
        self.filter_fn = run.filter_callback
        self.destination = run.destination
        self.n_files = len(self.configs)

        pf = PacketFooter(view=view)
        views = pf.split_packets()
        self.eb = EventBuilder(views, self.configs)
        self.c_filter = PrometheusManager.get_metric('psana_eb_filter')
Example #7
    def _to_bytes(self):
        event_bytes = bytearray()
        pf = PacketFooter(self._size)
        for i, d in enumerate(self._dgrams):
            d_bytes = bytearray(d)
            event_bytes.extend(d_bytes)
            pf.set_size(i, memoryview(d_bytes).shape[0])

        if event_bytes:
            event_bytes.extend(pf.footer)

        return event_bytes
Example #8
    def chunks(self):
        """ Generates a tuple of smd and step dgrams """
        self._read()
        while self.got_events > 0:
            smd_view = bytearray()
            smd_pf = PacketFooter(n_packets=self.n_files)
            step_view = bytearray()
            step_pf = PacketFooter(n_packets=self.n_files)

            for i in range(self.n_files):
                _smd_view = self.smdr.view(i)
                if _smd_view != 0:
                    smd_view.extend(_smd_view)
                    smd_pf.set_size(i, memoryview(_smd_view).shape[0])

                _step_view = self.smdr.view(i, step=True)
                if _step_view != 0:
                    step_view.extend(_step_view)
                    step_pf.set_size(i, memoryview(_step_view).shape[0])

            if smd_view or step_view:
                if smd_view:
                    smd_view.extend(smd_pf.footer)
                if step_view:
                    step_view.extend(step_pf.footer)
                yield (smd_view, step_view)

            if self.run.max_events:
                if self.processed_events >= self.run.max_events:
                    break

            self._read()
Example #9
    def chunks(self):
        """ Generates a tuple of smd and update dgrams """
        got_events = -1
        while got_events != 0:
            self.smdr.get(self.n_events)
            got_events = self.smdr.got_events
            self.processed_events += got_events

            smd_view = bytearray()
            smd_pf = PacketFooter(n_packets=self.n_files)
            update_view = bytearray()
            update_pf = PacketFooter(n_packets=self.n_files)

            for i in range(self.n_files):
                _smd_view = self.smdr.view(i)
                if _smd_view != 0:
                    smd_view.extend(_smd_view)
                    smd_pf.set_size(i, memoryview(_smd_view).shape[0])

                _update_view = self.smdr.view(i, update=True)
                if _update_view != 0:
                    update_view.extend(_update_view)
                    update_pf.set_size(i, memoryview(_update_view).shape[0])

            if smd_view or update_view:
                if smd_view:
                    smd_view.extend(smd_pf.footer)
                if update_view:
                    update_view.extend(update_pf.footer)
                yield (smd_view, update_view)

            if self.max_events:
                if self.processed_events >= self.max_events:
                    break
Example #10
    def __init__(self,
                 view,
                 configs,
                 batch_size=1,
                 filter_fn=0,
                 destination=0):
        self.configs = configs
        self.batch_size = batch_size
        self.filter_fn = filter_fn
        self.destination = destination

        pf = PacketFooter(view=view)
        views = pf.split_packets()
        self.eb = EventBuilder(views)
Example #11
    def step_chunk(self):
        """ Returns list of steps in all smd files."""
        step_view = bytearray()
        step_pf = PacketFooter(n_packets=self.n_files)

        for i in range(self.n_files):
            _step_view = self.eb.step_view(i)
            if _step_view != 0:
                step_view.extend(_step_view)
                step_pf.set_size(i, memoryview(_step_view).shape[0])

        if step_view:
            step_view.extend(step_pf.footer)

        return step_view
Example #12
 @classmethod
 def _from_bytes(cls, configs, event_bytes, run=None):
     dgrams = []
     if event_bytes:
         pf = PacketFooter(view=event_bytes)
         views = pf.split_packets()
         
         assert len(configs) == pf.n_packets
         
         dgrams = [None]*pf.n_packets # make sure that dgrams are arranged 
                                      # according to the smd files.
         for i in range(pf.n_packets):
             if views[i].shape[0] > 0: # do not include any missing dgram
                 dgrams[i] = dgram.Dgram(config=configs[i], view=views[i])
     
     evt = cls(dgrams, run=run)
     return evt
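
Read together with _to_bytes in Example #7, this suggests a serialize/deserialize round trip along the following lines (a sketch only; evt, Event, and configs must come from a real run):

payload = evt._to_bytes()                    # dgram bytes + packet footer
evt2 = Event._from_bytes(configs, payload)   # rebuilds one dgram per smd file
assert len(evt2._dgrams) == len(evt._dgrams)
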
Example #13
 def extend_buffers(self, views, client_id, as_event=False):
     idx = client_id - 1  # rank 0 has no send history.
     # views is either a list of smd chunks or a list of events
     if not as_event:
         # For Smd0
         for i_smd, view in enumerate(views):
             self.bufs[i_smd].extend(view)
             self.send_history[idx][i_smd] += view.nbytes
     else:
         # For EventBuilder
         for i_evt, evt_bytes in enumerate(views):
             pf = PacketFooter(view=evt_bytes)
             assert pf.n_packets == self.n_smds
             for i_smd, dg_bytes in enumerate(pf.split_packets()):
                 self.bufs[i_smd].extend(dg_bytes)
                 self.send_history[idx][i_smd] += dg_bytes.nbytes
Example #14
    def __init__(self, view, smd_configs, dm, filter_fn=0):
        if view:
            pf = PacketFooter(view=view)
            self.smd_events = pf.split_packets()
            self.n_events = pf.n_packets
        else:
            self.smd_events = None
            self.n_events = 0

        self.smd_configs = smd_configs
        self.dm = dm
        self.n_smd_files = len(self.smd_configs)
        self.filter_fn = filter_fn
        self.cn_events = 0

        if not self.filter_fn and len(self.dm.xtc_files) > 0:
            self._read_bigdata_in_chunk()
Example #15
    def run_mpi(self):
        rankreq = np.empty(1, dtype='i')

        for (smd_chunk, step_chunk) in self.smdr_man.chunks():
            # Creates a chunk from smd and step data to send to an SmdNode
            # Anatomy of a chunk (pf=packet_footer):
            # [ [smd0][smd1][smd2][pf] ][ [step0][step1][step2][pf] ][ pf ]
            #   ----- smd_chunk ------    -------- step_chunk -------
            # -------------------------- chunk ------------------------------

            # Read new step data as it becomes available in the queue,
            # then send only the unseen portion to the evtbuilder rank.
            if not smd_chunk: break

            st_req = time.time()
            self.run.comms.smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
            en_req = time.time()

            # Check missing steps for the current client
            missing_step_views = self.step_hist.get_buffer(rankreq[0])

            # Update step buffers (after getting the missing steps)
            step_pf = PacketFooter(view=step_chunk)
            step_views = step_pf.split_packets()
            self.step_hist.extend_buffers(step_views, rankreq[0])

            smd_extended = repack_for_eb(smd_chunk, missing_step_views,
                                         self.run.configs)

            self.run.comms.smd_comm.Send(smd_extended, dest=rankreq[0])

            # sending data to prometheus
            self.c_sent.labels('evts',
                               rankreq[0]).inc(self.smdr_man.got_events)
            self.c_sent.labels('batches', rankreq[0]).inc()
            self.c_sent.labels('MB', rankreq[0]).inc(
                memoryview(smd_extended).nbytes / 1e6)
            self.c_sent.labels('seconds', rankreq[0]).inc(en_req - st_req)
            logging.debug(
                f'node.py: Smd0 sent {self.smdr_man.got_events} events to {rankreq[0]} (waiting for this rank took {en_req-st_req:.5f} seconds)'
            )

        for i in range(self.run.comms.n_smd_nodes):
            self.run.comms.smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
            self.run.comms.smd_comm.Send(bytearray(), dest=rankreq[0])
Example #16
    def get_buffer(self, client_id):
        """ Returns new epics data (if any) for this client
        then updates the sent record."""
        update_chunk = bytearray()

        if self.n_smds:  # do nothing if no epics data found
            indexed_id = client_id - 1  # rank 0 has no send history.
            pf = PacketFooter(self.n_smds)
            for i, current_buf in enumerate(self.bufs):
                current_offset = self.send_history[indexed_id][i]
                current_buf_size = memoryview(current_buf).shape[0]
                pf.set_size(i, current_buf_size - current_offset)
                update_chunk.extend(current_buf[current_offset:])
                self.send_history[indexed_id][i] = current_buf_size
            update_chunk.extend(pf.footer)

        return update_chunk
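
Consumer-side sketch for get_buffer: the returned chunk is empty when there is no epics data, otherwise it carries one (possibly zero-length) packet per buffer plus a footer; epics_man is a hypothetical instance of the class above:

update_chunk = epics_man.get_buffer(client_id=1)
if update_chunk:
    new_views = PacketFooter(view=update_chunk).split_packets()
    # A second get_buffer(client_id=1) call returns only bytes
    # appended to the buffers since this one.
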
Example #17
    def _send_to_dest(self, dest_rank, smd_batch_dict, step_batch_dict,
                      eb_man):
        bd_comm = self.run.comms.bd_comm
        smd_batch, _ = smd_batch_dict[dest_rank]
        missing_step_views = self.step_hist.get_buffer(dest_rank)
        batch = repack_for_bd(smd_batch,
                              missing_step_views,
                              self.run.configs,
                              client=dest_rank)
        bd_comm.Send(batch, dest=dest_rank)
        del smd_batch_dict[dest_rank]  # done sending

        step_batch, _ = step_batch_dict[dest_rank]
        if eb_man.eb.nsteps > 0 and memoryview(step_batch).nbytes > 0:
            step_pf = PacketFooter(view=step_batch)
            self.step_hist.extend_buffers(step_pf.split_packets(),
                                          dest_rank,
                                          as_event=True)
        del step_batch_dict[dest_rank]  # done adding
Example #18
    def run_mpi(self):
        while True:
            bd_comm.Send(np.array([bd_rank], dtype='i'), dest=0)
            info = MPI.Status()
            bd_comm.Probe(source=0, tag=MPI.ANY_TAG, status=info)
            count = info.Get_elements(MPI.BYTE)
            chunk = bytearray(count)
            bd_comm.Recv(chunk, source=0)
            if count == 0:
                break

            if chunk == bytearray(b'wait'):
                continue

            pf = PacketFooter(view=chunk)
            smd_chunk, epics_chunk = pf.split_packets()

            pfe = PacketFooter(view=epics_chunk)
            epics_views = pfe.split_packets()
            self.run.epics_store.update(epics_views)

            if self.run.scan:
                yield Step(self.run, smd_batch=smd_chunk)
            else:
                for event in self.evt_man.events(smd_chunk):
                    yield event
Example #19
    def steps(self):
        """ Generates events between steps. """
        current_step_pos = 0
        smdr_man = SmdReaderManager(self.smd_dm.fds, self.max_events)
        for smd_chunk, step_chunk in smdr_man.chunks():
            # Update step stores
            step_pf = PacketFooter(view=step_chunk)
            step_views = step_pf.split_packets()
            self.epics_store.update(step_views)
            self.step_store.update(step_views)

            eb_man = EventBuilderManager(smd_chunk, self.configs, \
                    batch_size=self.batch_size, filter_fn=self.filter_callback)

            for step_dgram in self.step_store.dgrams(
                    from_pos=current_step_pos + 1):
                if step_dgram:
                    limit_ts = step_dgram.seq.timestamp()
                    current_step_pos += 1
                else:
                    limit_ts = -1
                yield Step(self, eb_man=eb_man, limit_ts=limit_ts)
Example #20
    def run_mpi(self):
        rankreq = np.empty(1, dtype='i')
        smd_comm = self.run.comms.smd_comm
        n_bd_nodes = self.run.comms.bd_comm.Get_size() - 1
        bd_comm = self.run.comms.bd_comm
        smd_rank = self.run.comms.smd_rank

        step_hist = StepHistory(self.run.comms.bd_size, len(self.run.configs))

        while True:
            smd_comm.Send(np.array([smd_rank], dtype='i'), dest=0)
            info = MPI.Status()
            smd_comm.Probe(source=0, status=info)
            count = info.Get_elements(MPI.BYTE)
            smd_chunk = bytearray(count)
            smd_comm.Recv(smd_chunk, source=0)
            if not smd_chunk:
                break

            eb_man = EventBuilderManager(smd_chunk, self.run)

            # Build batch of events
            for smd_batch_dict in eb_man.batches():
                # If there is a single batch with dest_rank=0, send it to any available bigdata node.
                if 0 in smd_batch_dict.keys():
                    smd_batch, _ = smd_batch_dict[0]
                    bd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
                    missing_step_views = step_hist.get_buffer(rankreq[0])
                    step_pf = PacketFooter(view=eb_man.step_chunk())
                    step_views = step_pf.split_packets()
                    step_hist.extend_buffers(step_views, rankreq[0])
                    batch = repack_for_bd(smd_batch, missing_step_views,
                                          self.run.configs)
                    bd_comm.Send(batch, dest=rankreq[0])

                # With > 1 dest_rank, start looping until all dest_rank batches
                # have been sent.
                else:
                    while smd_batch_dict:
                        bd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
                        if rankreq[0] in smd_batch_dict:
                            smd_batch, _ = smd_batch_dict[rankreq[0]]
                            missing_step_views = step_hist.get_buffer(
                                rankreq[0])
                            step_pf = PacketFooter(view=eb_man.step_chunk())
                            step_hist.extend_buffers(step_pf.split_packets(),
                                                     rankreq[0])
                            batch = repack_for_bd(smd_batch,
                                                  missing_step_views,
                                                  self.run.configs)
                            bd_comm.Send(batch, dest=rankreq[0])
                            del smd_batch_dict[rankreq[0]]  # done sending
                        else:
                            bd_comm.Send(bytearray(b'wait'), dest=rankreq[0])

        # Done - kill all alive bigdata nodes
        for i in range(n_bd_nodes):
            bd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
            bd_comm.Send(bytearray(), dest=rankreq[0])
Example #21
    def events(self):
        ev_man = EventManager(self.configs, self.dm, \
                filter_fn=self.filter_callback)

        #get smd chunks
        smdr_man = SmdReaderManager(self.smd_dm.fds, self.max_events)
        for (smd_chunk, step_chunk) in smdr_man.chunks():
            # Update epics_store for each chunk
            step_pf = PacketFooter(view=step_chunk)
            step_views = step_pf.split_packets()
            self.epics_store.update(step_views)

            eb_man = EventBuilderManager(smd_chunk,
                                         self.configs,
                                         batch_size=self.batch_size,
                                         filter_fn=self.filter_callback)

            for batch_dict in eb_man.batches():
                batch, _ = batch_dict[0]  # there's only one dest_rank in a serial run
                for evt in ev_man.events(batch):
                    if evt._dgrams[0].seq.service() != 12: continue  # 12 = L1Accept
                    yield evt
Example #22
    def __init__(self, view, smd_configs, dm, filter_fn=0):
        if view:
            # RunParallel: unused bigdata nodes receive this 'wait' message.
            if view == bytearray(b'wait'):
                self.smd_events = None
                self.n_events = 0
            else:
                pf = PacketFooter(view=view)
                self.smd_events = pf.split_packets()
                self.n_events = pf.n_packets
        else:
            self.smd_events = None
            self.n_events = 0

        self.smd_configs = smd_configs
        self.dm = dm
        self.n_smd_files = len(self.smd_configs)
        self.filter_fn = filter_fn
        self.cn_events = 0

        if not self.filter_fn and len(self.dm.xtc_files) > 0:
            self._read_bigdata_in_chunk()
Example #23
    def chunks(self):
        """ Generates a tuple of smd and step dgrams """
        is_done = False
        while not is_done:
            if self.smdr.is_complete():
                mmrv_bufs, mmrv_step_bufs = self.smdr.view(batch_size=self.batch_size)
                self.got_events = self.smdr.view_size
                self.processed_events += self.got_events
                
                # sending data to prometheus
                if self.run.prom_man:
                    logging.debug('Smd0 got %d events'%(self.got_events))
                    self.c_read.labels('evts', 'None').inc(self.got_events)
                    self.c_read.labels('batches', 'None').inc()

                if self.run.max_events and self.processed_events >= self.run.max_events:
                    is_done = True
                
                smd_view = bytearray()
                smd_pf = PacketFooter(n_packets=self.n_files)
                step_view = bytearray()
                step_pf = PacketFooter(n_packets=self.n_files)
                
                for i, (mmrv_buf, mmrv_step_buf) in enumerate(zip(mmrv_bufs, mmrv_step_bufs)):
                    if mmrv_buf != 0:
                        smd_view.extend(mmrv_buf)
                        smd_pf.set_size(i, memoryview(mmrv_buf).nbytes)
                    
                    if mmrv_step_buf != 0:
                        step_view.extend(mmrv_step_buf)
                        step_pf.set_size(i, memoryview(mmrv_step_buf).nbytes)

                if smd_view or step_view:
                    if smd_view:
                        smd_view.extend(smd_pf.footer)
                    if step_view:
                        step_view.extend(step_pf.footer)
                    yield (smd_view, step_view)

            else:
                self.smdr.get()
                if self.run.prom_man:
                    logging.debug('Smd0 read %.2f MB'%(self.smdr.got/1e6))
                    self.c_read.labels('MB', 'None').inc(self.smdr.got/1e6)
                if not self.smdr.is_complete():
                    is_done = True
                    break
Example #24
    def test_contents(self):
        view = bytearray()
        pf = PacketFooter(2)
        for i, msg in enumerate([b'packet0', b'packet1']):
            view.extend(msg)
            pf.set_size(i, memoryview(msg).shape[0])

        view.extend(pf.footer)

        pf2 = PacketFooter(view=view)
        assert pf2.n_packets == 2

        views = pf2.split_packets()
        assert memoryview(views[0]).shape[0] == 7
        assert memoryview(views[1]).shape[0] == 7
Example #25
def repack_for_eb(smd_chunk, step_views, configs):
    """ Smd0 uses this to prepend missing step views
    to the smd_chunk (just data with the same limit timestamp from all
    smd files - not event-built yet). 
    """
    if step_views:
        smd_chunk_pf = PacketFooter(view=smd_chunk)
        new_chunk_pf = PacketFooter(n_packets=smd_chunk_pf.n_packets)
        new_chunk = bytearray()
        for i, (smd_view, step_view) in enumerate(
                zip(smd_chunk_pf.split_packets(), step_views)):
            new_chunk.extend(step_view + bytearray(smd_view))
            new_chunk_pf.set_size(
                i,
                memoryview(step_view).nbytes + smd_view.nbytes)
        new_chunk.extend(new_chunk_pf.footer)
        return new_chunk
    else:
        return smd_chunk
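
A toy check of the repack above; configs is unused in the listed body so None is passed here, and the byte strings are made up:

smd_pf = PacketFooter(2)
smd_chunk = bytearray(b'smd0smd1')   # two smd files, 4 bytes each
smd_pf.set_size(0, 4)
smd_pf.set_size(1, 4)
smd_chunk.extend(smd_pf.footer)

extended = repack_for_eb(smd_chunk, [b'stepA', b'stepB'], None)
v0, v1 = PacketFooter(view=extended).split_packets()
assert bytes(v0) == b'stepAsmd0' and bytes(v1) == b'stepBsmd1'
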
Example #26
def repack_for_bd(smd_batch, step_views, configs):
    """ EventBuilder Node uses this to prepend missing step views 
    to the smd_batch. Unlike repack_for_eb (used by Smd0), this output 
    chunk contains list of pre-built events."""
    if step_views:
        batch_pf = PacketFooter(view=smd_batch)

        # Create bytearray containing a list of events from step_views
        steps = bytearray()
        n_smds = len(step_views)
        offsets = [0] * n_smds
        n_steps = 0
        step_sizes = []
        while offsets[0] < memoryview(step_views[0]).nbytes:
            step_pf = PacketFooter(n_packets=n_smds)
            step_size = 0
            for i, (config, view) in enumerate(zip(configs, step_views)):
                d = Dgram(config=config, view=view, offset=offsets[i])
                steps.extend(d)
                offsets[i] += d._size
                step_size += d._size
                step_pf.set_size(i, d._size)

            steps.extend(step_pf.footer)
            step_sizes.append(step_size + memoryview(step_pf.footer).nbytes)
            n_steps += 1

        # Create new batch with total_events = smd_batch_events + step_events
        new_batch_pf = PacketFooter(n_packets=batch_pf.n_packets + n_steps)
        for i in range(n_steps):
            new_batch_pf.set_size(i, step_sizes[i])

        for i in range(n_steps, new_batch_pf.n_packets):
            new_batch_pf.set_size(i, batch_pf.get_size(i - n_steps))

        new_batch = bytearray()
        new_batch.extend(steps)
        new_batch.extend(smd_batch[:memoryview(smd_batch).nbytes -
                                   memoryview(batch_pf.footer).nbytes])
        new_batch.extend(new_batch_pf.footer)
        return new_batch
    else:
        return smd_batch
Example #27
    def run_mpi(self):
        rankreq = np.empty(1, dtype='i')
        current_update_pos = 0
        while True:
            # Request the next chunk from Smd0
            smd_comm.Send(np.array([smd_rank], dtype='i'), dest=0)
            info = MPI.Status()
            smd_comm.Probe(source=0, status=info)
            count = info.Get_elements(MPI.BYTE)
            chunk = bytearray(count)
            smd_comm.Recv(chunk, source=0)
            if count == 0:
                break

            # Unpack the chunk received from Smd0
            pf = PacketFooter(view=chunk)
            smd_chunk, update_chunk = pf.split_packets()

            eb_man = EventBuilderManager(smd_chunk, self.run.configs, \
                    batch_size=self.run.batch_size, filter_fn=self.run.filter_callback, \
                    destination=self.run.destination)

            # Unpack the epics chunk and update the run's epics/step stores and the epics manager
            pfe = PacketFooter(view=update_chunk)
            update_views = pfe.split_packets()
            self.run.epics_store.update(update_views)
            self.run.step_store.update(update_views)
            self.epics_man.extend_buffers(update_views)

            # Build batch of events
            for update_dgram in self.run.step_store.dgrams(
                    from_pos=current_update_pos + 1, scan=self.run.scan):
                if update_dgram:
                    limit_ts = update_dgram.seq.timestamp()
                    current_update_pos += 1
                else:
                    limit_ts = -1

                for smd_batch_dict in eb_man.batches(limit_ts=limit_ts):
                    # If there is a single batch with dest_rank=0, send it to any available bigdata node.
                    if 0 in smd_batch_dict.keys():
                        smd_batch, _ = smd_batch_dict[0]
                        bd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
                        epics_batch = self.epics_man.get_buffer(rankreq[0])
                        batch = self.pack(smd_batch, epics_batch)
                        bd_comm.Send(batch, dest=rankreq[0])

                    # With > 1 dest_rank, start looping until all dest_rank batches
                    # have been sent.
                    else:
                        while smd_batch_dict:
                            bd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)

                            if rankreq[0] in smd_batch_dict:
                                smd_batch, _ = smd_batch_dict[rankreq[0]]
                                epics_batch = self.epics_man.get_buffer(
                                    rankreq[0])
                                batch = self.pack(smd_batch, epics_batch)
                                bd_comm.Send(batch, dest=rankreq[0])
                                del smd_batch_dict[rankreq[0]]  # done sending
                            else:
                                bd_comm.Send(bytearray(b'wait'),
                                             dest=rankreq[0])

        # Done - kill all alive bigdata nodes
        for i in range(self.n_bd_nodes):
            bd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
            bd_comm.Send(bytearray(), dest=rankreq[0])
Example #28
from psana.parallelreader import ParallelReader
from psana.psexp.packet_footer import PacketFooter
import os, glob
from psana import dgram

tmp_dir = os.path.join(os.environ.get('TEST_XTC_DIR', os.getcwd()), '.tmp')
xtc_files = [
    os.path.join(tmp_dir, 'data-r0001-s00.xtc2'),
    os.path.join(tmp_dir, 'data-r0001-s01.xtc2')
]
fds = [os.open(xtc_file, os.O_RDONLY) for xtc_file in xtc_files]

configs = [dgram.Dgram(file_descriptor=fd) for fd in fds]

prl_reader = ParallelReader(fds)
block = prl_reader.get_block()

pf = PacketFooter(view=block)
views = pf.split_packets()

for i in range(len(views)):
    config, view = configs[i], views[i]
    d = dgram.Dgram(config=config, view=view)
    #assert getattr(d.epics[0].fast, 'HX2:DVD:GCC:01:PMON') == 41.0
    #assert getattr(d.epics[0].slow, 'XPP:GON:MMS:01:RBV') == 41.0
Example #29
    def run_mpi(self):
        rankreq = np.empty(1, dtype='i')
        smd_comm = self.run.comms.smd_comm
        n_bd_nodes = self.run.comms.bd_comm.Get_size() - 1
        bd_comm = self.run.comms.bd_comm
        smd_rank = self.run.comms.smd_rank

        while True:
            smd_comm.Send(np.array([smd_rank], dtype='i'), dest=0)
            info = MPI.Status()
            smd_comm.Probe(source=0, status=info)
            count = info.Get_elements(MPI.BYTE)
            smd_chunk = bytearray(count)
            smd_comm.Recv(smd_chunk, source=0)
            if not smd_chunk:
                break

            eb_man = EventBuilderManager(smd_chunk, self.run)

            # Build batch of events
            for smd_batch_dict, step_batch_dict in eb_man.batches():

                # If there is a single batch with dest_rank=0, send it to any available bigdata node.
                if 0 in smd_batch_dict.keys():
                    smd_batch, _ = smd_batch_dict[0]
                    step_batch, _ = step_batch_dict[0]
                    bd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)

                    missing_step_views = self.step_hist.get_buffer(rankreq[0])
                    batch = repack_for_bd(smd_batch,
                                          missing_step_views,
                                          self.run.configs,
                                          client=rankreq[0])
                    bd_comm.Send(batch, dest=rankreq[0])

                    if eb_man.eb.nsteps > 0 and memoryview(
                            step_batch).nbytes > 0:
                        step_pf = PacketFooter(view=step_batch)
                        self.step_hist.extend_buffers(step_pf.split_packets(),
                                                      rankreq[0],
                                                      as_event=True)

                # With > 1 dest_rank, start looping until all dest_rank batches
                # have been sent.
                else:
                    # Check if destinations are valid
                    destinations = np.asarray(list(smd_batch_dict.keys()))
                    if any(destinations > n_bd_nodes):
                        print(
                            f"Found invalid destination ({destinations}). Must be <= {n_bd_nodes} (#big data nodes)"
                        )
                        break

                    while smd_batch_dict:
                        sent = False
                        if self.waiting_bds:  # Check first if there are bd nodes waiting
                            copied_waiting_bds = self.waiting_bds[:]
                            for dest_rank in copied_waiting_bds:
                                if dest_rank in smd_batch_dict:
                                    self._send_to_dest(dest_rank,
                                                       smd_batch_dict,
                                                       step_batch_dict, eb_man)
                                    self.waiting_bds.remove(dest_rank)
                                    sent = True

                        if not sent:
                            bd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
                            dest_rank = rankreq[0]
                            if dest_rank in smd_batch_dict:
                                self._send_to_dest(dest_rank, smd_batch_dict,
                                                   step_batch_dict, eb_man)
                            else:
                                self.waiting_bds.append(dest_rank)

        # Done
        # - kill idling nodes
        for dest_rank in self.waiting_bds:
            bd_comm.Send(bytearray(), dest=dest_rank)

        # - kill all other nodes
        for i in range(n_bd_nodes - len(self.waiting_bds)):
            bd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
            bd_comm.Send(bytearray(), dest=rankreq[0])
Example #30
    def run_mpi(self):
        rankreq = np.empty(1, dtype='i')
        smd_comm = self.run.comms.smd_comm
        n_bd_nodes = self.run.comms.bd_comm.Get_size() - 1
        bd_comm = self.run.comms.bd_comm
        smd_rank = self.run.comms.smd_rank

        while True:
            smd_chunk = self._request_data(smd_comm)
            if not smd_chunk:
                break

            eb_man = EventBuilderManager(smd_chunk, self.run)

            # Build batch of events
            for smd_batch_dict, step_batch_dict in eb_man.batches():

                # If there is a single batch with dest_rank=0, send it to any available bigdata node.
                if 0 in smd_batch_dict.keys():
                    smd_batch, _ = smd_batch_dict[0]
                    step_batch, _ = step_batch_dict[0]
                    self._request_rank(rankreq)

                    missing_step_views = self.step_hist.get_buffer(rankreq[0])
                    batch = repack_for_bd(smd_batch,
                                          missing_step_views,
                                          self.run.configs,
                                          client=rankreq[0])
                    bd_comm.Send(batch, dest=rankreq[0])

                    # sending data to prometheus
                    logging.debug(
                        'node.py: EventBuilder sent %d events (%.2f MB) to rank %d'
                        % (eb_man.eb.nevents, memoryview(batch).nbytes / 1e6,
                           rankreq[0]))
                    self.c_sent.labels('evts',
                                       rankreq[0]).inc(eb_man.eb.nevents)
                    self.c_sent.labels('batches', rankreq[0]).inc()
                    self.c_sent.labels('MB', rankreq[0]).inc(
                        memoryview(batch).nbytes / 1e6)

                    if eb_man.eb.nsteps > 0 and memoryview(
                            step_batch).nbytes > 0:
                        step_pf = PacketFooter(view=step_batch)
                        self.step_hist.extend_buffers(step_pf.split_packets(),
                                                      rankreq[0],
                                                      as_event=True)

                # With > 1 dest_rank, start looping until all dest_rank batches
                # have been sent.
                else:
                    # Check if destinations are valid
                    destinations = np.asarray(list(smd_batch_dict.keys()))
                    if any(destinations > n_bd_nodes):
                        print(
                            f"Found invalid destination ({destinations}). Must be <= {n_bd_nodes} (#big data nodes)"
                        )
                        break

                    while smd_batch_dict:
                        sent = False
                        if self.waiting_bds:  # Check first if there are bd nodes waiting
                            copied_waiting_bds = self.waiting_bds[:]
                            for dest_rank in copied_waiting_bds:
                                if dest_rank in smd_batch_dict:
                                    self._send_to_dest(dest_rank,
                                                       smd_batch_dict,
                                                       step_batch_dict, eb_man)
                                    self.waiting_bds.remove(dest_rank)
                                    sent = True

                        if not sent:
                            self._request_rank(rankreq)
                            dest_rank = rankreq[0]
                            if dest_rank in smd_batch_dict:
                                self._send_to_dest(dest_rank, smd_batch_dict,
                                                   step_batch_dict, eb_man)
                            else:
                                self.waiting_bds.append(dest_rank)

        # Done
        # - kill idling nodes
        for dest_rank in self.waiting_bds:
            bd_comm.Send(bytearray(), dest=dest_rank)

        # - kill all other nodes
        for i in range(n_bd_nodes - len(self.waiting_bds)):
            self._request_rank(rankreq)
            bd_comm.Send(bytearray(), dest=rankreq[0])