Code example #1
    def __next__(self):
        if self.cn_events == self.n_events: 
            raise StopIteration
        if len(self.dm.xtc_files) == 0:
            smd_evt = Event._from_bytes(self.smd_configs, self.smd_events[self.cn_events], run=self.dm.run())
            self.cn_events += 1
            self._inc_prometheus_counter('evts')
            return smd_evt
        
        if self.filter_fn:
            smd_evt = Event._from_bytes(self.smd_configs, self.smd_events[self.cn_events], run=self.dm.run())
            self.cn_events += 1
            if smd_evt.service() == TransitionId.L1Accept:
                offset_and_size_array = smd_evt.get_offsets_and_sizes()
                bd_evt = self.dm.jump(offset_and_size_array[:,0], offset_and_size_array[:,1])
                self._inc_prometheus_counter('MB', np.sum(offset_and_size_array[:,1])/1e6)
                logging.debug('BigData read single %.2f MB'%(np.sum(offset_and_size_array[:,1])/1e6))
            else:
                bd_evt = smd_evt

            self._inc_prometheus_counter('evts')
            return bd_evt
        
        dgrams = [None] * self.n_smd_files
        ofsz = self.ofsz_batch[self.cn_events,:,:]
        for j in range(self.n_smd_files):
            if ofsz[j,1]:
                dgrams[j] = dgram.Dgram(view=self.bigdata[j], config=self.dm.configs[j], offset=ofsz[j,0])
        bd_evt = Event(dgrams, run=self.dm.run())
        self.cn_events += 1
        self._inc_prometheus_counter('evts')
        return bd_evt
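
Code examples #1, #3, #4, #10 and #11 all follow the same pattern: __next__ deserializes the next small-data event, optionally resolves it into a big-data event, and raises StopIteration once the event counter reaches n_events. Below is a minimal standalone sketch of that iterator pattern; the class and field names (EventBatchIterator, payloads, deserialize) are illustrative stand-ins, not the lcls2 API.

class EventBatchIterator:
    """Minimal sketch of the __next__/StopIteration pattern used above.

    Walks a list of pre-serialized event payloads and deserializes one per
    call; the names here are illustrative, not the lcls2 API.
    """

    def __init__(self, payloads, deserialize=bytes):
        self.payloads = payloads        # e.g. a list of bytes objects
        self.deserialize = deserialize  # stand-in for Event._from_bytes
        self.cn_events = 0
        self.n_events = len(payloads)

    def __iter__(self):
        return self

    def __next__(self):
        if self.cn_events == self.n_events:
            raise StopIteration
        evt = self.deserialize(self.payloads[self.cn_events])
        self.cn_events += 1
        return evt


# Usage:
for evt in EventBatchIterator([b'evt0', b'evt1']):
    print(evt)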
Code example #2
    def events(self, view):
        pf = PacketFooter(view=view)
        views = pf.split_packets()

        # Keeps offset, size, & timestamp for all events in the batch
        # for batch reading (if filter_fn is not given).
        ofsz_batch = np.zeros((pf.n_packets, self.n_smd_files, 2),
                              dtype=np.intp)
        has_offset = True
        for i, event_bytes in enumerate(views):
            if event_bytes:
                evt = Event._from_bytes(self.smd_configs, event_bytes)

                if not evt._has_offset:
                    has_offset = False
                    yield evt  # no offset info in the smalldata event
                else:
                    segment = 0  # "d.info" detectors have only one segment
                    ofsz = np.asarray([[d.info[segment].offsetAlg.intOffset, d.info[segment].offsetAlg.intDgramSize] \
                            for d in evt])
                    ofsz_batch[i, :, :] = ofsz

                    # When a filter is given, read big data one event at a
                    # time (otherwise events are batch-read below)
                    if self.filter_fn:
                        bd_evt = self.dm.jump(ofsz[:, 0], ofsz[:, 1])
                        yield bd_evt

        if not self.filter_fn and has_offset:
            # Read chunks of 'size' bytes and store them in views
            views = [None] * self.n_smd_files
            view_sizes = np.zeros(self.n_smd_files)
            for i in range(self.n_smd_files):
                # If no data were filtered, we can assume that all bigdata
                # dgrams starting from the first offset are stored consecutively
                # in the file. We read a chunk of sum(all dgram sizes) and
                # store in a view.
                offset = ofsz_batch[0, i, 0]
                size = np.sum(ofsz_batch[:, i, 1])
                view_sizes[i] = size

                os.lseek(self.dm.fds[i], offset, 0)
                views[i] = os.read(self.dm.fds[i], size)

            # Build each event from these views
            dgrams = [None] * self.n_smd_files
            offsets = [0] * self.n_smd_files
            for i in range(pf.n_packets):
                for j in range(self.n_smd_files):
                    if offsets[j] >= view_sizes[j]:
                        continue

                    size = ofsz_batch[i, j, 1]
                    if size:
                        dgrams[j] = dgram.Dgram(view=views[j],
                                                config=self.dm.configs[j],
                                                offset=offsets[j])
                        offsets[j] += size

                bd_evt = Event(dgrams)
                yield bd_evt
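
When no filter is given, the events() generator above batch-reads big data: since the dgrams of consecutive events sit contiguously on disk, a single os.lseek/os.read of sum(sizes) bytes replaces one read per event, and per-event views are then sliced out of the chunk. A self-contained sketch of that chunked-read idea, assuming a made-up file layout and record sizes:

import os
import tempfile

import numpy as np

# Fake "bigdata" file: three consecutive records of known sizes.
records = [b'a' * 8, b'b' * 16, b'c' * 4]
sizes = np.array([len(r) for r in records], dtype=np.intp)
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b''.join(records))
    path = f.name

fd = os.open(path, os.O_RDONLY)
first_offset = 0                          # offset of the first record
os.lseek(fd, first_offset, os.SEEK_SET)   # seek once ...
chunk = os.read(fd, int(np.sum(sizes)))   # ... and read the whole batch
os.close(fd)
os.unlink(path)

# Slice a per-record view out of the chunk using cumulative offsets.
offsets = np.concatenate(([0], np.cumsum(sizes)[:-1]))
views = [memoryview(chunk)[o:o + s] for o, s in zip(offsets, sizes)]
assert bytes(views[1]) == records[1]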
Code example #3
File: event_manager.py Project: ZhenghengLi/lcls2
    def __next__(self):
        if self.cn_events == self.n_events:
            raise StopIteration
        if len(self.dm.xtc_files) == 0:
            smd_evt = Event._from_bytes(self.smd_configs,
                                        self.smd_events[self.cn_events],
                                        run=self.dm.run())
            self.cn_events += 1
            return smd_evt

        if self.filter_fn:
            smd_evt = Event._from_bytes(self.smd_configs,
                                        self.smd_events[self.cn_events],
                                        run=self.dm.run())
            self.cn_events += 1
            ofsz = np.asarray([[d.smdinfo[0].offsetAlg.intOffset, \
                    d.smdinfo[0].offsetAlg.intDgramSize] for d in smd_evt])
            bd_evt = self.dm.jump(ofsz[:, 0], ofsz[:, 1])
            return bd_evt

        dgrams = [None] * self.n_smd_files
        ofsz = self.ofsz_batch[self.cn_events, :, :]
        for j in range(self.n_smd_files):
            if ofsz[j, 1]:
                dgrams[j] = dgram.Dgram(view=self.bigdata[j],
                                        config=self.dm.configs[j],
                                        offset=ofsz[j, 0])
        bd_evt = Event(dgrams, run=self.dm.run())
        self.cn_events += 1
        return bd_evt
Code example #4
File: event_manager.py Project: slactjohnson/lcls2
    def __next__(self):
        if self.cn_events == self.n_events:
            raise StopIteration
        if len(self.dm.xtc_files) == 0:
            smd_evt = Event._from_bytes(self.smd_configs,
                                        self.smd_events[self.cn_events],
                                        run=self.dm.run())
            self.cn_events += 1
            return smd_evt

        if self.filter_fn:
            smd_evt = Event._from_bytes(self.smd_configs,
                                        self.smd_events[self.cn_events],
                                        run=self.dm.run())
            self.cn_events += 1
            if smd_evt.service() == TransitionId.L1Accept:
                offset_and_size_array = smd_evt.get_offsets_and_sizes()
                bd_evt = self.dm.jump(offset_and_size_array[:, 0],
                                      offset_and_size_array[:, 1])
            else:
                bd_evt = smd_evt

            return bd_evt

        dgrams = [None] * self.n_smd_files
        ofsz = self.ofsz_batch[self.cn_events, :, :]
        for j in range(self.n_smd_files):
            if ofsz[j, 1]:
                dgrams[j] = dgram.Dgram(view=self.bigdata[j],
                                        config=self.dm.configs[j],
                                        offset=ofsz[j, 0])
        bd_evt = Event(dgrams, run=self.dm.run())
        self.cn_events += 1
        return bd_evt
Code example #5
    def _read_bigdata_in_chunk(self):
        """ Read bigdata chunks of 'size' bytes and store them in views.
        Note that the views here contain bigdata (not smd) events.
        All non-L1 dgrams are copied from smd_events and prepended
        directly to the bigdata chunks.
        """
        self.bigdata = []
        for i in range(self.n_smd_files):
            self.bigdata.append(bytearray())

        offsets = [0] * self.n_smd_files
        sizes = np.zeros(self.n_smd_files, dtype=np.intp)  # accumulated elementwise below
        self.ofsz_batch = np.zeros((self.n_events, self.n_smd_files, 2),
                                   dtype=np.intp)

        # Look for first L1 event - copy all non L1 to bigdata buffers
        first_L1_pos = -1
        for i, event_bytes in enumerate(self.smd_events):
            if event_bytes:
                smd_evt = Event._from_bytes(self.smd_configs,
                                            event_bytes,
                                            run=self.dm.run())
                ofsz = smd_evt.get_offsets_and_sizes()
                if smd_evt.service() == TransitionId.L1Accept:
                    offsets = ofsz[:, 0]
                    first_L1_pos = i
                    break
                else:
                    for smd_id, d in enumerate(smd_evt._dgrams):
                        if not d: continue
                        self.bigdata[smd_id].extend(d)

                if i > 0:
                    self.ofsz_batch[i, :, 0] = self.ofsz_batch[
                        i - 1, :, 0] + self.ofsz_batch[i - 1, :, 1]
                self.ofsz_batch[i, :, 1] = ofsz[:, 1]

        if first_L1_pos == -1: return

        for i, event_bytes in enumerate(self.smd_events[first_L1_pos:]):
            j = i + first_L1_pos
            if event_bytes:
                smd_evt = Event._from_bytes(self.smd_configs,
                                            event_bytes,
                                            run=self.dm.run())
                ofsz = smd_evt.get_offsets_and_sizes()

                if j > 0:
                    self.ofsz_batch[j, :, 0] = self.ofsz_batch[
                        j - 1, :, 0] + self.ofsz_batch[j - 1, :, 1]
                self.ofsz_batch[j, :, 1] = ofsz[:, 1]

                sizes += ofsz[:, 1]

        # If no data were filtered, we can assume that all bigdata
        # dgrams starting from the first offset are stored consecutively
        # in the file. We read a chunk of sum(all dgram sizes) and
        # store in a view.
        self._read_chunks_from_disk(self.dm.fds, offsets, sizes)
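
The ofsz_batch bookkeeping above stores, for every event and smd file, the (offset, size) of its dgram inside the in-memory big-data buffer, where each offset is the previous event's offset plus the previous event's size. A small numpy sketch of the same bookkeeping, computed with a cumulative sum instead of the per-event loop; the array values are illustrative, following the (n_events, n_smd_files, 2) layout used above:

import numpy as np

n_events, n_smd_files = 4, 2
# Per-event dgram sizes for each smd file (illustrative values; 0 means
# the dgram is missing for that file).
sizes = np.array([[10, 7],
                  [12, 0],
                  [ 8, 9],
                  [11, 5]], dtype=np.intp)

# ofsz_batch[i, j] = (offset into the in-memory buffer, size) for event i
# of smd file j, where offset[i] = offset[i-1] + size[i-1].
ofsz_batch = np.zeros((n_events, n_smd_files, 2), dtype=np.intp)
ofsz_batch[:, :, 1] = sizes
ofsz_batch[1:, :, 0] = np.cumsum(sizes[:-1], axis=0)

print(ofsz_batch[:, 0, :])  # (offset, size) pairs for the first smd file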
Code example #6
    def __next__(self):
        """Only supports sequential read (no event building)."""
        if self.buffered_beginruns:
            self.found_endrun = False
            evt = Event(self.buffered_beginruns, run=self.run())
            self._timestamps += [evt.timestamp]
            self.buffered_beginruns = []
            return evt

        if self.shmem_cli:
            view = self.shmem_cli.get(self.shmem_kwargs)
            if view:
                # Release shmem buffer after copying Transition data
                # cpo: copy L1Accepts too because some shmem
                # applications like AMI's pickN can hold references
                # to dgrams for a long time, consuming the shmem buffers
                # and creating a deadlock situation. We could revisit this
                # later and only deep-copy arrays inside pickN, for example,
                # but that would be more fragile.
                barray = bytes(view[:_dgSize(view)])
                self.shmem_cli.freeByIndex(self.shmem_kwargs['index'],
                                           self.shmem_kwargs['size'])
                view = memoryview(barray)
                # use the most recent configure datagram
                config = self.configs[len(self.configs) - 1]
                d = dgram.Dgram(config=config, view=view)
                dgrams = [d]
            else:
                raise StopIteration
        else:
            try:
                dgrams = [
                    dgram.Dgram(config=config, max_retries=self.max_retries)
                    for config in self.configs
                ]
            except StopIteration:
                fake_endruns = self._check_missing_endrun()
                if fake_endruns:
                    dgrams = fake_endruns
                else:
                    raise StopIteration

        # Check BeginRun - EndRun pairing
        service = dgrams[0].service()
        if service == TransitionId.BeginRun:
            fake_endruns = self._check_missing_endrun(beginruns=dgrams)
            if fake_endruns:
                dgrams = fake_endruns

        if service == TransitionId.EndRun:
            self.found_endrun = True

        evt = Event(dgrams, run=self.run())
        self._timestamps += [evt.timestamp]
        return evt
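
The shmem branch above copies the datagram out of the shared-memory view with bytes(view[:size]) before freeing the buffer, so the returned Event no longer pins a shmem slot. A tiny sketch of that copy-then-release idea in plain Python; the buffer contents and the dg_size value are stand-ins, not the shmem client API:

# A reusable buffer that a producer will overwrite once we release it.
shared_buf = bytearray(b'payload-bytes-0123456789')
view = memoryview(shared_buf)

dg_size = 13                    # pretend the dgram header reported 13 bytes
barray = bytes(view[:dg_size])  # deep-copy the dgram out of the shared buffer
view.release()                  # the shared buffer may now be reused
shared_buf[:] = b'X' * len(shared_buf)

dgram_view = memoryview(barray)  # safe: backed by our private copy
assert bytes(dgram_view) == b'payload-bytes'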
Code example #7
    def __next__(self):
        """Only supports sequential read (no event building)."""
        if self.shmem_cli:
            view = self.shmem_cli.get(self.shmem_kwargs)
            if view:
                # Release shmem buffer after copying Transition data
                if _service(view) != TransitionId.L1Accept:
                    barray = bytes(view[:_dgSize(view)])
                    self.shmem_cli.freeByIndex(self.shmem_kwargs['index'],
                                               self.shmem_kwargs['size'])
                    view = memoryview(barray)
                # use the most recent configure datagram
                config = self.configs[len(self.configs) - 1]
                d = dgram.Dgram(config=config,view=view, \
                                shmem_index=self.shmem_kwargs['index'], \
                                shmem_size=self.shmem_kwargs['size'], \
                                shmem_cli_cptr=self.shmem_kwargs['cli_cptr'], \
                                shmem_cli_pyobj=self.shmem_cli)
                dgrams = [d]
            else:
                raise StopIteration
        else:
            dgrams = [dgram.Dgram(config=config) for config in self.configs]

        evt = Event(dgrams, run=self.run())
        self._timestamps += [evt.timestamp]
        return evt
Code example #8
File: dgrammanager.py Project: pcdshub/lcls2
 def jump(self, offsets, sizes):
     """ Jumps to the given offsets and reads out a dgram from each xtc file.
     This is used in normal mode (multiple detectors with MPI).
     """
     assert len(offsets) > 0 and len(sizes) > 0
     dgrams = [self.jumps(dgram_i, offset, size) for dgram_i, (offset, size)
         in enumerate(zip(offsets, sizes))]
     evt = Event(dgrams, run=self._run)
     return evt
Code example #9
File: event_manager.py Project: monarin/divelite
    def _get_next_evt(self):
        """ Generate a bd evt, handling three cases:
        1) No bigdata, or a transition event prior to i_first_L1:
            create dgrams from smd_view
        2) L1Accept event:
            create dgrams from bd_bufs
        3) L1Accept with some smd files replaced by bigdata files:
            create a dgram from smd_view if use_smds[i_smd] is set,
            otherwise create it from bd_bufs
        """
        dgrams = [None] * self.n_smd_files
        for i_smd in range(self.n_smd_files):
            # Check in case we need to switch to the next bigdata chunk file
            if self.services[self.i_evt] != TransitionId.L1Accept:
                if self.new_chunk_id_array[self.i_evt, i_smd] != 0:
                    print(f'open_new_bd_file i_smd={i_smd} chunk_id={self.new_chunk_id_array[self.i_evt, i_smd]}')
                    self._open_new_bd_file(i_smd, 
                            self.new_chunk_id_array[self.i_evt, i_smd])
                
            view, offset, size = (bytearray(),0,0)
            # Try to create dgram from smd view
            if self.dm.n_files == 0 or self.use_smds[i_smd] \
                    or self.i_evt < self.i_first_L1s[i_smd]:
                view = self.smd_view
                offset = self.smd_offset_array[self.i_evt, i_smd]
                size = self.smd_size_array[self.i_evt, i_smd]

                # Non-L1 dgrams prior to i_first_L1 are counted as a new "chunk"
                # because their cutoff flag is set (data come from the smd view
                # instead of a bd chunk). We need to update the chunk index for
                # this smd when we see a non-L1 dgram.
                self.chunk_indices[i_smd] += 1

            else:
                # Refill the bd buf if this dgram doesn't fit in the current view
                if self.bd_buf_offsets[i_smd] + self.bd_size_array[self.i_evt, i_smd] \
                        > memoryview(self.bd_bufs[i_smd]).nbytes:
                    self._fill_bd_chunk(i_smd)
                    self.chunk_indices[i_smd] += 1
                
                # This is the offset into the bd buffer, not the offset stored in
                # the smd dgram (which, in contrast, points to a location on disk).
                offset = self.bd_buf_offsets[i_smd] 
                size = self.bd_size_array[self.i_evt, i_smd] 
                view = self.bd_bufs[i_smd]
                self.bd_buf_offsets[i_smd] += size
            
            if size:  # handles missing dgram
                dgrams[i_smd] = dgram.Dgram(config=self.dm.configs[i_smd], view=view, offset=offset)

        self.i_evt += 1
        self._inc_prometheus_counter('evts')
        evt = Event(dgrams=dgrams, run=self.dm.get_run()) 
        print(f'YIELD ts={evt.timestamp} service={evt.service()}')
        return evt
Code example #10
File: event_manager.py Project: Tubbz-alt/lcls2
    def __next__(self):
        if self.cn_events == self.n_events:
            raise StopIteration
        if len(self.dm.xtc_files) == 0:
            smd_evt = Event._from_bytes(self.smd_configs,
                                        self.smd_events[self.cn_events],
                                        run=self.dm.run())
            self.cn_events += 1
            self._inc_prometheus_counter('evts')
            return smd_evt

        if self.filter_fn:
            smd_evt = Event._from_bytes(self.smd_configs,
                                        self.smd_events[self.cn_events],
                                        run=self.dm.run())
            self.cn_events += 1
            if smd_evt.service() == TransitionId.L1Accept:
                offset_and_size_array = smd_evt.get_offsets_and_sizes()
                bd_evt = self._read_event_from_disk(
                    offset_and_size_array[:, 0], offset_and_size_array[:, 1])
                self._inc_prometheus_counter(
                    'MB',
                    np.sum(offset_and_size_array[:, 1]) / 1e6)
            else:
                bd_evt = smd_evt

            self._inc_prometheus_counter('evts')
            return bd_evt

        dgrams = [None] * self.n_smd_files
        ofsz = self.ofsz_batch[self.cn_events, :, :]
        for j in range(self.n_smd_files):
            d_offset, d_size = ofsz[j]
            if d_size and d_offset + d_size <= \
                    memoryview(self.bigdata[j]).nbytes:
                dgrams[j] = dgram.Dgram(view=self.bigdata[j],
                                        config=self.dm.configs[j],
                                        offset=d_offset)
        bd_evt = Event(dgrams, run=self.dm.run())
        self.cn_events += 1
        self._inc_prometheus_counter('evts')
        return bd_evt
Code example #11
File: event_manager.py Project: pcdshub/lcls2
    def __next__(self):
        if self.cn_events == self.n_events:
            raise StopIteration

        smd_evt = Event._from_bytes(self.smd_configs,
                                    self.smd_events[self.cn_events],
                                    run=self.dm.get_run())
        if len(self.dm.xtc_files) == 0 or \
                smd_evt.service() != TransitionId.L1Accept:
            self.cn_events += 1
            self._inc_prometheus_counter('evts')
            return smd_evt

        if self.filter_fn:
            bd_dgrams = []
            read_size = 0
            for smd_i, smd_dgram in enumerate(smd_evt._dgrams):
                if self.use_smds[smd_i]:
                    bd_dgrams.append(smd_dgram)
                else:
                    offset_and_size = smd_evt.get_offset_and_size(smd_i)
                    read_size += offset_and_size[0, 1]
                    bd_dgrams.append(
                        self._read_dgram_from_disk(smd_i, offset_and_size))
            bd_evt = Event(dgrams=bd_dgrams, run=self.dm.get_run())
            self.cn_events += 1
            self._inc_prometheus_counter('evts')
            return bd_evt

        dgrams = [None] * self.n_smd_files
        ofsz = self.ofsz_batch[self.cn_events, :, :]
        for i_smd in range(self.n_smd_files):
            d_offset, d_size = ofsz[i_smd]
            if d_size and d_offset + d_size <= \
                    memoryview(self.bigdata[i_smd]).nbytes:
                dgrams[i_smd] = dgram.Dgram(view=self.bigdata[i_smd],
                                            config=self.dm.configs[i_smd],
                                            offset=d_offset)
        bd_evt = Event(dgrams, run=self.dm.get_run())
        self.cn_events += 1
        self._inc_prometheus_counter('evts')
        return bd_evt
Code example #12
File: event_manager.py Project: slac-lcls/lcls2
    def _get_next_evt(self):
        """ Generate a bd evt, handling three cases:
        1) No bigdata, or a transition event:
            create dgrams from smd_view
        2) L1Accept event:
            create dgrams from bd_bufs
        3) L1Accept with some smd files replaced by bigdata files:
            create a dgram from smd_view if use_smds[i_smd] is set,
            otherwise create it from bd_bufs
        """
        dgrams = [None] * self.n_smd_files
        for i_smd in range(self.n_smd_files):
            if self.dm.n_files == 0 or                               \
                    self.services[self.i_evt] != TransitionId.L1Accept or   \
                    self.use_smds[i_smd]:
                view = self.smd_view
                offset = self.smd_offset_array[self.i_evt, i_smd]
                size = self.smd_size_array[self.i_evt, i_smd]

                # Non-L1 events are always counted as a new "chunk" since
                # their cutoff flag is set (data come from the smd view
                # instead of a bd chunk). We need to update the chunk index
                # for this smd when we see a non-L1 event.
                self.chunk_indices[i_smd] += 1

                # Check in case we need to switch to the next bigdata chunk file
                if self.services[self.i_evt] != TransitionId.L1Accept:
                    if self.new_chunk_id_array[self.i_evt, i_smd] != 0:
                        self._open_new_bd_file(
                            i_smd, self.new_chunk_id_array[self.i_evt, i_smd])
            else:
                # Fill up bd buf if this dgram doesn't fit in the current view
                if self.bd_buf_offsets[i_smd] + self.bd_size_array[self.i_evt, i_smd] \
                        > memoryview(self.bd_bufs[i_smd]).nbytes:
                    self._fill_bd_chunk(i_smd)
                    self.chunk_indices[i_smd] += 1

                # This is the offset into the bd buffer, not the offset stored in
                # the smd dgram (which, in contrast, points to a location on disk).
                offset = self.bd_buf_offsets[i_smd]
                size = self.bd_size_array[self.i_evt, i_smd]
                view = self.bd_bufs[i_smd]
                self.bd_buf_offsets[i_smd] += size

            if size > 0:  # handles missing dgram
                dgrams[i_smd] = dgram.Dgram(config=self.dm.configs[i_smd],
                                            view=view,
                                            offset=offset)

        self.i_evt += 1
        self._inc_prometheus_counter('evts')
        evt = Event(dgrams=dgrams, run=self.dm.get_run())
        return evt
Code example #13
 def events(self, view):
     views = view.split(b'endofevt')
     for event_bytes in views:
         if event_bytes:
             evt = Event().from_bytes(self.ds.smd_dm.configs, event_bytes)
             # get big data
             ofsz = np.asarray([[d.info.offsetAlg.intOffset, d.info.offsetAlg.intDgramSize] \
                     for d in evt])
             bd_evt = self.ds.dm.next(offsets=ofsz[:, 0],
                                      sizes=ofsz[:, 1],
                                      read_chunk=False)
             yield bd_evt
Code example #14
 def jump(self, offsets, sizes):
     """ Jumps to the given offsets and reads out a dgram from each xtc file.
     This is used in normal mode (multiple detectors with MPI).
     """
     assert len(offsets) > 0 and len(sizes) > 0
     dgrams = []
     for fd, config, offset, size in zip(self.fds, self.configs, offsets, sizes):
         d = dgram.Dgram(file_descriptor=fd, config=config, offset=offset, size=size)   
         dgrams += [d]
     
     evt = Event(dgrams, run=self.run())
     return evt
Code example #15
    def next(self):
        """Only supports sequential read (no event building)."""
        if self.shmem:
            view = self.shmem.get(self.shmem_kwargs)
            if view:
                # use the most recent configure datagram
                config = self.configs[len(self.configs) - 1]
                d = dgram.Dgram(config=config,view=view, \
                                shmem_index=self.shmem_kwargs['index'], \
                                shmem_size=self.shmem_kwargs['size'], \
                                shmem_cli=self.shmem_kwargs['cli'])
                dgrams = [d]
            else:
                raise StopIteration
        else:
            dgrams = [dgram.Dgram(config=config) for config in self.configs]

        evt = Event(dgrams)
        self._timestamps += [evt.timestamp]
        return evt
Code example #16
File: datasource.py Project: chuckie82/lcls2
def bd_node(ds, smd_node_id):
    while True:
        comm.Send(np.array([rank], dtype='i'), dest=smd_node_id, tag=13)
        info = MPI.Status()
        comm.Probe(source=smd_node_id, tag=MPI.ANY_TAG, status=info)
        count = info.Get_elements(MPI.BYTE)
        view = bytearray(count)
        comm.Recv(view, source=smd_node_id)
        if view.startswith(b'eof'):
            break

        views = view.split(b'endofevt')
        for event_bytes in views:
            if event_bytes:
                evt = Event().from_bytes(ds.smd_configs, event_bytes)
                # get big data
                ofsz = np.asarray([[d.info.offsetAlg.intOffset, d.info.offsetAlg.intDgramSize] \
                        for d in evt])
                bd_evt = ds.dm.next(offsets=ofsz[:,0], sizes=ofsz[:,1], read_chunk=False)
                yield bd_evt
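
bd_node() above is the pull side of a simple MPI protocol: the big-data node announces itself with a small Send, probes the reply to size a bytearray, and receives a variable-length batch of event bytes. A minimal two-rank mpi4py sketch of that request/probe/receive handshake (run with, e.g., mpirun -n 2 python pull_demo.py); the payloads, tags, and the b'endofevt' delimiter mirror the example, the rest is illustrative:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

if rank == 0:
    # "smd node" side: wait for a worker's request, then reply with a
    # variable-length batch of event bytes.
    req = np.empty(1, dtype='i')
    comm.Recv(req, source=MPI.ANY_SOURCE, tag=13)
    batch = bytearray(b'evt0' + b'endofevt' + b'evt1' + b'endofevt')
    comm.Send(batch, dest=int(req[0]), tag=1)
elif rank == 1:
    # "bd node" side: announce ourselves, probe the reply to size the
    # receive buffer, then receive and split the batch.
    comm.Send(np.array([rank], dtype='i'), dest=0, tag=13)
    info = MPI.Status()
    comm.Probe(source=0, tag=MPI.ANY_TAG, status=info)
    view = bytearray(info.Get_elements(MPI.BYTE))
    comm.Recv(view, source=0)
    print(bytes(view).split(b'endofevt'))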
Code example #17
File: dgrammanager.py Project: chuckie82/lcls2
    def next(self, offsets=[], sizes=[], read_chunk=True):
        assert len(self.offsets) > 0 or len(offsets) > 0

        if len(offsets) == 0: offsets = self.offsets
        if len(sizes) == 0: sizes = [0] * len(offsets)

        dgrams = []
        for fd, config, offset, size in zip(self.fds, self.configs, offsets,
                                            sizes):
            if read_chunk:
                d = dgram.Dgram(config=config, offset=offset)
            else:
                assert size > 0
                d = dgram.Dgram(file_descriptor=fd,
                                config=config,
                                offset=offset,
                                size=size)
            dgrams += [d]

        evt = Event(dgrams=dgrams)
        self.offsets = evt.offsets
        return evt
Code example #18
File: event_manager.py Project: pcdshub/lcls2
 def _calc_offset_and_size(self, first_L1_pos, offsets, sizes):
     for i_evt, event_bytes in enumerate(self.smd_events[first_L1_pos:]):
         j_evt = i_evt + first_L1_pos
         if event_bytes:
             smd_evt = Event._from_bytes(self.smd_configs,
                                         event_bytes,
                                         run=self.dm.get_run())
             for i_smd, smd_dgram in enumerate(smd_evt._dgrams):
                 if self.use_smds[i_smd]:
                     d_size = smd_dgram._size
                     self.bigdata[i_smd].extend(smd_dgram)
                 else:
                     d_size = smd_evt.get_offset_and_size(i_smd)[
                         0, 1]  # only need size
                 if j_evt > 0:
                     prev_d_offset = self.ofsz_batch[j_evt - 1, i_smd, 0]
                     prev_d_size = self.ofsz_batch[j_evt - 1, i_smd, 1]
                     d_offset = prev_d_offset + prev_d_size
                 else:
                     d_offset = 0
                 self.ofsz_batch[j_evt, i_smd] = [d_offset, d_size]
                 sizes[i_smd] += d_size
Code example #19
    def jump(self, offsets, sizes):
        """ Jumps to the given offsets and reads out a dgram from each xtc file.
        This is used in normal mode (multiple detectors with MPI).
        """
        assert len(offsets) > 0 and len(sizes) > 0
        dgrams = []
        for fd, config, offset, size in zip(self.fds, self.configs, offsets,
                                            sizes):
            if offset == 0 and size == 0:
                d = None
            else:
                try:
                    d = dgram.Dgram(file_descriptor=fd,
                                    config=config,
                                    offset=offset,
                                    size=size,
                                    max_retries=self.max_retries)
                except StopIteration:
                    d = None

            dgrams += [d]

        evt = Event(dgrams, run=self.run())
        return evt
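
The jump() variants in this listing (code examples #8, #14 and #19) perform positional reads: for each xtc file, a dgram is read from a file descriptor at a given offset and size, with (0, 0) treated as a missing dgram in example #19. A hedged sketch of the same gather pattern using os.pread on plain files (POSIX-only); the helper name jump_read and the file contents are made up for illustration:

import os
import tempfile

def jump_read(fds, offsets, sizes):
    """Read one record per file descriptor at the given offset and size.

    Mirrors the jump() pattern above: an (offset, size) of (0, 0) marks a
    missing record and yields None instead of data.
    """
    records = []
    for fd, offset, size in zip(fds, offsets, sizes):
        if offset == 0 and size == 0:
            records.append(None)
        else:
            records.append(os.pread(fd, size, offset))
    return records

# Two fake per-detector files, each with a 6-byte header followed by data.
paths = []
for payload in (b'HEADERdetector0-data', b'HEADERdetector1-data'):
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(payload)
        paths.append(f.name)

fds = [os.open(p, os.O_RDONLY) for p in paths]
recs = jump_read(fds, offsets=[6, 6], sizes=[14, 14])
assert recs[0] == b'detector0-data'
for fd in fds:
    os.close(fd)
for p in paths:
    os.unlink(p)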
Code example #20
File: singlefile_ds.py Project: pcdshub/lcls2
 def runs(self):
     while self._start_run():
         run = RunSingleFile(self, Event(dgrams=self.beginruns))
         yield run
Code example #21
File: run.py Project: slac-lcls/lcls2
 def step(self, evt):
     step_dgrams = self.esm.stores['scan'].get_step_dgrams_of_event(evt)
     return Event(dgrams=step_dgrams, run=self)
Code example #22
File: legion_ds.py Project: pcdshub/lcls2
 def runs(self):
     while self._start_run():
         run = RunLegion(self, Event(dgrams=self.beginruns))
         yield run