def __next__(self):
    if self.cn_events == self.n_events:
        raise StopIteration

    # Smalldata-only runs: there are no bigdata files to jump into.
    if len(self.dm.xtc_files) == 0:
        smd_evt = Event._from_bytes(self.smd_configs,
                                    self.smd_events[self.cn_events],
                                    run=self.dm.run())
        self.cn_events += 1
        return smd_evt

    if self.filter_fn:
        # With a filter, fetch bigdata one event at a time via jump().
        smd_evt = Event._from_bytes(self.smd_configs,
                                    self.smd_events[self.cn_events],
                                    run=self.dm.run())
        self.cn_events += 1
        if smd_evt.service() == TransitionId.L1Accept:
            offset_and_size_array = smd_evt.get_offsets_and_sizes()
            bd_evt = self.dm.jump(offset_and_size_array[:, 0],
                                  offset_and_size_array[:, 1])
        else:
            bd_evt = smd_evt  # transitions are returned as-is
        return bd_evt

    # No filter: build the event from the pre-read bigdata chunks.
    dgrams = [None] * self.n_smd_files
    ofsz = self.ofsz_batch[self.cn_events, :, :]
    for j in range(self.n_smd_files):
        if ofsz[j, 1]:
            dgrams[j] = dgram.Dgram(view=self.bigdata[j],
                                    config=self.dm.configs[j],
                                    offset=ofsz[j, 0])
    bd_evt = Event(dgrams, run=self.dm.run())
    self.cn_events += 1
    return bd_evt
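# Usage sketch (not from the source): a class exposing the __next__
# above is a standard Python iterator, so callers drive all three
# paths (smd-only, filtered per-event reads, batched reads) with a
# plain for-loop. The toy class below only illustrates the
# StopIteration/counter contract that __next__ relies on; the real
# enclosing class, whose name is not shown in this section, also
# needs an __iter__ that returns self.
class _EventIterSketch:
    def __init__(self, n_events):
        self.cn_events = 0        # events handed out so far
        self.n_events = n_events  # total events in the batch
    def __iter__(self):
        return self
    def __next__(self):
        if self.cn_events == self.n_events:
            raise StopIteration
        self.cn_events += 1
        return self.cn_events - 1  # a real reader returns an Event

assert list(_EventIterSketch(3)) == [0, 1, 2]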
def __next__(self):
    if self.cn_events == self.n_events:
        raise StopIteration

    # Smalldata-only runs: no bigdata files to read from.
    if len(self.dm.xtc_files) == 0:
        smd_evt = Event._from_bytes(self.smd_configs,
                                    self.smd_events[self.cn_events],
                                    run=self.dm.run())
        self.cn_events += 1
        self._inc_prometheus_counter('evts')
        return smd_evt

    if self.filter_fn:
        # With a filter, fetch bigdata one event at a time via jump().
        smd_evt = Event._from_bytes(self.smd_configs,
                                    self.smd_events[self.cn_events],
                                    run=self.dm.run())
        self.cn_events += 1
        if smd_evt.service() == TransitionId.L1Accept:
            offset_and_size_array = smd_evt.get_offsets_and_sizes()
            bd_evt = self.dm.jump(offset_and_size_array[:, 0],
                                  offset_and_size_array[:, 1])
            read_mb = np.sum(offset_and_size_array[:, 1]) / 1e6
            self._inc_prometheus_counter('MB', read_mb)
            logging.debug('BigData read single %.2f MB', read_mb)
        else:
            bd_evt = smd_evt  # transitions are returned as-is
        self._inc_prometheus_counter('evts')
        return bd_evt

    # No filter: build the event from the pre-read bigdata chunks.
    dgrams = [None] * self.n_smd_files
    ofsz = self.ofsz_batch[self.cn_events, :, :]
    for j in range(self.n_smd_files):
        if ofsz[j, 1]:
            dgrams[j] = dgram.Dgram(view=self.bigdata[j],
                                    config=self.dm.configs[j],
                                    offset=ofsz[j, 0])
    bd_evt = Event(dgrams, run=self.dm.run())
    self.cn_events += 1
    self._inc_prometheus_counter('evts')
    return bd_evt
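# Hedged sketch (not shown in the source): _inc_prometheus_counter is
# called above but not defined in this section. Assuming the
# prometheus_client library, a compatible helper could look like the
# following; the metric names are illustrative assumptions.
from prometheus_client import Counter

_PROM_COUNTERS = {
    'evts': Counter('psana_bd_events_total',
                    'big data events processed'),       # assumed name
    'MB': Counter('psana_bd_megabytes_total',
                  'MB read from big data files'),       # assumed name
}

def _inc_prometheus_counter(self, counter_name, amount=1):
    # Counter.inc() accepts an optional amount, matching the
    # _inc_prometheus_counter('MB', nbytes / 1e6) calls above.
    _PROM_COUNTERS[counter_name].inc(amount)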
def __next__(self):
    if self.cn_events == self.n_events:
        raise StopIteration
    if len(self.dm.xtc_files) == 0:
        smd_evt = Event._from_bytes(self.smd_configs,
                                    self.smd_events[self.cn_events],
                                    run=self.dm.run())
        self.cn_events += 1
        return smd_evt
    if self.filter_fn:
        smd_evt = Event._from_bytes(self.smd_configs,
                                    self.smd_events[self.cn_events],
                                    run=self.dm.run())
        self.cn_events += 1
        ofsz = np.asarray([[d.smdinfo[0].offsetAlg.intOffset,
                            d.smdinfo[0].offsetAlg.intDgramSize]
                           for d in smd_evt])
        bd_evt = self.dm.jump(ofsz[:, 0], ofsz[:, 1])
        return bd_evt
    dgrams = [None] * self.n_smd_files
    ofsz = self.ofsz_batch[self.cn_events, :, :]
    for j in range(self.n_smd_files):
        if ofsz[j, 1]:
            dgrams[j] = dgram.Dgram(view=self.bigdata[j],
                                    config=self.dm.configs[j],
                                    offset=ofsz[j, 0])
    bd_evt = Event(dgrams, run=self.dm.run())
    self.cn_events += 1
    return bd_evt
def _read_bigdata_in_chunk(self):
    """Read bigdata in chunks of 'size' bytes and store them in views.

    Note that the views here contain bigdata (not smd) events. All
    non-L1 dgrams are copied from smd_events and prepended directly
    to the bigdata chunks.
    """
    self.bigdata = []
    for i in range(self.n_smd_files):
        self.bigdata.append(bytearray())

    offsets = [0] * self.n_smd_files
    # Use an ndarray so 'sizes += ofsz[:, 1]' below accumulates
    # elementwise (a plain list would be extended instead).
    sizes = np.zeros(self.n_smd_files, dtype=np.intp)
    self.ofsz_batch = np.zeros((self.n_events, self.n_smd_files, 2),
                               dtype=np.intp)

    # Look for the first L1 event - copy all non-L1 events to the
    # bigdata buffers first.
    first_L1_pos = -1
    for i, event_bytes in enumerate(self.smd_events):
        if event_bytes:
            smd_evt = Event._from_bytes(self.smd_configs, event_bytes,
                                        run=self.dm.run())
            ofsz = smd_evt.get_offsets_and_sizes()
            if smd_evt.service() == TransitionId.L1Accept:
                offsets = ofsz[:, 0]
                first_L1_pos = i
                break
            else:
                for smd_id, d in enumerate(smd_evt._dgrams):
                    if not d:
                        continue
                    self.bigdata[smd_id].extend(d)
            if i > 0:
                self.ofsz_batch[i, :, 0] = (self.ofsz_batch[i - 1, :, 0]
                                            + self.ofsz_batch[i - 1, :, 1])
            self.ofsz_batch[i, :, 1] = ofsz[:, 1]

    if first_L1_pos == -1:
        return

    for i, event_bytes in enumerate(self.smd_events[first_L1_pos:]):
        j = i + first_L1_pos
        if event_bytes:
            smd_evt = Event._from_bytes(self.smd_configs, event_bytes,
                                        run=self.dm.run())
            ofsz = smd_evt.get_offsets_and_sizes()
            if j > 0:
                self.ofsz_batch[j, :, 0] = (self.ofsz_batch[j - 1, :, 0]
                                            + self.ofsz_batch[j - 1, :, 1])
            self.ofsz_batch[j, :, 1] = ofsz[:, 1]
            sizes += ofsz[:, 1]

    # If no data were filtered, we can assume that all bigdata dgrams
    # starting from the first offset are stored consecutively in the
    # file. We read a chunk of sum(all dgram sizes) and store it in
    # a view.
    self._read_chunks_from_disk(self.dm.fds, offsets, sizes)
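# Hedged sketch: _read_chunks_from_disk is called above but not shown
# in this section. Following the lseek/read pattern of the events()
# generator below, one plausible body reads each per-file chunk in a
# single call and appends it to the bigdata buffer; treat this as an
# assumption, not the source implementation.
import os

def _read_chunks_from_disk(self, fds, offsets, sizes):
    for i_smd in range(self.n_smd_files):
        if sizes[i_smd] == 0:
            continue  # nothing to read for this stream
        # Seek to the first L1Accept dgram, then read the whole
        # consecutive run of dgrams (sum of their sizes) at once.
        os.lseek(fds[i_smd], offsets[i_smd], 0)
        self.bigdata[i_smd].extend(os.read(fds[i_smd], sizes[i_smd]))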
def events(self, view):
    pf = PacketFooter(view=view)
    views = pf.split_packets()

    # Keep offset and size for all events in the batch for batch
    # reading (if filter_fn is not given).
    ofsz_batch = np.zeros((pf.n_packets, self.n_smd_files, 2),
                          dtype=np.intp)
    has_offset = True
    for i, event_bytes in enumerate(views):
        if event_bytes:
            evt = Event._from_bytes(self.smd_configs, event_bytes)
            if not evt._has_offset:
                has_offset = False
                yield evt  # no offset info in the smalldata event
            else:
                segment = 0  # "d.info" detectors have only one segment
                ofsz = np.asarray([[d.info[segment].offsetAlg.intOffset,
                                    d.info[segment].offsetAlg.intDgramSize]
                                   for d in evt])
                ofsz_batch[i, :, :] = ofsz

                # Only get bigdata one event at a time when the
                # filter is on.
                if self.filter_fn:
                    bd_evt = self.dm.jump(ofsz[:, 0], ofsz[:, 1])
                    yield bd_evt

    if not self.filter_fn and has_offset:
        # Read chunks of 'size' bytes and store them in views.
        bd_views = [None] * self.n_smd_files
        view_sizes = np.zeros(self.n_smd_files, dtype=np.intp)
        for i in range(self.n_smd_files):
            # If no data were filtered, we can assume that all bigdata
            # dgrams starting from the first offset are stored
            # consecutively in the file. We read a chunk of
            # sum(all dgram sizes) and store it in a view.
            offset = ofsz_batch[0, i, 0]
            size = np.sum(ofsz_batch[:, i, 1])
            view_sizes[i] = size
            os.lseek(self.dm.fds[i], offset, 0)
            bd_views[i] = os.read(self.dm.fds[i], size)

        # Build each event from these views.
        offsets = [0] * self.n_smd_files
        for i in range(pf.n_packets):
            # Reset per event so a zero-size dgram does not leak the
            # previous event's dgram into this one.
            dgrams = [None] * self.n_smd_files
            for j in range(self.n_smd_files):
                if offsets[j] >= view_sizes[j]:
                    continue
                size = ofsz_batch[i, j, 1]
                if size:
                    dgrams[j] = dgram.Dgram(view=bd_views[j],
                                            config=self.dm.configs[j],
                                            offset=offsets[j])
                    offsets[j] += size
            bd_evt = Event(dgrams)
            yield bd_evt
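# Illustration of the splitting idea events() relies on (the real
# PacketFooter wire format is not shown in this section; the layout
# below is a toy assumption): a footer at the end of the buffer
# records each packet's size plus a packet count, so the batch can be
# split into per-event views without scanning the payload.
import struct

def split_packets_sketch(view: bytes) -> list:
    # Toy layout: payload bytes, then one uint32 size per packet,
    # then a trailing uint32 packet count.
    (n_packets,) = struct.unpack_from('<I', view, len(view) - 4)
    footer_start = len(view) - 4 - 4 * n_packets
    sizes = struct.unpack_from(f'<{n_packets}I', view, footer_start)
    packets, offset = [], 0
    for size in sizes:
        packets.append(view[offset:offset + size])
        offset += size
    return packets

buf = b'abc' + b'defg' + struct.pack('<2I', 3, 4) + struct.pack('<I', 2)
assert split_packets_sketch(buf) == [b'abc', b'defg']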
def __next__(self):
    if self.cn_events == self.n_events:
        raise StopIteration

    # Smalldata-only runs: no bigdata files to read from.
    if len(self.dm.xtc_files) == 0:
        smd_evt = Event._from_bytes(self.smd_configs,
                                    self.smd_events[self.cn_events],
                                    run=self.dm.run())
        self.cn_events += 1
        self._inc_prometheus_counter('evts')
        return smd_evt

    if self.filter_fn:
        # With a filter, read bigdata from disk one event at a time.
        smd_evt = Event._from_bytes(self.smd_configs,
                                    self.smd_events[self.cn_events],
                                    run=self.dm.run())
        self.cn_events += 1
        if smd_evt.service() == TransitionId.L1Accept:
            offset_and_size_array = smd_evt.get_offsets_and_sizes()
            bd_evt = self._read_event_from_disk(
                offset_and_size_array[:, 0], offset_and_size_array[:, 1])
            self._inc_prometheus_counter(
                'MB', np.sum(offset_and_size_array[:, 1]) / 1e6)
        else:
            bd_evt = smd_evt  # transitions are returned as-is
        self._inc_prometheus_counter('evts')
        return bd_evt

    # No filter: build the event from the pre-read bigdata chunks.
    dgrams = [None] * self.n_smd_files
    ofsz = self.ofsz_batch[self.cn_events, :, :]
    for j in range(self.n_smd_files):
        d_offset, d_size = ofsz[j]
        # Guard against dgrams that fall outside the buffered chunk.
        if d_size and d_offset + d_size <= \
                memoryview(self.bigdata[j]).nbytes:
            dgrams[j] = dgram.Dgram(view=self.bigdata[j],
                                    config=self.dm.configs[j],
                                    offset=d_offset)
    bd_evt = Event(dgrams, run=self.dm.run())
    self.cn_events += 1
    self._inc_prometheus_counter('evts')
    return bd_evt
def __next__(self):
    if self.cn_events == self.n_events:
        raise StopIteration

    smd_evt = Event._from_bytes(self.smd_configs,
                                self.smd_events[self.cn_events],
                                run=self.dm.get_run())

    # Smalldata-only runs and transitions are returned as-is.
    if len(self.dm.xtc_files) == 0 or \
            smd_evt.service() != TransitionId.L1Accept:
        self.cn_events += 1
        self._inc_prometheus_counter('evts')
        return smd_evt

    if self.filter_fn:
        # With a filter, build the event dgram by dgram: streams
        # flagged in use_smds keep their smalldata dgram; the rest
        # are read from the bigdata files on disk.
        bd_dgrams = []
        read_size = 0
        for smd_i, smd_dgram in enumerate(smd_evt._dgrams):
            if self.use_smds[smd_i]:
                bd_dgrams.append(smd_dgram)
            else:
                offset_and_size = smd_evt.get_offset_and_size(smd_i)
                read_size += offset_and_size[0, 1]
                bd_dgrams.append(
                    self._read_dgram_from_disk(smd_i, offset_and_size))
        bd_evt = Event(dgrams=bd_dgrams, run=self.dm.get_run())
        self.cn_events += 1
        self._inc_prometheus_counter('MB', read_size / 1e6)
        self._inc_prometheus_counter('evts')
        return bd_evt

    # No filter: build the event from the pre-read bigdata chunks.
    dgrams = [None] * self.n_smd_files
    ofsz = self.ofsz_batch[self.cn_events, :, :]
    for i_smd in range(self.n_smd_files):
        d_offset, d_size = ofsz[i_smd]
        # Guard against dgrams that fall outside the buffered chunk.
        if d_size and d_offset + d_size <= \
                memoryview(self.bigdata[i_smd]).nbytes:
            dgrams[i_smd] = dgram.Dgram(view=self.bigdata[i_smd],
                                        config=self.dm.configs[i_smd],
                                        offset=d_offset)
    bd_evt = Event(dgrams, run=self.dm.get_run())
    self.cn_events += 1
    self._inc_prometheus_counter('evts')
    return bd_evt
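# Hedged sketch: _read_dgram_from_disk is used above but not defined
# in this section. Assuming self.dm.fds holds one descriptor per
# stream and that a Dgram can be built from a freshly read view (as in
# the batched path), a plausible body is below; offset=0 into the
# private view is an assumption.
import os

def _read_dgram_from_disk(self, i_smd, offset_and_size):
    d_offset, d_size = offset_and_size[0]
    # os.pread reads at an absolute offset without moving the shared
    # file position, so per-event jumps stay independent.
    view = os.pread(self.dm.fds[i_smd], d_size, d_offset)
    return dgram.Dgram(view=view, config=self.dm.configs[i_smd], offset=0)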
def _calc_offset_and_size(self, first_L1_pos, offsets, sizes):
    for i_evt, event_bytes in enumerate(self.smd_events[first_L1_pos:]):
        j_evt = i_evt + first_L1_pos
        if event_bytes:
            smd_evt = Event._from_bytes(self.smd_configs, event_bytes,
                                        run=self.dm.get_run())
            for i_smd, smd_dgram in enumerate(smd_evt._dgrams):
                if self.use_smds[i_smd]:
                    # This stream is served from smalldata: copy the
                    # dgram into the bigdata buffer directly.
                    d_size = smd_dgram._size
                    self.bigdata[i_smd].extend(smd_dgram)
                else:
                    # Only the on-disk size is needed here.
                    d_size = smd_evt.get_offset_and_size(i_smd)[0, 1]
                if j_evt > 0:
                    prev_d_offset = self.ofsz_batch[j_evt - 1, i_smd, 0]
                    prev_d_size = self.ofsz_batch[j_evt - 1, i_smd, 1]
                    d_offset = prev_d_offset + prev_d_size
                else:
                    d_offset = 0
                self.ofsz_batch[j_evt, i_smd] = [d_offset, d_size]
                sizes[i_smd] += d_size
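# Worked illustration of the ofsz_batch layout built above:
# ofsz_batch[j_evt, i_smd] holds [offset, size] of event j_evt's dgram
# inside the per-file bigdata buffer, so each file's offsets are a
# running prefix sum of its sizes. Note how a zero-size event leaves
# the offset unchanged, which is why readers skip dgrams with size 0.
import numpy as np

n_events, n_smd_files = 4, 1
dgram_sizes = np.array([100, 0, 40, 60])  # example sizes for one stream
ofsz_batch = np.zeros((n_events, n_smd_files, 2), dtype=np.intp)
for j in range(n_events):
    if j > 0:
        ofsz_batch[j, 0, 0] = ofsz_batch[j - 1, 0, 0] + ofsz_batch[j - 1, 0, 1]
    ofsz_batch[j, 0, 1] = dgram_sizes[j]
print(ofsz_batch[:, 0, :].tolist())  # [[0, 100], [100, 0], [100, 40], [140, 60]]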