def update_by_event(self, evt):
    """Feed the 'epics'/'scan' stores from the dgrams of *evt*.

    SlowUpdate transitions go to the 'epics' store; BeginStep/BeginRun
    go to the 'scan' store, and for those the matching fields of
    self.configs are refreshed from the incoming dgram.
    """
    if not evt:
        return
    for i, raw in enumerate(evt._dgrams):
        if not raw:
            continue
        # Re-wrap the dgram so the original object can be released
        # (friendly with shared memory, which has limited capacity).
        dg = Dgram(view=raw, config=self.configs[i], offset=0)
        svc = dg.service()
        if svc == TransitionId.SlowUpdate:
            self.stores['epics'].add_to(dg, i)
        elif svc in (TransitionId.BeginStep, TransitionId.BeginRun):
            self.stores['scan'].add_to(dg, i)
            # For BeginStep, check whether self.configs needs updating.
            # Only apply fields without a leading "_" that already exist
            # in the original config.
            for key in raw.__dict__:
                if key.startswith("_") or not hasattr(self.configs[i], key):
                    continue
                cfgold = getattr(self.configs[i], key)
                for segid, segment in getattr(dg, key).items():
                    # Only apply to segments that carry a .config
                    if hasattr(segment, "config"):
                        self._update_config(cfgold[segid].config,
                                            getattr(segment, "config"))
def write_dgram(config, f_out, offset, skip=False):
    """Construct the next Dgram from *config* and append it to *f_out*.

    When *skip* is true the dgram is accounted for but not written.
    Returns *offset* advanced by the dgram's byte size.
    """
    d = Dgram(config=config)
    nbytes = memoryview(d).nbytes
    if not skip:
        f_out.write(d)
    print(f'write dgram {nbytes} bytes')
    return offset + nbytes
def update(self, views):
    """Updates the store with new data from list of views."""
    if not views:
        return
    for idx in range(self.n_files):
        buf = bytearray(views[idx])
        manager = self.step_managers[idx]
        total = memoryview(buf).nbytes
        pos = 0
        # Walk every dgram packed into this view.
        while pos < total:
            dgram = Dgram(view=buf, config=manager.config, offset=pos)
            # Only keep dgrams that actually carry the step field.
            if hasattr(dgram, self.step_name):
                manager.add(dgram)
            pos += dgram._size
def update_by_event(self, evt):
    """Route each dgram of *evt* into the store matching its field name."""
    if not evt:
        return
    for i, raw in enumerate(evt._dgrams):
        if not raw:
            continue
        # Re-wrap so the original dgram object can be released
        # (friendly with shared memory, which has limited capacity).
        dg = Dgram(view=raw, config=self.configs[i], offset=0)
        for key in raw.__dict__:
            if key in self.stores:
                self.stores[key].add_to(dg, i)
def update_by_views(self, views):
    """Walk every dgram packed in each view and feed the matching stores."""
    if not views:
        return
    for i, raw_view in enumerate(views):
        buf = bytearray(raw_view)
        size = memoryview(buf).shape[0]
        pos = 0
        while pos < size:
            dg = Dgram(view=buf, config=self.configs[i], offset=pos)
            for key in dg.__dict__:
                if key in self.stores:
                    self.stores[key].add_to(dg, i)
            pos += dg._size
def run_smd0(n_events):
    """Benchmark: drain all smalldata files in batches of *n_events*.

    Returns a dict with
      'each_read'      - per get() call: [got_events, smd_chunk_nbytes,
                         step_chunk_nbytes]
      'total_n_events' - total number of events consumed.

    NOTE(review): relies on module globals (xtc_dir, glob, os, sys, time,
    Dgram, SmdReader) defined elsewhere in this file.
    """
    filenames = glob.glob(os.path.join(xtc_dir, '.tmp', 'smalldata', '*.xtc2'))
    fds = [os.open(filename, os.O_RDONLY) for filename in filenames]
    # Move file ptrs to datagram part
    configs = [Dgram(file_descriptor=fd) for fd in fds]
    limit = len(filenames)
    if len(sys.argv) > 1:
        limit = int(sys.argv[1])
    # NOTE(review): st/en are recorded but the elapsed time is never reported.
    st = time.time()
    smdr = SmdReader(fds[:limit])
    got_events = -1
    processed_events = 0
    smdr.get(n_events)
    got_events = smdr.got_events
    result = {'each_read': [], 'total_n_events': 0}
    cn_i = 0  # read-iteration counter; NOTE(review): incremented but unused
    while got_events != 0:
        step_chunk_nbytes = 0
        smd_chunk_nbytes = 0
        for i in range(limit):
            smd_view = smdr.view(i)
            if smd_view:
                smd_chunk_nbytes += smd_view.nbytes
            # update=True presumably selects the step (transition) buffer
            # of the reader - TODO confirm against SmdReader.
            step_view = smdr.view(i, update=True)
            if step_view:
                step_chunk_nbytes += step_view.nbytes
        result['each_read'].append(
            [got_events, smd_chunk_nbytes, step_chunk_nbytes])
        processed_events += got_events
        # Read more events
        smdr.get(n_events)
        got_events = smdr.got_events
        cn_i += 1
    en = time.time()
    result['total_n_events'] = processed_events
    for fd in fds:
        os.close(fd)
    return result
def repack_for_bd(smd_batch, step_views, configs):
    """EventBuilder Node uses this to prepend missing step views to the
    smd_batch. Unlike repack_for_eb (used by Smd0), this output chunk
    contains a list of pre-built events.

    Returns *smd_batch* unchanged when there are no step views; otherwise
    returns a new bytearray laid out as
    [step event 0][step event 1]...[original batch events][new footer].
    """
    if step_views:
        batch_pf = PacketFooter(view=smd_batch)
        # Create bytearray containing a list of events from step_views
        steps = bytearray()
        n_smds = len(step_views)
        offsets = [0] * n_smds  # per-stream read cursor into step_views
        n_steps = 0
        step_sizes = []  # total size of each built step event (dgrams + footer)
        # Stream 0's cursor drives termination; assumes every stream holds
        # the same number of step dgrams - TODO confirm with callers.
        while offsets[0] < memoryview(step_views[0]).nbytes:
            step_pf = PacketFooter(n_packets=n_smds)
            step_size = 0
            for i, (config, view) in enumerate(zip(configs, step_views)):
                d = Dgram(config=config, view=view, offset=offsets[i])
                steps.extend(d)
                offsets[i] += d._size
                step_size += d._size
                step_pf.set_size(i, d._size)
            steps.extend(step_pf.footer)
            step_sizes.append(step_size + memoryview(step_pf.footer).nbytes)
            n_steps += 1
        # Create new batch with total_events = smd_batch_events + step_events;
        # step events occupy the first n_steps packet slots, followed by the
        # original batch's packets (sizes copied from the old footer).
        new_batch_pf = PacketFooter(n_packets=batch_pf.n_packets + n_steps)
        for i in range(n_steps):
            new_batch_pf.set_size(i, step_sizes[i])
        for i in range(n_steps, new_batch_pf.n_packets):
            new_batch_pf.set_size(i, batch_pf.get_size(i - n_steps))
        new_batch = bytearray()
        new_batch.extend(steps)
        # Append the original batch minus its old footer, then the new footer.
        new_batch.extend(smd_batch[:memoryview(smd_batch).nbytes -
                                   memoryview(batch_pf.footer).nbytes])
        new_batch.extend(new_batch_pf.footer)
        return new_batch
    else:
        return smd_batch
def run_smd0():
    """Benchmark: drain smalldata files in smd0_batch_size chunks up to
    max_events, then print the achieved event rate.

    NOTE(review): relies on module globals (glob, os, sys, time, np, Dgram,
    SmdReader, chunksize, smd0_batch_size, max_events) defined elsewhere
    in this file.
    """
    #filenames = glob.glob('/reg/neh/home/monarin/psana-nersc/psana2/.tmp/smalldata/*.xtc2')
    filenames = glob.glob('/ffb01/mona/.tmp/smalldata/*.xtc2')
    fds = np.array([os.open(filename, os.O_RDONLY) for filename in filenames],
                   dtype=np.int32)
    # Move file ptrs to datagram part
    configs = [Dgram(file_descriptor=fd) for fd in fds]
    # NOTE(review): beginRun is built but never used afterwards.
    beginRun = [Dgram(config=config) for config in configs]
    limit = len(filenames)
    if len(sys.argv) > 1:
        limit = int(sys.argv[1])
    st = time.time()
    smdr = SmdReader(fds[:limit], chunksize)
    # NOTE(review): got_events is never read; smdr.got_events is used directly.
    got_events = -1
    processed_events = 0
    offsets = np.zeros(limit, dtype=np.uint64)
    # Clamp the batch size so we never request past max_events.
    how_many = smd0_batch_size
    to_be_read = max_events - processed_events
    if to_be_read < how_many:
        how_many = to_be_read
    smdr.get(how_many)
    while smdr.got_events > 0:
        for i in range(limit):
            view = smdr.view(i)
            # Dead debug code kept verbatim below (triple-quoted string).
            """
            if view:
                cn_dgrams = 0
                while offsets[i] < view.shape[0]:
                    d = Dgram(config=configs[i], view=view, offset=offsets[i])
                    print(f' buf{i} d_id: {cn_dgrams} d_ts {d.timestamp() & 0xffffffff}')
                    offsets[i] += d._size
                    cn_dgrams += 1
                #print(f'smdr_man got {memoryview(view).nbytes}')
            else:
                #print(f' buf[{i} empty')
                pass
            """
        processed_events += smdr.got_events
        if processed_events >= max_events:
            break
        # Recompute the clamped batch size for the next read.
        how_many = smd0_batch_size
        to_be_read = max_events - processed_events
        if to_be_read < how_many:
            how_many = to_be_read
        smdr.get(how_many)
        offsets[:] = 0
    # Dead alternative read loop kept verbatim below (triple-quoted string).
    """
    while smdr.got_events != 0:
        if smdr.got_events > 0:
            processed_events += smdr.got_events
            if processed_events >= max_events:
                break
            for i in range(limit):
                smdr.view(i)
        smdr.get(n_events)
        print(f'smdr.got_events={smdr.got_events}')
    """
    en = time.time()
    print("#Events: %d Elapsed Time (s): %f Rate (MHz): %f" %
          (processed_events, (en - st), processed_events / ((en - st) * 1e6)))
def write_dgram(config, f_out, offset, skip=False):
    """Construct the next Dgram from *config* and append it to *f_out*.

    Parameters
    ----------
    config :
        Configure dgram; Dgram(config=...) presumably reads the next
        dgram from the file behind it - TODO confirm against psana.dgram.
    f_out :
        Binary file object opened for writing.
    offset : int
        Running byte offset; advanced by this dgram's size.
    skip : bool, optional
        When True, account for the dgram's size but do not write it.
        Defaults to False, preserving the original always-write behavior
        and matching the other ``write_dgram`` variant in this file.

    Returns
    -------
    int
        The advanced offset.
    """
    d = Dgram(config=config)
    # Compute the size once instead of re-creating a memoryview three times.
    nbytes = memoryview(d).nbytes
    offset += nbytes
    if not skip:
        f_out.write(d)
    print(f'write dgram {nbytes} bytes')
    return offset
def write_config(fd_in, f_out):
    """Read the configure dgram from *fd_in* and copy it to *f_out*.

    Returns a (offset, config) tuple where offset is the number of
    bytes written.
    """
    config = Dgram(file_descriptor=fd_in)
    mv = memoryview(config)
    offset = mv.nbytes
    f_out.write(config)
    print(f'write config {offset} bytes')
    return offset, config
def get_config(fd_in):
    """Read the configure dgram from *fd_in* without writing it anywhere.

    Returns a (nbytes, config) tuple, where nbytes is the size of the
    configure dgram just consumed.
    """
    config = Dgram(file_descriptor=fd_in)
    return memoryview(config).nbytes, config
from psana.dgram import Dgram
import os

# Keep only every SLOWNESS-th dgram with service() == 12 (presumably the
# SlowUpdate transition id - confirm against TransitionId); every other
# dgram passes the filter.
SLOWNESS = 10

file_in = "/reg/neh/home/monarin/psana-nersc/psana2/.tmp/data-r0001-s01.xtc2"
fd = os.open(file_in, os.O_RDONLY)
file_in_size = os.path.getsize(file_in)
# NOTE(review): fd and f are never closed; also only the configure dgram is
# ever written to junk.xtc2 - if filtered dgrams were meant to be copied,
# an f.write(d) inside the `if write_ok:` branch appears to be missing.
f = open("junk.xtc2", "wb")

# Configure dgram; reading it advances fd to the first data dgram.
config = Dgram(file_descriptor=fd)
offset = memoryview(config).nbytes
cn_dgrams = 1    # dgrams seen so far (the config counts as the first)
cn_for_slow = 0  # service-12 dgrams seen since the last one kept
f.write(config)
while offset < file_in_size:
    # Dgram(config=config) presumably reads the next dgram from the file
    # behind the config's descriptor - TODO confirm against psana.dgram.
    d = Dgram(config=config)
    offset += memoryview(d).nbytes
    write_ok = False
    if d.service() != 12:
        write_ok = True
    else:
        # Let only one out of every SLOWNESS service-12 dgrams through.
        if cn_for_slow == SLOWNESS - 1:
            write_ok = True
            cn_for_slow = 0
        else:
            cn_for_slow += 1
    if write_ok:
        print(memoryview(d).nbytes, d.timestamp(), offset, d.service())
        cn_dgrams += 1