def setup_raw_data_analysis(self):
    """Create one configured raw-data interpreter and one empty hit buffer per module.

    Fills ``self.interpreters`` with ``PyDataInterpreter`` instances (one per
    ``self.n_modules``) configured identically to the per-process workers in
    ``_module_worker``, and ``self.hits`` with zero-length structured arrays of
    dtype ``self.multi_chip_event_dtype`` that later hit chunks can be
    concatenated onto.
    """
    self.interpreters = []
    self.hits = []
    for _ in range(self.n_modules):
        interpreter = PyDataInterpreter()
        # Same configuration as in _module_worker — keep the two in sync.
        interpreter.create_empty_event_hits(True)
        interpreter.set_trigger_data_format(1)
        interpreter.align_at_trigger(True)
        interpreter.set_warning_output(False)
        interpreter.set_FEI4B(True)
        self.interpreters.append(interpreter)
        # np.empty with order='C' is already C-contiguous, so the former
        # np.ascontiguousarray(...) wrapper was a no-op and was removed.
        self.hits.append(np.empty(shape=(0,), dtype=self.multi_chip_event_dtype, order='C'))
def _module_worker(self, socket_addr, moduleID, send_end):
    '''Readout worker loop for one FE chip: pull raw data over ZMQ, interpret it, send hits back.

    One worker for each FE chip, since RAW data comes from the FIFO separated by
    moduleID. It is necessary to instantiate zmq.Context() in the calling method,
    otherwise it has no access when called as a multiprocessing.Process.

    Parameters
    ----------
    socket_addr : str
        ZMQ address this worker binds its PULL socket to.
    moduleID : int
        Index of the FE module; also indexes the per-module flag lists
        (worker_reset_finished, worker_finished_flags, send_data_flag).
    send_end : object with .send()
        Presumably the send end of a multiprocessing.Pipe — TODO confirm;
        receives the accumulated hit array at end of spill.

    Runs until ``self._stop_readout`` is set.
    '''
    context = zmq.Context()
    socket_pull = context.socket(zmq.PULL)  # subscriber
    # socket_pull.setsockopt(zmq.SUBSCRIBE, '')  # do not filter any data needed for PUB/SUB but not for PUSH/PULL
    socket_pull.bind(socket_addr)
    self.logger.info("Worker %s started, socket %s" % (moduleID, socket_addr))
    # Accumulator for interpreted hits of the current spill; grown by np.concatenate below.
    hit_array = np.zeros(shape=(0,), dtype=self.multi_chip_event_dtype, order='C')
    # Counts drain-loop passes after end-of-spill (see `while counter < 3` below).
    counter = 0
    # Each worker owns its private interpreter; configuration mirrors
    # setup_raw_data_analysis — keep the two in sync.
    interpreter = PyDataInterpreter()
    interpreter.create_empty_event_hits(True)
    interpreter.set_trigger_data_format(1)
    interpreter.align_at_trigger(True)
    interpreter.set_warning_output(False)
    interpreter.set_FEI4B(True)
    while not self._stop_readout.wait(0.01):  # use wait(), do not block here
        # Reset request: clear interpreter state exactly once per request,
        # signalled back through worker_reset_finished[moduleID].
        if self.worker_reset_flag.is_set() and not self.worker_reset_finished[moduleID].is_set():
            with self.reset_lock:
                interpreter.reset()
                self.logger.info("Resetting worker %s" % (moduleID))
                self.worker_reset_finished[moduleID].set()
        if self.EoS_flag.is_set() and not self.worker_finished_flags[moduleID].is_set():
            # EoS_flag is set in run_control after reception of EoS command.
            # Drain phase: poll the socket three more times (non-blocking, with
            # a short wait each pass) to pick up data still in flight.
            while counter < 3:
                self.dummy_flag.wait(0.03)  # dummy_flag is (presumably) never set — used only as a sleep; TODO confirm
                try:
                    meta_data = socket_pull.recv_json(flags=zmq.NOBLOCK)
                except zmq.Again:
                    pass  # nothing pending this pass
                else:
                    name = meta_data.pop('name')
                    # Messages other than 'ReadoutData' are silently ignored.
                    if name == 'ReadoutData':
                        data = socket_pull.recv()
                        # reconstruct numpy array
                        # NOTE(review): buffer() is a Python-2-only builtin
                        # (memoryview in Python 3).
                        buf = buffer(data)
                        dtype = meta_data.pop('dtype')
                        shape = meta_data.pop('shape')
                        data_array = np.frombuffer(buf, dtype=dtype).reshape(shape)
                        interpreter.interpret_raw_data(data_array)
                        # self.analyze_raw_data(raw_data=np.ascontiguousarray(data_array), module=moduleID)
                        # build new array with moduleID, take only necessary data
                        hits = interpreter.get_hits()
                        # hits = self.interpreters[moduleID].get_hits()
                        module_hits = np.zeros(shape=(hits.shape[0],), dtype=self.multi_chip_event_dtype, order='C')
                        module_hits['event_number'] = hits['event_number']
                        module_hits['trigger_number'] = hits['trigger_number']
                        module_hits['trigger_time_stamp'] = hits['trigger_time_stamp']
                        module_hits['relative_BCID'] = hits['relative_BCID']
                        module_hits['column'] = hits['column']
                        module_hits['row'] = hits['row']
                        module_hits['tot'] = hits['tot']
                        module_hits['moduleID'] = moduleID
                        hit_array = np.concatenate((hit_array, module_hits))
                counter += 1
            # if hit_array.shape[0] > 0:
            #     self.logger.info("hit array shape worker %s before store event" % moduleID, hit_array.shape)
            #     self.logger.info("hit array worker %s last entry before store event" % moduleID, hit_array[-1])
            # interpreter.store_event()
            #
            # hits = interpreter.get_hits()
            #
            # module_hits = np.zeros(shape=(hits.shape[0],),dtype = self.multi_chip_event_dtype, order = 'C')
            # module_hits['event_number'] = hits['event_number']
            # module_hits['trigger_number'] = hits['trigger_number']
            # module_hits['trigger_time_stamp'] = hits['trigger_time_stamp']
            # module_hits['relative_BCID'] = hits['relative_BCID']
            # module_hits['column'] = hits['column']
            # module_hits['row'] = hits['row']
            # module_hits['tot'] = hits['tot']
            # module_hits['moduleID'] = moduleID
            # # append chunk to hit array
            # hit_array = np.concatenate((hit_array,module_hits))
            # if hit_array.shape[0] > 0:
            #     self.logger.info("hit array shape worker %s " % moduleID, hit_array.shape)
            #     self.logger.info("hit array worker %s last entry" % moduleID, hit_array[-1])
            self.send_data_flag[moduleID].set()
            if hit_array.shape[0] > 0 and self.n_spills.value > 1:
                # Drop all hits sharing the very first event number —
                # presumably an incomplete event carried over from the previous
                # spill; TODO confirm intent.
                hit_array2 = hit_array[np.where(hit_array['event_number'] != hit_array[0]['event_number'])]
                send_array = hit_array2.copy()
            else:
                send_array = hit_array.copy()
            send_end.send(send_array)
            # send_array = hit_array.copy()
            # send_end.send(hit_array)
            self.worker_finished_flags[moduleID].set()
            self.logger.info("Worker %s finished, received %s hits" % (moduleID, hit_array.shape))
            # Reset accumulator and drain counter for the next spill.
            hit_array = np.zeros(shape=(0,), dtype=self.multi_chip_event_dtype, order='C')
            counter = 0
        # Normal (in-spill) receive path — same decode/interpret sequence as
        # the drain loop above.
        try:
            meta_data = socket_pull.recv_json(flags=zmq.NOBLOCK)
        except zmq.Again:
            pass  # no data pending; loop again
        else:
            name = meta_data.pop('name')
            if name == 'ReadoutData':
                data = socket_pull.recv()
                # reconstruct numpy array
                # NOTE(review): buffer() is a Python-2-only builtin
                # (memoryview in Python 3).
                buf = buffer(data)
                dtype = meta_data.pop('dtype')
                shape = meta_data.pop('shape')
                data_array = np.frombuffer(buf, dtype=dtype).reshape(shape)
                interpreter.interpret_raw_data(data_array)
                # self.analyze_raw_data(raw_data=np.ascontiguousarray(data_array), module=moduleID)
                # build new array with moduleID, take only necessary data
                hits = interpreter.get_hits()
                # hits = self.interpreters[moduleID].get_hits()
                module_hits = np.zeros(shape=(hits.shape[0],), dtype=self.multi_chip_event_dtype, order='C')
                module_hits['event_number'] = hits['event_number']
                module_hits['trigger_number'] = hits['trigger_number']
                module_hits['trigger_time_stamp'] = hits['trigger_time_stamp']
                module_hits['relative_BCID'] = hits['relative_BCID']
                module_hits['column'] = hits['column']
                module_hits['row'] = hits['row']
                module_hits['tot'] = hits['tot']
                module_hits['moduleID'] = moduleID
                # append chunk to hit array
                # hit_array = np.r_[hit_array,module_hits]
                hit_array = np.concatenate((hit_array, module_hits))