class DataWorker(QtCore.QObject):
    """Receive raw FE-I4 readout data over a ZeroMQ SUB socket, interpret and
    histogram it, and re-publish the results as Qt signals.

    Intended usage: move this object to a worker thread with
    ``QObject.moveToThread()`` and invoke :meth:`process_data`, which polls
    the socket in a loop without blocking the Qt event loop.
    """

    # Qt signals re-emitting the message types received over ZMQ.
    run_start = QtCore.pyqtSignal()
    run_config_data = QtCore.pyqtSignal(dict)
    global_config_data = QtCore.pyqtSignal(dict)
    filename = QtCore.pyqtSignal(dict)
    interpreted_data = QtCore.pyqtSignal(dict)
    meta_data = QtCore.pyqtSignal(dict)
    finished = QtCore.pyqtSignal()

    def __init__(self):
        """Initialise counters, the stop event, the analysis chain and the reset lock."""
        QtCore.QObject.__init__(self)
        # Histograms are cleared every `integrate_readouts` readouts; 0 means
        # integrate forever (never auto-reset, emit after every readout).
        self.integrate_readouts = 1
        self.n_readout = 0  # readout counter, reset together with the histograms
        self._stop_readout = Event()  # set by stop() to terminate process_data()
        self.setup_raw_data_analysis()
        # Guards interpreter/histogrammer against reset() racing process_data().
        self.reset_lock = Lock()

    def setup_raw_data_analysis(self):
        """Create and configure the raw-data interpreter and histogrammer.

        Older interpreter/histogrammer builds lack the TDC-distance API;
        `has_tdc_distance` records whether it is available so that
        :meth:`process_data` can substitute an empty histogram instead.
        """
        self.interpreter = PyDataInterpreter()
        self.histogram = PyDataHistograming()
        self.interpreter.set_warning_output(False)
        self.histogram.set_no_scan_parameter()
        self.histogram.create_occupancy_hist(True)
        self.histogram.create_rel_bcid_hist(True)
        self.histogram.create_tot_hist(True)
        self.histogram.create_tdc_hist(True)
        try:
            # Optional feature: probe for the TDC-distance API (AttributeError
            # on builds that do not provide it).
            self.histogram.create_tdc_distance_hist(True)
            self.interpreter.use_tdc_trigger_time_stamp(True)
        except AttributeError:
            self.has_tdc_distance = False
        else:
            self.has_tdc_distance = True

    def connect(self, socket_addr):
        """Open a ZMQ SUB socket subscribed (unfiltered) to `socket_addr`.

        NOTE(review): this method name shadows the old-style
        ``QObject.connect`` signal API — confirm nothing relies on the
        inherited method.
        """
        self.socket_addr = socket_addr
        self.context = zmq.Context()
        self.socket_pull = self.context.socket(zmq.SUB)  # subscriber
        # NOTE(review): the empty-string topic is Python 2 style; Python 3
        # pyzmq requires bytes (b'') here.
        self.socket_pull.setsockopt(zmq.SUBSCRIBE,
                                    '')  # do not filter any data
        self.socket_pull.connect(self.socket_addr)

    def on_set_integrate_readouts(self, value):
        # Slot: update the integration window (0 = integrate forever).
        self.integrate_readouts = value

    def reset(self):
        """Clear histograms, interpreter state and the readout counter atomically."""
        with self.reset_lock:
            self.histogram.reset()
            self.interpreter.reset()
            self.n_readout = 0

    def analyze_raw_data(self, raw_data):
        # Interpret one raw-data chunk and accumulate its hits into the histograms.
        self.interpreter.interpret_raw_data(raw_data)
        self.histogram.add_hits(self.interpreter.get_hits())

    def process_data(
        self
    ):  # infinite loop via QObject.moveToThread(), does not block event loop
        """Poll the ZMQ socket until stop() is called.

        Each JSON message carries a 'name' key selecting the action:
        'ReadoutData' (followed by a raw-data frame), 'RunConf',
        'GlobalRegisterConf', 'Reset' or 'Filename'. Emits `finished`
        when the loop exits.
        """
        while (not self._stop_readout.wait(0.01)
               ):  # use wait(), do not block here
            with self.reset_lock:
                try:
                    meta_data = self.socket_pull.recv_json(flags=zmq.NOBLOCK)
                except zmq.Again:
                    pass  # no message pending; poll again after the wait
                else:
                    name = meta_data.pop('name')
                    if name == 'ReadoutData':
                        # Second message part: the raw data payload itself.
                        data = self.socket_pull.recv()
                        # reconstruct numpy array
                        # NOTE(review): buffer() is Python 2 only — memoryview
                        # would be the Python 3 equivalent.
                        buf = buffer(data)
                        dtype = meta_data.pop('dtype')
                        shape = meta_data.pop('shape')
                        data_array = np.frombuffer(buf,
                                                   dtype=dtype).reshape(shape)
                        # count readouts and reset
                        self.n_readout += 1
                        # Start of an integration window: clear the histograms.
                        if self.integrate_readouts != 0 and self.n_readout % self.integrate_readouts == 0:
                            self.histogram.reset()
                            # we do not want to reset interpreter to keep the error counters
        #                         self.interpreter.reset()
        # interpreted data
                        self.analyze_raw_data(data_array)
                        # End of an integration window (or every readout when
                        # integrating forever): publish the histograms.
                        if self.integrate_readouts == 0 or self.n_readout % self.integrate_readouts == self.integrate_readouts - 1:
                            interpreted_data = {
                                'occupancy':
                                self.histogram.get_occupancy(),
                                'tot_hist':
                                self.histogram.get_tot_hist(),
                                'tdc_counters':
                                self.interpreter.get_tdc_counters(),
                                # Empty placeholder when the TDC-distance API
                                # is unavailable (see setup_raw_data_analysis).
                                'tdc_distance':
                                self.interpreter.get_tdc_distance()
                                if self.has_tdc_distance else np.zeros(
                                    (256, ), dtype=np.uint8),
                                'error_counters':
                                self.interpreter.get_error_counters(),
                                'service_records_counters':
                                self.interpreter.get_service_records_counters(
                                ),
                                'trigger_error_counters':
                                self.interpreter.get_trigger_error_counters(),
                                'rel_bcid_hist':
                                self.histogram.get_rel_bcid_hist()
                            }
                            self.interpreted_data.emit(interpreted_data)
                        # meta data
                        meta_data.update({
                            'n_hits':
                            self.interpreter.get_n_hits(),
                            'n_events':
                            self.interpreter.get_n_events()
                        })
                        self.meta_data.emit(meta_data)
                    elif name == 'RunConf':
                        self.run_config_data.emit(meta_data)
                    elif name == 'GlobalRegisterConf':
                        # Trigger count is needed by the interpreter for
                        # correct event building.
                        trig_count = int(meta_data['conf']['Trig_Count'])
                        self.interpreter.set_trig_count(trig_count)
                        self.global_config_data.emit(meta_data)
                    elif name == 'Reset':
                        self.histogram.reset()
                        self.interpreter.reset()
                        self.run_start.emit()
                    elif name == 'Filename':
                        self.filename.emit(meta_data)
        self.finished.emit()

    def stop(self):
        # Signal process_data() to exit its polling loop.
        self._stop_readout.set()
# Example #2 — the method below belongs to a different worker class whose
# class definition (and the remainder of this method) is not part of this
# excerpt.
    def _module_worker(self,socket_addr, moduleID, send_end):
        '''one worker for each FE chip, since RAW data comes from FIFO separated by moduleID.
           It is necessary to instantiate zmq.Context() in calling method. Otherwise it has no acces when called as multiprocessing.process.
        '''
        context = zmq.Context()
        socket_pull = context.socket(zmq.PULL)  # subscriber
#         socket_pull.setsockopt(zmq.SUBSCRIBE, '')  # do not filter any data needed for PUB/SUB but not for PUSH/PULL
        socket_pull.bind(socket_addr)
        self.logger.info("Worker %s started, socket %s" % (moduleID, socket_addr))
        hit_array = np.zeros(shape=(0,),dtype = self.multi_chip_event_dtype,order='C')
        counter = 0
        interpreter = PyDataInterpreter()
        interpreter.create_empty_event_hits(True)
        interpreter.set_trigger_data_format(1)
        interpreter.align_at_trigger(True)
        interpreter.set_warning_output(False)
        interpreter.set_FEI4B(True)
        
        while not self._stop_readout.wait(0.01) :  # use wait(), do not block here
            if self.worker_reset_flag.is_set() and not self.worker_reset_finished[moduleID].is_set():
                with self.reset_lock:
                    interpreter.reset()
                    self.logger.info("Resetting worker %s" % (moduleID))
                    self.worker_reset_finished[moduleID].set()
            if self.EoS_flag.is_set() and not self.worker_finished_flags[moduleID].is_set(): # EoS_flag is set in run_control after reception of EoS command 
                while counter < 3:
                    self.dummy_flag.wait(0.03)
                    try:
                        meta_data = socket_pull.recv_json(flags=zmq.NOBLOCK)
                    except zmq.Again:
                        pass
                    else:
                        name = meta_data.pop('name')
                        if name == 'ReadoutData':
                            data = socket_pull.recv()
                            # reconstruct numpy array
                            buf = buffer(data)
                            dtype = meta_data.pop('dtype')
                            shape = meta_data.pop('shape')
                            data_array = np.frombuffer(buf, dtype=dtype).reshape(shape)
                            
                            interpreter.interpret_raw_data(data_array)
        #                     self.analyze_raw_data(raw_data=np.ascontiguousarray(data_array), module=moduleID)
                            # build new array with moduleID, take only necessary data
                            hits = interpreter.get_hits()
        #                     hits = self.interpreters[moduleID].get_hits()
                            module_hits = np.zeros(shape=(hits.shape[0],),dtype = self.multi_chip_event_dtype, order = 'C')
                            module_hits['event_number'] = hits['event_number']
                            module_hits['trigger_number'] = hits['trigger_number']
                            module_hits['trigger_time_stamp'] = hits['trigger_time_stamp']
                            module_hits['relative_BCID'] = hits['relative_BCID']
                            module_hits['column'] = hits['column']
                            module_hits['row'] = hits['row']
                            module_hits['tot'] = hits['tot']
                            module_hits['moduleID'] = moduleID
                            
                            hit_array = np.concatenate((hit_array,module_hits))
                    
                    counter +=1

#                 if hit_array.shape[0] > 0:
#                     self.logger.info("hit array shape worker %s before store event" % moduleID, hit_array.shape)
#                     self.logger.info("hit array worker %s last entry before store event" % moduleID, hit_array[-1])
                
#                 interpreter.store_event()
#                   
#                 hits = interpreter.get_hits()
#  
#                 module_hits = np.zeros(shape=(hits.shape[0],),dtype = self.multi_chip_event_dtype, order = 'C')
#                 module_hits['event_number'] = hits['event_number']
#                 module_hits['trigger_number'] = hits['trigger_number']
#                 module_hits['trigger_time_stamp'] = hits['trigger_time_stamp']
#                 module_hits['relative_BCID'] = hits['relative_BCID']
#                 module_hits['column'] = hits['column']
#                 module_hits['row'] = hits['row']
#                 module_hits['tot'] = hits['tot']
#                 module_hits['moduleID'] = moduleID
#                 # append chunk to hit array
#                 hit_array = np.concatenate((hit_array,module_hits))
                
#                 if hit_array.shape[0] > 0:
#                     self.logger.info("hit array shape worker %s " % moduleID, hit_array.shape)
#                     self.logger.info("hit array worker %s last entry" % moduleID, hit_array[-1])
                self.send_data_flag[moduleID].set()
                
                if hit_array.shape[0] > 0 and self.n_spills.value > 1:
                    hit_array2 = hit_array[np.where(hit_array['event_number'] != hit_array[0]['event_number'])]
                    send_array = hit_array2.copy()
                else:
                    send_array = hit_array.copy()
                send_end.send(send_array) 
                
#                 send_array = hit_array.copy()
#                 send_end.send(hit_array) 
                self.worker_finished_flags[moduleID].set()
                self.logger.info("Worker %s finished, received %s hits" % (moduleID , hit_array.shape))
                hit_array = np.zeros(shape=(0,),dtype = self.multi_chip_event_dtype,order='C')
                counter = 0
            try:
                meta_data = socket_pull.recv_json(flags=zmq.NOBLOCK)
            except zmq.Again:
                pass
            else:
                name = meta_data.pop('name')
                if name == 'ReadoutData':
                    data = socket_pull.recv()
                    # reconstruct numpy array
                    buf = buffer(data)
                    dtype = meta_data.pop('dtype')
                    shape = meta_data.pop('shape')
                    data_array = np.frombuffer(buf, dtype=dtype).reshape(shape)
                    
                    interpreter.interpret_raw_data(data_array)
#                     self.analyze_raw_data(raw_data=np.ascontiguousarray(data_array), module=moduleID)
                    # build new array with moduleID, take only necessary data
                    hits = interpreter.get_hits()
#                     hits = self.interpreters[moduleID].get_hits()
                    module_hits = np.zeros(shape=(hits.shape[0],),dtype = self.multi_chip_event_dtype, order = 'C')
                    module_hits['event_number'] = hits['event_number']
                    module_hits['trigger_number'] = hits['trigger_number']
                    module_hits['trigger_time_stamp'] = hits['trigger_time_stamp']
                    module_hits['relative_BCID'] = hits['relative_BCID']
                    module_hits['column'] = hits['column']
                    module_hits['row'] = hits['row']
                    module_hits['tot'] = hits['tot']
                    module_hits['moduleID'] = moduleID
                    # append chunk to hit array
#                     hit_array = np.r_[hit_array,module_hits]
                    hit_array = np.concatenate((hit_array,module_hits))