Example 1
class DataWorker(QtCore.QObject):
    run_start = QtCore.pyqtSignal()
    run_config_data = QtCore.pyqtSignal(dict)
    global_config_data = QtCore.pyqtSignal(dict)
    filename = QtCore.pyqtSignal(dict)
    interpreted_data = QtCore.pyqtSignal(dict)
    meta_data = QtCore.pyqtSignal(dict)
    finished = QtCore.pyqtSignal()

    def __init__(self):
        QtCore.QObject.__init__(self)
        self.integrate_readouts = 1
        self.n_readout = 0
        self._stop_readout = Event()
        self.setup_raw_data_analysis()
        self.reset_lock = Lock()

    def setup_raw_data_analysis(self):
        self.interpreter = PyDataInterpreter()
        self.histogram = PyDataHistograming()
        self.interpreter.set_warning_output(False)
        self.histogram.set_no_scan_parameter()
        self.histogram.create_occupancy_hist(True)
        self.histogram.create_rel_bcid_hist(True)
        self.histogram.create_tot_hist(True)
        self.histogram.create_tdc_hist(True)
        try:
            self.histogram.create_tdc_distance_hist(True)
            self.interpreter.use_tdc_trigger_time_stamp(True)
        except AttributeError:
            self.has_tdc_distance = False
        else:
            self.has_tdc_distance = True

    def connect(self, socket_addr):
        self.socket_addr = socket_addr
        self.context = zmq.Context()
        self.socket_pull = self.context.socket(zmq.SUB)  # subscriber
        self.socket_pull.setsockopt(zmq.SUBSCRIBE, '')  # do not filter any data
        self.socket_pull.connect(self.socket_addr)

    def on_set_integrate_readouts(self, value):
        self.integrate_readouts = value

    def reset(self):
        with self.reset_lock:
            self.histogram.reset()
            self.interpreter.reset()
            self.n_readout = 0

    def analyze_raw_data(self, raw_data):
        self.interpreter.interpret_raw_data(raw_data)
        self.histogram.add_hits(self.interpreter.get_hits())

    def process_data(self):  # infinite loop via QObject.moveToThread(), does not block event loop
        while not self._stop_readout.wait(0.01):  # use wait(), do not block here
            with self.reset_lock:
                try:
                    meta_data = self.socket_pull.recv_json(flags=zmq.NOBLOCK)
                except zmq.Again:
                    pass
                else:
                    name = meta_data.pop('name')
                    if name == 'ReadoutData':
                        data = self.socket_pull.recv()
                        # reconstruct numpy array
                        buf = buffer(data)
                        dtype = meta_data.pop('dtype')
                        shape = meta_data.pop('shape')
                        data_array = np.frombuffer(buf, dtype=dtype).reshape(shape)
                        # count readouts and reset
                        self.n_readout += 1
                        if self.integrate_readouts != 0 and self.n_readout % self.integrate_readouts == 0:
                            self.histogram.reset()
                            # we do not want to reset the interpreter to keep the error counters
                            # self.interpreter.reset()
                        # interpreted data
                        self.analyze_raw_data(data_array)
                        if self.integrate_readouts == 0 or self.n_readout % self.integrate_readouts == self.integrate_readouts - 1:
                            interpreted_data = {
                                'occupancy': self.histogram.get_occupancy(),
                                'tot_hist': self.histogram.get_tot_hist(),
                                'tdc_counters': self.interpreter.get_tdc_counters(),
                                'tdc_distance': self.interpreter.get_tdc_distance() if self.has_tdc_distance else np.zeros((256,), dtype=np.uint8),
                                'error_counters': self.interpreter.get_error_counters(),
                                'service_records_counters': self.interpreter.get_service_records_counters(),
                                'trigger_error_counters': self.interpreter.get_trigger_error_counters(),
                                'rel_bcid_hist': self.histogram.get_rel_bcid_hist()
                            }
                            self.interpreted_data.emit(interpreted_data)
                            self.interpreted_data.emit(interpreted_data)
                        # meta data
                        meta_data.update({'n_hits': self.interpreter.get_n_hits(), 'n_events': self.interpreter.get_n_events()})
                        self.meta_data.emit(meta_data)
                    elif name == 'RunConf':
                        self.run_config_data.emit(meta_data)
                    elif name == 'GlobalRegisterConf':
                        trig_count = int(meta_data['conf']['Trig_Count'])
                        self.interpreter.set_trig_count(trig_count)
                        self.global_config_data.emit(meta_data)
                    elif name == 'Reset':
                        self.histogram.reset()
                        self.interpreter.reset()
                        self.run_start.emit()
                    elif name == 'Filename':
                        self.filename.emit(meta_data)
        self.finished.emit()

    def stop(self):
        self._stop_readout.set()
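
# Illustrative sketch (not part of the original example): how a DataWorker instance is typically
# driven from a GUI via QObject.moveToThread(), as hinted by the comment in process_data().
# The helper name, the socket address and the wiring of the receiver slots are assumptions made
# for demonstration only.
def _example_start_data_worker(socket_addr='tcp://127.0.0.1:5678'):
    thread = QtCore.QThread()
    worker = DataWorker()
    worker.connect(socket_addr)  # subscribe to the publisher before starting the polling loop
    worker.moveToThread(thread)
    thread.started.connect(worker.process_data)  # run the polling loop inside the worker thread
    worker.finished.connect(thread.quit)  # worker.stop() -> process_data() returns -> finished -> thread quits
    thread.start()
    return thread, worker  # the caller keeps the references and later calls worker.stop()
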
class PybarFEI4Histogrammer(Transceiver):

    def setup_transceiver(self):
        self.set_bidirectional_communication()  # We want to be able to change the histogrammer settings

    def setup_interpretation(self):
        self.histograming = PyDataHistograming()
        self.histograming.set_no_scan_parameter()
        self.histograming.create_occupancy_hist(True)
        self.histograming.create_rel_bcid_hist(True)
        self.histograming.create_tot_hist(True)
        self.histograming.create_tdc_hist(True)
        # Variables
        self.n_readouts = 0
        self.readout = 0
        self.fps = 0  # data frames per second
        self.hps = 0  # hits per second
        self.eps = 0  # events per second
        self.plot_delay = 0
        self.total_hits = 0
        self.total_events = 0
        self.updateTime = time.time()
        # Histograms from the interpreter, stored for summing
        self.tdc_counters = None
        self.error_counters = None
        self.service_records_counters = None
        self.trigger_error_counters = None

    def deserialze_data(self, data):
        return jsonapi.loads(data, object_hook=utils.json_numpy_obj_hook)

    def interpret_data(self, data):
        if 'meta_data' in data[0][1]:  # Meta data is directly forwarded to the receiver, only hit data and event counters are histogrammed; 0 for frontend index, 1 for data dict
            meta_data = data[0][1]['meta_data']
            now = time.time()
            recent_total_hits = meta_data['n_hits']
            recent_total_events = meta_data['n_events']
            recent_fps = 1.0 / (now - self.updateTime)  # calculate FPS
            recent_hps = (recent_total_hits - self.total_hits) / (now - self.updateTime)
            recent_eps = (recent_total_events - self.total_events) / (now - self.updateTime)
            self.updateTime = now
            self.total_hits = recent_total_hits
            self.total_events = recent_total_events
            self.fps = self.fps * 0.7 + recent_fps * 0.3
            self.hps = self.hps + (recent_hps - self.hps) * 0.3 / self.fps
            self.eps = self.eps + (recent_eps - self.eps) * 0.3 / self.fps
            meta_data.update({'fps': self.fps, 'hps': self.hps, 'total_hits': self.total_hits, 'eps': self.eps, 'total_events': self.total_events})
            return [data[0][1]]

        self.readout += 1

        if self.n_readouts != 0:  # = 0 for infinite integration
            if self.readout % self.n_readouts == 0:
                self.histograming.reset()
                self.tdc_counters = np.zeros_like(self.tdc_counters)
                self.error_counters = np.zeros_like(self.error_counters)
                self.service_records_counters = np.zeros_like(self.service_records_counters)
                self.trigger_error_counters = np.zeros_like(self.trigger_error_counters)
                self.readout = 0  # reset the readout counter

        interpreted_data = data[0][1]

        self.histograming.add_hits(interpreted_data['hits'])

        # Sum up interpreter histograms
        if self.tdc_counters is not None:
            self.tdc_counters += interpreted_data['tdc_counters']
        else:
            self.tdc_counters = interpreted_data['tdc_counters'].copy()  # Copy needed to give ownership to the histogrammer
        if self.error_counters is not None:
            self.error_counters += interpreted_data['error_counters']
        else:
            self.error_counters = interpreted_data['error_counters'].copy()  # Copy needed to give ownership to the histogrammer
        if self.service_records_counters is not None:
            self.service_records_counters += interpreted_data['service_records_counters']
        else:
            self.service_records_counters = interpreted_data['service_records_counters'].copy()  # Copy needed to give ownership to the histogrammer
        if self.trigger_error_counters is not None:
            self.trigger_error_counters += interpreted_data['trigger_error_counters']
        else:
            self.trigger_error_counters = interpreted_data['trigger_error_counters'].copy()  # Copy needed to give ownership to the histogrammer

        histogrammed_data = {
            'occupancy': self.histograming.get_occupancy(),
            'tot_hist': self.histograming.get_tot_hist(),
            'tdc_counters': self.tdc_counters,
            'error_counters': self.error_counters,
            'service_records_counters': self.service_records_counters,
            'trigger_error_counters': self.trigger_error_counters,
            'rel_bcid_hist': self.histograming.get_rel_bcid_hist()
        }

        return [histogrammed_data]

    def serialze_data(self, data):
        return jsonapi.dumps(data, cls=utils.NumpyEncoder)

    def handle_command(self, command):
        if command[0] == 'RESET':  # Reset command to reset the histograms
            self.histograming.reset()
            self.tdc_counters = np.zeros_like(self.tdc_counters)
            self.error_counters = np.zeros_like(self.error_counters)
            self.service_records_counters = np.zeros_like(self.service_records_counters)
            self.trigger_error_counters = np.zeros_like(self.trigger_error_counters)
        else:
            self.n_readouts = int(command[0])
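
# Illustrative sketch (not part of the original class): the rate smoothing used in interpret_data()
# above is an exponential moving average with a weight of 0.3 on the most recent value
# (self.fps = self.fps * 0.7 + recent_fps * 0.3); hps and eps use the same idea with the weight
# additionally divided by the current fps. The helper name and the example numbers below are
# assumptions for demonstration only.
def _example_smooth_rate(previous, recent, weight=0.3):
    """Return the exponentially smoothed rate: previous * (1 - weight) + recent * weight."""
    return previous * (1.0 - weight) + recent * weight

# e.g. with a previous fps of 10.0 and a recent measurement of 20.0:
# _example_smooth_rate(10.0, 20.0) == 10.0 * 0.7 + 20.0 * 0.3 == 13.0
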
class ThresholdBaselineTuning(Fei4RunBase):
    '''Threshold Baseline Tuning

    Tuning the FEI4 to the lowest possible threshold (GDAC and TDAC). Feedback current will not be tuned.
    NOTE: In case of RX errors decrease the trigger frequency (= increase trigger_rate_limit)
    NOTE: To increase the TDAC range, decrease TdacVbp.
    '''
    _default_run_conf = {
        "occupancy_limit": 0,  # occupancy limit, when reached the TDAC will be decreased (increasing threshold). 0 will mask any pixel with occupancy greater than zero
        "scan_parameters": [('Vthin_AltFine', (120, None)), ('Step', 60)],  # the Vthin_AltFine range, number of steps (repetition at constant Vthin_AltFine)
        "increase_threshold": 5,  # increasing the global threshold (Vthin_AltFine) after tuning
        "disabled_pixels_limit": 0.01,  # limit of disabled pixels, fraction of all pixels
        "use_enable_mask": False,  # if True, enable mask from config file anded with mask (from col_span and row_span), if False use mask only for enable mask
        "n_triggers": 10000,  # total number of trigger sent to FE
        "trigger_rate_limit": 500,  # artificially limiting the trigger rate, in BCs (25ns)
        "trig_count": 0,  # FE-I4 trigger count, number of consecutive BCs, 0 means 16, from 0 to 15
        "col_span": [1, 80],  # column range (from minimum to maximum value). From 1 to 80.
        "row_span": [1, 336],  # row range (from minimum to maximum value). From 1 to 336.
    }

    def configure(self):
        if self.trig_count == 0:
            self.consecutive_lvl1 = (2 ** self.register.global_registers['Trig_Count']['bitlength'])
        else:
            self.consecutive_lvl1 = self.trig_count
        self.abs_occ_limit = int(self.occupancy_limit * self.n_triggers * self.consecutive_lvl1)
        if self.abs_occ_limit <= 0:
            logging.info('Any noise hit will lead to an increased pixel threshold.')
        else:
            logging.info('The pixel threshold of any pixel with an occupancy >%d will be increased' % self.abs_occ_limit)

        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        # TDAC
        tdac_max = 2 ** self.register.pixel_registers['TDAC']['bitlength'] - 1
        self.register.set_pixel_register_value("TDAC", tdac_max)
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="TDAC"))
        mask = make_box_pixel_mask_from_col_row(column=self.col_span, row=self.row_span)
        # Enable
        if self.use_enable_mask:
            self.register.set_pixel_register_value("Enable", np.logical_and(mask, self.register.get_pixel_register_value("Enable")))
        else:
            self.register.set_pixel_register_value("Enable", mask)
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="Enable"))
        # Imon
        self.register.set_pixel_register_value('Imon', 1)
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name='Imon'))
        # C_High
        self.register.set_pixel_register_value('C_High', 0)
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name='C_High'))
        # C_Low
        self.register.set_pixel_register_value('C_Low', 0)
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name='C_Low'))
        # Registers
#         self.register.set_global_register_value("Trig_Lat", self.trigger_latency)  # set trigger latency
        self.register.set_global_register_value("Trig_Count", self.trig_count)  # set number of consecutive triggers
        commands.extend(self.register.get_commands("WrRegister", name=["Trig_Count"]))
        commands.extend(self.register.get_commands("RunMode"))
        self.register_utils.send_commands(commands)

        self.interpreter = PyDataInterpreter()
        self.histogram = PyDataHistograming()
        self.interpreter.set_trig_count(self.trig_count)
        self.interpreter.set_warning_output(False)
        self.histogram.set_no_scan_parameter()
        self.histogram.create_occupancy_hist(True)

    def scan(self):
        scan_parameter_range = [self.register.get_global_register_value("Vthin_AltFine"), 0]
        if self.scan_parameters.Vthin_AltFine[0]:
            scan_parameter_range[0] = self.scan_parameters.Vthin_AltFine[0]
        if self.scan_parameters.Vthin_AltFine[1]:
            scan_parameter_range[1] = self.scan_parameters.Vthin_AltFine[1]
        steps = 1
        if self.scan_parameters.Step:
            steps = self.scan_parameters.Step

        lvl1_command = self.register.get_commands("LV1")[0] + self.register.get_commands("zeros", length=self.trigger_rate_limit)[0]
        self.total_scan_time = int(lvl1_command.length() * 25 * (10 ** -9) * self.n_triggers)

        preselected_pixels = invert_pixel_mask(self.register.get_pixel_register_value('Enable')).sum()
        disabled_pixels_limit_cnt = int(self.disabled_pixels_limit * self.register.get_pixel_register_value('Enable').sum())
        disabled_pixels = 0
        self.last_reg_val = deque([None] * self.increase_threshold, maxlen=self.increase_threshold + 1)
        self.last_step = deque([None] * self.increase_threshold, maxlen=self.increase_threshold + 1)
        self.last_good_threshold = deque([None] * self.increase_threshold, maxlen=self.increase_threshold + 1)
        self.last_good_tdac = deque([None] * self.increase_threshold, maxlen=self.increase_threshold + 1)
        self.last_good_enable_mask = deque([None] * self.increase_threshold, maxlen=self.increase_threshold + 1)
        self.last_occupancy_hist = deque([None] * self.increase_threshold, maxlen=self.increase_threshold + 1)
        self.last_occupancy_mask = deque([None] * self.increase_threshold, maxlen=self.increase_threshold + 1)

        for reg_val in range(scan_parameter_range[0], scan_parameter_range[1] - 1, -1):
            if self.stop_run.is_set():
                break
            logging.info('Scanning Vthin_AltFine %d', reg_val)
            commands = []
            commands.extend(self.register.get_commands("ConfMode"))
            self.register.set_global_register_value("Vthin_AltFine", reg_val)  # set number of consecutive triggers
            commands.extend(self.register.get_commands("WrRegister", name=["Vthin_AltFine"]))
            # setting FE into RunMode
            commands.extend(self.register.get_commands("RunMode"))
            self.register_utils.send_commands(commands)
            step = 0
            while True:
                if self.stop_run.is_set():
                    break
                self.histogram.reset()
                step += 1
                logging.info('Step %d / %d at Vthin_AltFine %d', step, steps, reg_val)
                logging.info('Estimated scan time: %ds', self.total_scan_time)

                with self.readout(Vthin_AltFine=reg_val, Step=step, reset_sram_fifo=True, fill_buffer=True, clear_buffer=True, callback=self.handle_data):
                    got_data = False
                    start = time()
                    self.register_utils.send_command(lvl1_command, repeat=self.n_triggers, wait_for_finish=False, set_length=True, clear_memory=False)
                    while not self.stop_run.wait(0.1):
                        if self.register_utils.is_ready:
                            if got_data:
                                self.progressbar.finish()
                            logging.info('Finished sending %d triggers', self.n_triggers)
                            break
                        if not got_data:
                            if self.fifo_readout.data_words_per_second() > 0:
                                got_data = True
                                logging.info('Taking data...')
                                self.progressbar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.Timer()], maxval=self.total_scan_time, poll=10, term_width=80).start()
                        else:
                            try:
                                self.progressbar.update(time() - start)
                            except ValueError:
                                pass
                # Use fast C++ hit histogramming to save time
                raw_data = np.ascontiguousarray(data_array_from_data_iterable(self.fifo_readout.data), dtype=np.uint32)
                self.interpreter.interpret_raw_data(raw_data)
                self.interpreter.store_event()  # force to create latest event
                self.histogram.add_hits(self.interpreter.get_hits())
                occ_hist = self.histogram.get_occupancy()[:, :, 0]
                # noisy pixels are set to 1
                occ_mask = np.zeros(shape=occ_hist.shape, dtype=np.dtype('>u1'))
                occ_mask[occ_hist > self.abs_occ_limit] = 1

                tdac_reg = self.register.get_pixel_register_value('TDAC')
                decrease_pixel_mask = np.logical_and(occ_mask > 0, tdac_reg > 0)
                disable_pixel_mask = np.logical_and(occ_mask > 0, tdac_reg == 0)
                enable_reg = self.register.get_pixel_register_value('Enable')
                enable_mask = np.logical_and(enable_reg, invert_pixel_mask(disable_pixel_mask))
                if np.logical_and(occ_mask > 0, enable_reg == 0).sum():
                    logging.warning('Received data from disabled pixels')
                    # disabled_pixels += disable_pixel_mask.sum()  # can lead to wrong values if the enable reg is corrupted
                disabled_pixels = invert_pixel_mask(enable_mask).sum() - preselected_pixels
                if disabled_pixels > disabled_pixels_limit_cnt:
                    logging.info('Limit of disabled pixels reached: %d (limit %d)... stopping scan' % (disabled_pixels, disabled_pixels_limit_cnt))
                    break
                else:
                    logging.info('Increasing threshold of %d pixel(s)', decrease_pixel_mask.sum())
                    logging.info('Disabling %d pixel(s), total number of disabled pixel(s): %d', disable_pixel_mask.sum(), disabled_pixels)
                    tdac_reg[decrease_pixel_mask] -= 1
                    self.register.set_pixel_register_value('TDAC', tdac_reg)
                    self.register.set_pixel_register_value('Enable', enable_mask)
                    commands = []
                    commands.extend(self.register.get_commands("ConfMode"))
                    commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name='TDAC'))
                    commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name='Enable'))
                    commands.extend(self.register.get_commands("RunMode"))
                    self.register_utils.send_commands(commands)
                    if occ_mask.sum() == 0 or step == steps or decrease_pixel_mask.sum() < disabled_pixels_limit_cnt:
                        self.last_reg_val.appendleft(reg_val)
                        self.last_step.appendleft(step)
                        self.last_good_threshold.appendleft(self.register.get_global_register_value("Vthin_AltFine"))
                        self.last_good_tdac.appendleft(self.register.get_pixel_register_value("TDAC"))
                        self.last_good_enable_mask.appendleft(self.register.get_pixel_register_value("Enable"))
                        self.last_occupancy_hist.appendleft(occ_hist.copy())
                        self.last_occupancy_mask.appendleft(occ_mask.copy())
                        break
                    else:
                        logging.info('Found %d noisy pixels... repeat tuning step for Vthin_AltFine %d', occ_mask.sum(), reg_val)

            if disabled_pixels > disabled_pixels_limit_cnt or scan_parameter_range[1] == reg_val:
                break

    def analyze(self):
        self.register.set_global_register_value("Vthin_AltFine", self.last_good_threshold[self.increase_threshold])
        self.register.set_pixel_register_value('TDAC', self.last_good_tdac[self.increase_threshold])
        self.register.set_pixel_register_value('Enable', self.last_good_enable_mask[0])  # use enable mask from the lowest point to mask bad pixels
        # write configuration to avoid high current states
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        commands.extend(self.register.get_commands("WrRegister", name=["Vthin_AltFine"]))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="TDAC"))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="Enable"))
        self.register_utils.send_commands(commands)

        with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.interpreter.set_warning_output(False)
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.interpreter.print_summary()
            analyze_raw_data.plot_histograms()
            plot_occupancy(self.last_occupancy_hist[self.increase_threshold].T, title='Noisy Pixels at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_hist[self.increase_threshold].T, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_occupancy_mask[self.increase_threshold].T, title='Occupancy Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_mask[self.increase_threshold].T, filename=analyze_raw_data.output_pdf)
            plot_three_way(self.last_good_tdac[self.increase_threshold].T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), x_axis_title="TDAC", filename=analyze_raw_data.output_pdf, maximum=31, bins=32)
            plot_occupancy(self.last_good_tdac[self.increase_threshold].T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), z_max=31, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_good_enable_mask[self.increase_threshold].T, title='Intermediate Enable Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_good_enable_mask[self.increase_threshold].T, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_good_enable_mask[0].T, title='Final Enable Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val[0], self.last_step[0]), z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_good_enable_mask[0].T, filename=analyze_raw_data.output_pdf)
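
# Illustrative sketch (not part of the original scan class): the per-step masking logic of
# ThresholdBaselineTuning.scan() in isolation. Pixels whose occupancy exceeds the absolute
# occupancy limit either get their TDAC decreased (which increases their threshold) or, if the
# TDAC is already 0, are removed from the enable mask. The helper name, array shapes and the
# default limit are assumptions for demonstration only.
import numpy as np

def _example_mask_noisy_pixels(occ_hist, tdac, enable, abs_occ_limit=0):
    occ_mask = occ_hist > abs_occ_limit  # noisy pixels are set to True
    decrease_pixel_mask = np.logical_and(occ_mask, tdac > 0)  # pixels that can still have their threshold raised
    disable_pixel_mask = np.logical_and(occ_mask, tdac == 0)  # TDAC exhausted, remove pixel from the enable mask
    new_tdac = tdac.copy()
    new_tdac[decrease_pixel_mask] -= 1  # decreasing TDAC increases the pixel threshold
    new_enable = np.logical_and(enable, np.logical_not(disable_pixel_mask))
    return new_tdac, new_enable, occ_mask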