Code example #1
def analyze_raw_data(input_files, output_file_hits, interpreter_plots, overwrite_output_files, align_at_trigger=True, align_at_tdc=False, use_tdc_trigger_time_stamp=False, max_tdc_delay=80, interpreter_warnings=False):
    logging.info('Analyze the raw FE data given in ' + str(len(input_files)) + ' files and store the needed data')
    if os.path.isfile(output_file_hits) and not overwrite_output_files:  # skip analysis if already done
        logging.info('Analyzed data file ' + output_file_hits + ' already exists. Skip analysis for this file.')
    else:
        with AnalyzeRawData(raw_data_file=input_files, analyzed_data_file=output_file_hits) as analyze_raw_data:
            analyze_raw_data.max_tdc_delay = max_tdc_delay  # max TDC delay to consider a valid in-time TDC word
            analyze_raw_data.use_tdc_trigger_time_stamp = use_tdc_trigger_time_stamp  # if you want to also measure the delay between trigger / hit-bus
            analyze_raw_data.align_at_trigger = align_at_trigger  # align events at trigger words, first word of event has to be a trigger word
            analyze_raw_data.align_at_tdc = align_at_tdc  # align events at TDC words, first word of event has to be a tdc word
            analyze_raw_data.create_tdc_counter_hist = True  # create a histogram for all TDC words
            analyze_raw_data.create_tdc_hist = True  # histogram the hit TDC information
            analyze_raw_data.create_tdc_pixel_hist = True
            analyze_raw_data.create_tot_pixel_hist = True
            analyze_raw_data.create_cluster_hit_table = True  # enables the creation of a table with all cluster hits, std. setting is false
            analyze_raw_data.create_source_scan_hist = True  # create source scan hists
            analyze_raw_data.create_cluster_size_hist = True  # enables cluster size histogramming, can save some time, std. setting is false
            analyze_raw_data.create_cluster_tot_hist = True  # enables cluster ToT histogramming per cluster size, std. setting is false
            analyze_raw_data.interpreter.set_warning_output(interpreter_warnings)  # std. setting is True
            analyze_raw_data.interpret_word_table()  # the actual start conversion command
            analyze_raw_data.interpreter.print_summary()  # prints the interpreter summary

            # Store the enabled pixels for good pixel selection in the TDC analysis step
            with tb.open_file(analyze_raw_data._analyzed_data_file, 'r+') as out_file_h5:
                with tb.open_file(next(iter(analyze_raw_data.files_dict))) as in_file_h5:  # use the first raw data file to extract the enable mask
                    out_file_h5.root.ClusterHits.attrs.enabled_pixels = in_file_h5.root.configuration.Enable[:]

            if interpreter_plots:
                analyze_raw_data.plot_histograms()  # plots all activated histograms into one pdf
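
A minimal usage sketch for the function above (not from the source): the file names are placeholders, and the usual pyBAR imports (AnalyzeRawData, logging, os, tables as tb) are assumed to be in place.

if __name__ == '__main__':
    analyze_raw_data(input_files=['tdc_scan.h5'],  # placeholder raw data file name
                     output_file_hits='tdc_scan_interpreted.h5',  # placeholder output file name
                     interpreter_plots=True,
                     overwrite_output_files=False,
                     align_at_trigger=True)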
Code example #2
def analyze_raw_data(input_file):  # FE-I4 raw data analysis
    '''Std. raw data analysis of FE-I4 data. A hit table is created for further analysis.

    Parameters
    ----------
    input_file : pytables file
        The raw data file to analyze.
    '''
    with AnalyzeRawData(raw_data_file=input_file,
                        create_pdf=True) as analyze_raw_data:
        #analyze_raw_data.align_at_trigger_number = True  # if trigger number is at the beginning of each event activate this for event alignment
        analyze_raw_data.use_trigger_time_stamp = False  # set to True if the trigger number is a time stamp
        analyze_raw_data.use_tdc_word = False
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.create_meta_event_index = True
        analyze_raw_data.create_trigger_error_hist = True
        analyze_raw_data.create_rel_bcid_hist = True
        analyze_raw_data.create_error_hist = True
        analyze_raw_data.create_service_record_hist = True
        analyze_raw_data.create_occupancy_hist = True
        analyze_raw_data.create_tot_hist = False
        #         analyze_raw_data.n_bcid = 16
        #         analyze_raw_data.max_tot_value = 13
        analyze_raw_data.interpreter.create_empty_event_hits(False)
        #         analyze_raw_data.interpreter.set_debug_output(False)
        #         analyze_raw_data.interpreter.set_info_output(False)
        analyze_raw_data.interpreter.set_warning_output(False)
        #         analyze_raw_data.interpreter.debug_events(0, 1, True)
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()
        analyze_raw_data.plot_histograms()
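
A minimal call sketch (not from the source); the file name is a placeholder and the usual pyBAR imports are assumed:

analyze_raw_data('ext_trigger_scan.h5')  # placeholder raw data file name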
Code example #3
    def analyze(self):
        self.register.set_global_register_value("Vthin_AltFine", self.last_good_threshold[self.increase_threshold])
        self.register.set_pixel_register_value('TDAC', self.last_good_tdac[self.increase_threshold])
        self.register.set_pixel_register_value('Enable', self.last_good_enable_mask[0])  # use enable mask from the lowest point to mask bad pixels
        # write configuration to avoid high current states
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        commands.extend(self.register.get_commands("WrRegister", name=["Vthin_AltFine"]))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="TDAC"))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="Enable"))
        self.register_utils.send_commands(commands)

        with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.interpreter.set_warning_output(False)
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.interpreter.print_summary()
            analyze_raw_data.plot_histograms()
            plot_occupancy(self.last_occupancy_hist[self.increase_threshold].T, title='Noisy Pixels at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_hist[self.increase_threshold].T, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_occupancy_mask[self.increase_threshold].T, title='Occupancy Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_mask[self.increase_threshold].T, filename=analyze_raw_data.output_pdf)
            plot_three_way(self.last_good_tdac[self.increase_threshold].T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), x_axis_title="TDAC", filename=analyze_raw_data.output_pdf, maximum=31, bins=32)
            plot_occupancy(self.last_good_tdac[self.increase_threshold].T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), z_max=31, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_good_enable_mask[self.increase_threshold].T, title='Intermediate Enable Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_good_enable_mask[self.increase_threshold].T, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_good_enable_mask[0].T, title='Final Enable Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val[0], self.last_step[0]), z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_good_enable_mask[0].T, filename=analyze_raw_data.output_pdf)
Code example #4
def analyze_raw_data_per_scan_parameter(input_file, output_file_hits, scan_data_filename, scan_parameters=['PlsrDAC']):
    with AnalyzeRawData(raw_data_file=input_file, analyzed_data_file=output_file_hits) as analyze_raw_data:
        analyze_raw_data.create_hit_table = True  # can be set to false to omit hit table creation, std. setting is false
        analyze_raw_data.create_tot_hist = True  # creates a ToT histogram

        for data_one_step, one_step_parameter in analyze_hits_per_scan_parameter(analyze_data=analyze_raw_data, scan_parameters=scan_parameters):
            data_one_step.plot_histograms(scan_data_filename + '_' + one_step_parameter, create_hit_hists_only=True)
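
A usage sketch with placeholder file names (assumptions, not from the source):

analyze_raw_data_per_scan_parameter(input_file='plsr_dac_scan.h5',  # placeholder
                                    output_file_hits='plsr_dac_scan_interpreted.h5',  # placeholder
                                    scan_data_filename='plsr_dac_scan',  # placeholder plot base name
                                    scan_parameters=['PlsrDAC'])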
Code example #5
def analyze_raw_data(input_files, output_file_hits, scan_parameter):
    logging.info('Analyze the raw FE data given in ' + str(len(input_files)) +
                 ' files and store the needed data')
    if os.path.isfile(output_file_hits) and not analysis_configuration[
            'overwrite_output_files']:  # skip analysis if already done
        logging.warning('Analyzed data file ' + output_file_hits +
                        ' already exists. Skip analysis for this file.')
    else:
        with AnalyzeRawData(
                raw_data_file=input_files,
                analyzed_data_file=output_file_hits,
                scan_parameter_name=scan_parameter) as analyze_raw_data:
            analyze_raw_data.create_hit_table = True  # can be set to false to omit hit table creation, std. setting is false
            analyze_raw_data.create_cluster_table = True  # enables the creation of a table with all clusters, std. setting is false
            analyze_raw_data.create_source_scan_hist = True  # create source scan hists
            analyze_raw_data.create_cluster_size_hist = True  # enables cluster size histogramming, can save some time, std. setting is false
            analyze_raw_data.create_cluster_tot_hist = True  # enables cluster ToT histogramming per cluster size, std. setting is false
            analyze_raw_data.interpreter.set_warning_output(analysis_configuration['interpreter_warnings'])  # std. setting is True
            analyze_raw_data.clusterizer.set_warning_output(analysis_configuration['interpreter_warnings'])  # std. setting is True
            analyze_raw_data.interpreter.debug_events(0, 10, False)  # events to be printed onto the console for debugging, usually deactivated
            analyze_raw_data.interpret_word_table()  # the actual start conversion command
            analyze_raw_data.interpreter.print_summary()  # prints the interpreter summary
            analyze_raw_data.plot_histograms()  # plots all activated histograms into one pdf
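
Unlike example #1, this variant reads a module-level analysis_configuration dict instead of taking the flags as parameters. A sketch of the expected shape, limited to the keys actually used above (the values are assumptions):

analysis_configuration = {
    'overwrite_output_files': False,  # skip files that were already analyzed
    'interpreter_warnings': False,    # silence interpreter/clusterizer warnings
}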
Code example #6
 def analyze(self):
     with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
         analyze_raw_data.interpreter.set_warning_output(True)
         analyze_raw_data.create_tot_hist = False
         analyze_raw_data.interpret_word_table()
         analyze_raw_data.plot_histograms()
         analyze_raw_data.interpreter.print_summary()
Code example #7
    def analyze(self):
        with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_tot_hist = False
            analyze_raw_data.create_threshold_hists = True
            analyze_raw_data.create_fitted_threshold_hists = True
            analyze_raw_data.create_threshold_mask = True
            analyze_raw_data.n_injections = 100
            analyze_raw_data.interpreter.set_warning_output(False)  # so far the data structure in a threshold scan was always bad, too many warnings given
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.plot_histograms()
            analyze_raw_data.interpreter.print_summary()

            with tb.open_file(analyze_raw_data._analyzed_data_file, 'r') as out_file_h5:
                thr = out_file_h5.root.HistThresholdFitted[:]
                thr_masked = np.ma.masked_where(np.isclose(thr, 0), thr)
                corr = [thr_masked[:, i * 2 + 1:i * 2 + 3].mean() for i in range(0, 38)]
                corr = np.array(corr)
                corr -= corr.min()
#                 corr = np.around(corr).astype(int)

        if "C_High".lower() in map(lambda x: x.lower(), self.enable_shift_masks) and "C_Low".lower() in map(lambda x: x.lower(), self.enable_shift_masks):
            self.register.calibration_parameters['Pulser_Corr_C_Inj_High'] = list(corr)
        elif "C_High".lower() in map(lambda x: x.lower(), self.enable_shift_masks):
            self.register.calibration_parameters['Pulser_Corr_C_Inj_Med'] = list(corr)
        elif "C_Low".lower() in map(lambda x: x.lower(), self.enable_shift_masks):
            self.register.calibration_parameters['Pulser_Corr_C_Inj_Low'] = list(corr)
        else:
            raise ValueError('Unknown C_Inj')
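
The list comprehension above averages the fitted thresholds over pairs of columns (FE-I4 double columns), starting at column 1. A self-contained numpy illustration with fake data:

import numpy as np

thr = np.random.normal(50.0, 2.0, size=(336, 80))  # fake FE-I4 threshold map (336 rows x 80 columns)
thr_masked = np.ma.masked_where(np.isclose(thr, 0), thr)  # mask pixels without a fit result
# one mean per column pair [1:3], [3:5], ..., [75:77], as in the example above
corr = np.array([thr_masked[:, i * 2 + 1:i * 2 + 3].mean() for i in range(38)])
corr -= corr.min()  # shift so the smallest correction is zero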
Code example #8
def analyze_raw_data(input_files, output_file_hits, interpreter_plots, overwrite_output_files, pdf_filename):
    logging.info('Analyze the raw FE data given in ' + str(len(input_files)) + ' files and store the needed data')
    if os.path.isfile(output_file_hits) and not overwrite_output_files:  # skip analysis if already done
        logging.info('Analyzed data file ' + output_file_hits + ' already exists. Skip analysis for this file.')
    else:
        with AnalyzeRawData(raw_data_file=input_files, analyzed_data_file=output_file_hits) as analyze_raw_data:
#             analyze_raw_data.interpreter.debug_events(3645978, 3645978, True)
#             analyze_raw_data.interpreter.debug_events(100, 110, True)
#             analyze_raw_data.use_tdc_trigger_time_stamp = True  # if you want to also measure the delay between trigger / hit-bus
#             analyze_raw_data.max_tdc_delay = 80
            analyze_raw_data.align_at_trigger = False  # do not align events at trigger words
            analyze_raw_data.align_at_tdc = True  # align events at TDC words, first word of event has to be a tdc word
            analyze_raw_data.create_tdc_counter_hist = True  # create a histogram for all TDC words
            analyze_raw_data.create_tdc_hist = True  # histogram the hit TDC information
            analyze_raw_data.create_tdc_pixel_hist = True
            analyze_raw_data.create_tot_pixel_hist = True
            analyze_raw_data.create_cluster_hit_table = True  # enables the creation of a table with all cluster hits, std. setting is false
            analyze_raw_data.create_source_scan_hist = True  # create source scan hists
            analyze_raw_data.create_cluster_size_hist = True  # enables cluster size histogramming, can save some time, std. setting is false
            analyze_raw_data.create_cluster_tot_hist = True  # enables cluster ToT histogramming per cluster size, std. setting is false
            analyze_raw_data.interpreter.set_warning_output(analysis_configuration['interpreter_warnings'])  # std. setting is True
            analyze_raw_data.clusterizer.set_warning_output(analysis_configuration['interpreter_warnings'])  # std. setting is True
            analyze_raw_data.interpreter.print_status()
            analyze_raw_data.interpret_word_table()  # the actual start conversion command
            analyze_raw_data.interpreter.print_summary()  # prints the interpreter summary
            if interpreter_plots:
                analyze_raw_data.plot_histograms()  # plots all activated histograms into one pdf
Code example #9
    def analyze(self):
        with AnalyzeRawData(raw_data_file=self.output_filename,
                            create_pdf=True) as analyze_raw_data:
            analyze_raw_data.interpreter.set_warning_output(False)
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.create_hit_table = False
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.plot_histograms()
            analyze_raw_data.interpreter.print_summary()
            with tb.open_file(analyze_raw_data._analyzed_data_file,
                              'r') as out_file_h5:
                occ_hist = out_file_h5.root.HistOcc[:, :, 0].T
            self.occ_mask = np.zeros(shape=occ_hist.shape,
                                     dtype=np.dtype('>u1'))
            # noisy pixels are set to 1
            self.occ_mask[occ_hist > self.abs_occ_limit] = 1
            # make inverse
            self.inv_occ_mask = invert_pixel_mask(self.occ_mask)

            if self.overwrite_mask:
                for mask in self.disable_for_mask:
                    self.register.set_pixel_register_value(
                        mask, self.inv_occ_mask)
            else:
                for mask in self.disable_for_mask:
                    enable_mask = np.logical_and(
                        self.inv_occ_mask,
                        self.register.get_pixel_register_value(mask))
                    self.register.set_pixel_register_value(mask, enable_mask)

            if self.overwrite_mask:
                for mask in self.enable_for_mask:
                    self.register.set_pixel_register_value(mask, self.occ_mask)
            else:
                for mask in self.enable_for_mask:
                    disable_mask = np.logical_or(
                        self.occ_mask,
                        self.register.get_pixel_register_value(mask))
                    self.register.set_pixel_register_value(mask, disable_mask)
            plot_occupancy(self.occ_mask.T,
                           title='Noisy Pixels',
                           z_max=1,
                           filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.occ_mask.T,
                                 z_max=1,
                                 filename=analyze_raw_data.output_pdf)
            for mask in self.disable_for_mask:
                mask_name = self.register.pixel_registers[mask]['name']
                plot_occupancy(self.register.get_pixel_register_value(mask).T,
                               title='%s Mask' % mask_name,
                               z_max=1,
                               filename=analyze_raw_data.output_pdf)
            for mask in self.enable_for_mask:
                mask_name = self.register.pixel_registers[mask]['name']
                plot_occupancy(self.register.get_pixel_register_value(mask).T,
                               title='%s Mask' % mask_name,
                               z_max=1,
                               filename=analyze_raw_data.output_pdf)
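
The mask bookkeeping above is plain numpy: pixels above the occupancy limit are flagged, the inverted flag is ANDed into the masks listed in disable_for_mask, and the flag is ORed into the masks listed in enable_for_mask. A standalone sketch with made-up numbers (invert_pixel_mask is assumed to flip a 0/1 mask):

import numpy as np

occ_hist = np.array([[0, 5], [120, 2]], dtype=np.uint32)   # made-up per-pixel hit counts
abs_occ_limit = 100                                        # made-up noise threshold
occ_mask = np.zeros(shape=occ_hist.shape, dtype='>u1')
occ_mask[occ_hist > abs_occ_limit] = 1                     # 1 = noisy pixel
inv_occ_mask = 1 - occ_mask                                # one way to invert a 0/1 pixel mask
current_enable = np.ones_like(occ_mask)                    # pretend all pixels are enabled
new_enable = np.logical_and(inv_occ_mask, current_enable)  # noisy pixels dropped from an enable-type mask
current_imon = np.zeros_like(occ_mask)                     # pretend no pixel is masked yet
new_imon = np.logical_or(occ_mask, current_imon)           # noisy pixels added to a disable-type mask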
Code example #10
    def analyze(self):
        with AnalyzeRawData(raw_data_file=self.output_filename,
                            create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.interpreter.set_warning_output(False)
            analyze_raw_data.create_tot_hist = False
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.plot_histograms()
            analyze_raw_data.interpreter.print_summary()
            #             occ_hist = make_occupancy_hist(*convert_data_array(data_array_from_data_dict_iterable(self.fifo_readout.data), filter_func=is_data_record, converter_func=get_col_row_array_from_data_record_array)).T
            with tb.open_file(analyze_raw_data._analyzed_data_file,
                              'r') as out_file_h5:
                occ_hist = out_file_h5.root.HistOcc[:, :, 0].T
            self.occ_mask = np.zeros(shape=occ_hist.shape,
                                     dtype=np.dtype('>u1'))
            # stuck pixels (fewer hits than injections) are set to 1
            self.occ_mask[occ_hist < self.n_injections] = 1
            # make inverse
            self.inv_occ_mask = invert_pixel_mask(self.occ_mask)
            if self.overwrite_mask:
                for mask in self.disable_for_mask:
                    self.register.set_pixel_register_value(
                        mask, self.inv_occ_mask)
            else:
                for mask in self.disable_for_mask:
                    enable_mask = np.logical_and(
                        self.inv_occ_mask,
                        self.register.get_pixel_register_value(mask))
                    self.register.set_pixel_register_value(mask, enable_mask)

            if self.overwrite_mask:
                for mask in self.enable_for_mask:
                    self.register.set_pixel_register_value(mask, self.occ_mask)
            else:
                for mask in self.enable_for_mask:
                    disable_mask = np.logical_or(
                        self.occ_mask,
                        self.register.get_pixel_register_value(mask))
                    self.register.set_pixel_register_value(mask, disable_mask)

            plot_occupancy(self.occ_mask.T,
                           title='Stuck Pixels',
                           z_max=1,
                           filename=analyze_raw_data.output_pdf)
            for mask in self.disable_for_mask:
                mask_name = self.register.pixel_registers[mask]['name']
                plot_occupancy(self.register.get_pixel_register_value(mask).T,
                               title='%s Mask' % mask_name,
                               z_max=1,
                               filename=analyze_raw_data.output_pdf)
            for mask in self.enable_for_mask:
                mask_name = self.register.pixel_registers[mask]['name']
                plot_occupancy(self.register.get_pixel_register_value(mask).T,
                               title='%s Mask' % mask_name,
                               z_max=1,
                               filename=analyze_raw_data.output_pdf)
Code example #11
def analyze_hits(input_file, output_file_hits, scan_data_filename, output_file_hits_analyzed=None):
    with AnalyzeRawData(raw_data_file=input_file, analyzed_data_file=output_file_hits) as analyze_raw_data:
        analyze_raw_data.create_source_scan_hist = True
        analyze_raw_data.create_cluster_hit_table = True
        analyze_raw_data.create_cluster_table = True
        analyze_raw_data.create_cluster_size_hist = True
        analyze_raw_data.create_cluster_tot_hist = True
        analyze_raw_data.analyze_hit_table(analyzed_data_out_file=output_file_hits_analyzed)
        analyze_raw_data.plot_histograms(scan_data_filename=scan_data_filename, analyzed_data_file=output_file_hits_analyzed)
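
A usage sketch with placeholder file names (assumptions, not from the source):

analyze_hits(input_file='source_scan.h5',  # placeholder
             output_file_hits='source_scan_interpreted.h5',  # placeholder
             scan_data_filename='source_scan',  # placeholder plot base name
             output_file_hits_analyzed='source_scan_analyzed.h5')  # placeholder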
Code example #12
File: test_analysis.py Project: CARIBOuSystem/pyBAR
 def setUpClass(cls):
     cls.interpreter = PyDataInterpreter()
     cls.histogram = PyDataHistograming()
     cls.clusterizer = PyDataClusterizer()
     with AnalyzeRawData(raw_data_file=tests_data_folder + 'unit_test_data_1.h5', analyzed_data_file=tests_data_folder + 'unit_test_data_1_interpreted.h5', create_pdf=False) as analyze_raw_data:  # analyze the digital scan raw data, do not show any feedback (no prints to console, no plots)
         analyze_raw_data.chunk_size = 2999999
         analyze_raw_data.create_hit_table = True  # can be set to false to omit hit table creation, std. setting is false
         analyze_raw_data.create_cluster_hit_table = True  # adds the cluster id and seed info to each hit, std. setting is false
         analyze_raw_data.create_cluster_table = True  # enables the creation of a table with all clusters, std. setting is false
         analyze_raw_data.create_trigger_error_hist = True  # creates a histogram summing up the trigger errors
         analyze_raw_data.create_cluster_size_hist = True  # enables cluster size histogramming, can save some time, std. setting is false
         analyze_raw_data.create_cluster_tot_hist = True  # enables cluster ToT histogramming per cluster size, std. setting is false
         analyze_raw_data.create_meta_word_index = True  # stores the start and stop raw data word index for every event, std. setting is false
         analyze_raw_data.create_meta_event_index = True  # stores the event number for each readout in an additional meta data array, default: False
         analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command
     with AnalyzeRawData(raw_data_file=tests_data_folder + 'unit_test_data_2.h5', analyzed_data_file=tests_data_folder + 'unit_test_data_2_interpreted.h5', create_pdf=False) as analyze_raw_data:  # analyze the fast threshold scan raw data, do not show any feedback (no prints to console, no plots)
         analyze_raw_data.chunk_size = 2999999
         analyze_raw_data.create_threshold_hists = True  # only makes sense if threshold scan data is analyzed, std. setting is false
         analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command
     with AnalyzeRawData(raw_data_file=None, analyzed_data_file=tests_data_folder + 'unit_test_data_1_interpreted.h5', create_pdf=False) as analyze_raw_data:   # analyze the digital scan hit data, do not show any feedback (no prints to console, no plots)
         analyze_raw_data.chunk_size = 2999999
         analyze_raw_data.create_cluster_hit_table = True
         analyze_raw_data.create_cluster_table = True
         analyze_raw_data.create_cluster_size_hist = True
         analyze_raw_data.create_cluster_tot_hist = True
         analyze_raw_data.analyze_hit_table(analyzed_data_out_file=tests_data_folder + 'unit_test_data_1_analyzed.h5')
     with AnalyzeRawData(raw_data_file=tests_data_folder + 'unit_test_data_3.h5', analyzed_data_file=tests_data_folder + 'unit_test_data_3_interpreted.h5', create_pdf=False) as analyze_raw_data:  # analyze the digital scan raw data per scan parameter, do not show any feedback (no prints to console, no plots)
         analyze_raw_data.chunk_size = 2999999
         analyze_raw_data.create_hit_table = True  # can be set to false to omit hit table creation, std. setting is false
         analyze_raw_data.create_cluster_hit_table = True  # adds the cluster id and seed info to each hit, std. setting is false
         analyze_raw_data.create_cluster_table = True  # enables the creation of a table with all clusters, std. setting is false
         analyze_raw_data.create_trigger_error_hist = True  # creates a histogram summing up the trigger errors
         analyze_raw_data.create_cluster_size_hist = True  # enables cluster size histogramming, can save some time, std. setting is false
         analyze_raw_data.create_cluster_tot_hist = True  # enables cluster ToT histogramming per cluster size, std. setting is false
         analyze_raw_data.create_meta_word_index = True  # stores the start and stop raw data word index for every event, std. setting is false
         analyze_raw_data.create_meta_event_index = True  # stores the event number for each readout in an additional meta data array, default: False
         analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command
     with AnalyzeRawData(raw_data_file=tests_data_folder + 'unit_test_data_2.h5', analyzed_data_file=tests_data_folder + 'unit_test_data_2_hits.h5', create_pdf=False) as analyze_raw_data:  # analyze the fast threshold scan raw data, do not show any feedback (no prints to console, no plots)
         analyze_raw_data.chunk_size = 2999999
         analyze_raw_data.create_hit_table = True
         analyze_raw_data.create_threshold_hists = True  # only makes sense if threshold scan data is analyzed, std. setting is false
         analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command
     with AnalyzeRawData(raw_data_file=None, analyzed_data_file=tests_data_folder + 'unit_test_data_2_hits.h5', create_pdf=False) as analyze_raw_data:
         analyze_raw_data.chunk_size = 2999999
         analyze_raw_data.create_threshold_hists = True
         analyze_raw_data.analyze_hit_table(analyzed_data_out_file=tests_data_folder + 'unit_test_data_2_analyzed.h5')
     with AnalyzeRawData(raw_data_file=tests_data_folder + 'unit_test_data_4.h5', analyzed_data_file=tests_data_folder + 'unit_test_data_4_interpreted.h5', create_pdf=False) as analyze_raw_data:
         analyze_raw_data.chunk_size = 2999999
         analyze_raw_data.create_hit_table = True
         analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command
     with AnalyzeRawData(raw_data_file=[tests_data_folder + 'unit_test_data_4_parameter_128.h5', tests_data_folder + 'unit_test_data_4_parameter_256.h5'], analyzed_data_file=tests_data_folder + 'unit_test_data_4_interpreted_2.h5', scan_parameter_name='parameter', create_pdf=False) as analyze_raw_data:
         analyze_raw_data.chunk_size = 2999999
         analyze_raw_data.create_hit_table = True
         analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command
Code example #13
 def analyze_raw_data_file(file_name):
     with AnalyzeRawData(raw_data_file=file_name,
                         create_pdf=False) as analyze_raw_data:
         analyze_raw_data.create_tot_hist = False
         analyze_raw_data.create_fitted_threshold_hists = True
         analyze_raw_data.create_threshold_mask = True
         analyze_raw_data.interpreter.set_warning_output(True)  # note: threshold scans often produce many interpreter warnings; pass False to silence them
         analyze_raw_data.interpret_word_table()
Code example #14
 def analyze(self):
     with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
         analyze_raw_data.create_tot_hist = True
         if self.enable_tdc:
             analyze_raw_data.create_tdc_counter_hist = True  # histogram all TDC words
             analyze_raw_data.create_tdc_hist = True  # histogram the hit TDC information
             analyze_raw_data.interpreter.use_tdc_word(True)  # align events at the TDC word
         analyze_raw_data.interpret_word_table()
         analyze_raw_data.plot_histograms()
         analyze_raw_data.interpreter.print_summary()
Code example #15
 def analyze(self):
     with AnalyzeRawData(raw_data_file=self.output_filename,
                         create_pdf=True) as analyze_raw_data:
         analyze_raw_data.create_cluster_size_hist = True  # can be set to false to omit cluster hit creation, can save some time, standard setting is false
         analyze_raw_data.create_source_scan_hist = True
         analyze_raw_data.create_cluster_tot_hist = True
         analyze_raw_data.interpreter.set_warning_output(False)
         analyze_raw_data.interpret_word_table()
         analyze_raw_data.interpreter.print_summary()
         analyze_raw_data.plot_histograms()
Code example #16
def analyze_raw_data(input_file, output_file_hits):
    with AnalyzeRawData(raw_data_file=input_file,
                        analyzed_data_file=output_file_hits,
                        create_pdf=True) as analyze_raw_data:
        analyze_raw_data.create_hit_table = False  # can be set to false to omit hit table creation, std. setting is false
        analyze_raw_data.create_cluster_hit_table = False  # adds the cluster id and seed info to each hit, std. setting is false
        analyze_raw_data.create_cluster_table = False  # enables the creation of a table with all clusters, std. setting is false

        analyze_raw_data.create_occupancy_hist = True  # creates a colxrow histogram with accumulated hits for each scan parameter
        analyze_raw_data.create_tot_hist = True  # creates a ToT histogram
        analyze_raw_data.create_rel_bcid_hist = True  # creates a histogram with the relative BCID of the hits
        analyze_raw_data.create_service_record_hist = True  # creates a histogram with all SR send out from the FE
        analyze_raw_data.create_error_hist = True  # creates a histogram summing up the event errors that occurred
        analyze_raw_data.create_trigger_error_hist = True  # creates a histogram summing up the trigger errors
        analyze_raw_data.create_source_scan_hist = False  # create source scan hists
        analyze_raw_data.create_cluster_size_hist = False  # enables cluster size histogramming, can save some time, std. setting is false
        analyze_raw_data.create_cluster_tot_hist = False  # enables cluster ToT histogramming per cluster size, std. setting is false
        analyze_raw_data.create_threshold_hists = False  # only makes sense if threshold scan data is analyzed, std. setting is false
        analyze_raw_data.create_threshold_mask = False  # masking of noisy or black pixels during histogramming, only affecting fast-algorithm
        analyze_raw_data.create_fitted_threshold_hists = False  # only makes sense if threshold scan data is analyzed, std. setting is false
        analyze_raw_data.create_fitted_threshold_mask = False  # masking of noisy or black pixels during histogramming, only affecting S-curve fitting

        analyze_raw_data.create_meta_word_index = False  # stores the start and stop raw data word index for every event, std. setting is false
        analyze_raw_data.create_meta_event_index = True  # stores the event number for each readout in an additional meta data array, default: False

        analyze_raw_data.n_bcid = 16  # set the number of BCIDs per event, needed to judge the event structure, only active if settings are not taken from raw data file
        analyze_raw_data.n_injections = 100  # set the numbers of injections, needed for fast threshold/noise determination
        analyze_raw_data.max_tot_value = 13  # set the maximum ToT value considered to be a hit, 14 is a late hit
        analyze_raw_data.use_trigger_number = False
        analyze_raw_data.set_stop_mode = False  # special analysis if data was taken in stop mode
        analyze_raw_data.interpreter.use_tdc_word(False)  # use the TDC word to align the events, assume that they are first words in the event
        analyze_raw_data.interpreter.use_trigger_time_stamp(False)  # use the trigger number as a time stamp

        analyze_raw_data.interpreter.set_debug_output(False)  # std. setting is False
        analyze_raw_data.interpreter.set_info_output(False)  # std. setting is False
        analyze_raw_data.interpreter.set_warning_output(True)  # std. setting is True
        analyze_raw_data.clusterizer.set_warning_output(True)  # std. setting is True
        analyze_raw_data.interpreter.debug_events(3832, 3850, False)  # events to be printed onto the console for debugging, usually deactivated
        analyze_raw_data.interpret_word_table()  # the actual start conversion command
        analyze_raw_data.interpreter.print_summary()  # prints the interpreter summary
        analyze_raw_data.plot_histograms(pdf_filename=input_file)  # plots all activated histograms into one pdf
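
A usage sketch (placeholder file names, usual pyBAR imports assumed):

analyze_raw_data(input_file='digital_scan.h5',  # placeholder
                 output_file_hits='digital_scan_interpreted.h5')  # placeholder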
Code example #17
 def analyze_raw_data_file(file_name):
     if os.path.isfile(os.path.splitext(file_name)[0] + '_interpreted.h5'):  # skip analysis if already done
         logging.warning('Analyzed data file ' + file_name + ' already exists. Skip analysis for this file.')
     else:
         with AnalyzeRawData(raw_data_file=file_name, create_pdf=False) as analyze_raw_data:
             analyze_raw_data.create_tot_hist = False
             analyze_raw_data.create_tot_pixel_hist = False
             analyze_raw_data.create_fitted_threshold_hists = True
             analyze_raw_data.create_threshold_mask = True
             analyze_raw_data.interpreter.set_warning_output(False)  # RX errors would fill the console
             analyze_raw_data.interpret_word_table()
Code example #18
File: tune_merged_pixels.py Project: ljthink/pyBAR
    def analyze(self):
        with AnalyzeRawData(raw_data_file=self.output_filename,
                            create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_tot_hist = True
            if self.enable_tdc:
                analyze_raw_data.create_tdc_counter_hist = True  # histogram all TDC words
                analyze_raw_data.create_tdc_hist = True  # histogram the hit TDC information
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.plot_histograms()
            analyze_raw_data.interpreter.print_summary()

            occ_hist = analyze_raw_data.out_file_h5.root.HistOcc[:, :, 0].T
            occ_mask = np.zeros(shape=occ_hist.shape, dtype=np.dtype('>u1'))
            occ_mask[occ_hist > 1] = 1

            inv_occ_mask = invert_pixel_mask(occ_mask)
            if self.overwrite_mask:
                for mask in self.disable_for_mask:
                    self.register.set_pixel_register_value(mask, inv_occ_mask)
            else:
                for mask in self.disable_for_mask:
                    enable_mask = np.logical_and(
                        inv_occ_mask,
                        self.register.get_pixel_register_value(mask))
                    self.register.set_pixel_register_value(mask, enable_mask)

            if self.overwrite_mask:
                for mask in self.enable_for_mask:
                    self.register.set_pixel_register_value(mask, occ_mask)
            else:
                for mask in self.enable_for_mask:
                    disable_mask = np.logical_or(
                        occ_mask, self.register.get_pixel_register_value(mask))
                    self.register.set_pixel_register_value(mask, disable_mask)
            plot_occupancy(occ_mask.T,
                           title='Merged Pixels',
                           z_max=1,
                           filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(occ_mask.T,
                                 z_max=1,
                                 filename=analyze_raw_data.output_pdf)
            for mask in self.disable_for_mask:
                mask_name = self.register.pixel_registers[mask]['name']
                plot_occupancy(self.register.get_pixel_register_value(mask).T,
                               title='%s Mask' % mask_name,
                               z_max=1,
                               filename=analyze_raw_data.output_pdf)
            for mask in self.enable_for_mask:
                mask_name = self.register.pixel_registers[mask]['name']
                plot_occupancy(self.register.get_pixel_register_value(mask).T,
                               title='%s Mask' % mask_name,
                               z_max=1,
                               filename=analyze_raw_data.output_pdf)
Code example #19
File: scan_threshold.py Project: makoc/pyBAR
 def analyze(self):
     with AnalyzeRawData(raw_data_file=self.output_filename,
                         create_pdf=True) as analyze_raw_data:
         analyze_raw_data.create_tot_hist = False
         analyze_raw_data.create_fitted_threshold_hists = True
         analyze_raw_data.create_threshold_mask = True
         analyze_raw_data.n_injections = 100
         analyze_raw_data.interpreter.set_warning_output(False)  # so far the data structure in a threshold scan was always bad, too many warnings given
         analyze_raw_data.interpret_word_table()
         analyze_raw_data.interpreter.print_summary()
         analyze_raw_data.plot_histograms()
Code example #20
 def analyze(self):
     with AnalyzeRawData(raw_data_file=self.output_filename,
                         create_pdf=True) as analyze_raw_data:
         analyze_raw_data.create_hit_table = True
         analyze_raw_data.trig_count = self.trig_count  # set the number of BCIDs to overwrite the value deduced from the raw data file
         analyze_raw_data.create_source_scan_hist = True
         analyze_raw_data.use_trigger_time_stamp = True
         analyze_raw_data.set_stop_mode = True
         analyze_raw_data.align_at_trigger = True
         analyze_raw_data.interpreter.set_warning_output(False)
         analyze_raw_data.interpret_word_table(use_settings_from_file=False)
         analyze_raw_data.interpreter.print_summary()
         analyze_raw_data.plot_histograms()
Code example #21
 def analyze(self):
     with AnalyzeRawData(raw_data_file=self.output_filename,
                         create_pdf=True) as analyze_raw_data:
         analyze_raw_data.create_hit_table = True
         analyze_raw_data.trigger_data_format = self.dut['TLU'][
             'DATA_FORMAT']
         analyze_raw_data.create_source_scan_hist = True
         analyze_raw_data.set_stop_mode = True
         analyze_raw_data.align_at_trigger = True
         analyze_raw_data.interpreter.set_warning_output(False)
         analyze_raw_data.interpret_word_table()
         analyze_raw_data.interpreter.print_summary()
         analyze_raw_data.plot_histograms()
Code example #22
    def analyze(self):
        self.register.set_global_register_value("Vthin_AltFine", self.threshold[0])
        self.register.set_pixel_register_value('TDAC', self.new_tdac[0])
        self.register.set_pixel_register_value('Enable', self.new_enable_mask[0])  # use enable mask from the lowest point to mask bad pixels
        # write configuration to avoid high current states
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        commands.extend(self.register.get_commands("WrRegister", name=["Vthin_AltFine"]))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="TDAC"))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="Enable"))
        self.register_utils.send_commands(commands)

        with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.interpreter.set_warning_output(False)
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.interpreter.print_summary()
            analyze_raw_data.plot_histograms()
            last_step = None
            for step in range(self.plot_n_steps, -1, -1):
                if self.threshold[step] is not None:
                    plot_occupancy(self.occupancy_hist[step].T, title='Occupancy at Vthin_AltFine %d Step %d' % (self.threshold[step], self.tdac_step[step]), filename=analyze_raw_data.output_pdf)
                    plot_fancy_occupancy(self.occupancy_hist[step].T, filename=analyze_raw_data.output_pdf)
                    plot_occupancy(self.occupancy_mask[step].T, title='Noisy pixels at Vthin_AltFine %d Step %d' % (self.threshold[step], self.tdac_step[step]), z_max=1, filename=analyze_raw_data.output_pdf)
                    plot_fancy_occupancy(self.occupancy_mask[step].T, filename=analyze_raw_data.output_pdf)
                    plot_three_way(self.tdac[step].T, title='TDAC at Vthin_AltFine %d Step %d' % (self.threshold[step], self.tdac_step[step]), x_axis_title="TDAC", filename=analyze_raw_data.output_pdf, maximum=31, bins=32)
                    plot_occupancy(self.tdac[step].T, title='TDAC at Vthin_AltFine %d Step %d' % (self.threshold[step], self.tdac_step[step]), z_max=31, filename=analyze_raw_data.output_pdf)
                    plot_occupancy(self.enable_mask[step].T, title='Enable mask at Vthin_AltFine %d Step %d' % (self.threshold[step], self.tdac_step[step]), z_max=1, filename=analyze_raw_data.output_pdf)
                    # adding Poisson statistics plots
                    fig = Figure()
                    FigureCanvas(fig)
                    ax = fig.add_subplot(111)
                    ax.set_title("Hit statistics")
                    hist, bin_edges = np.histogram(self.occupancy_hist[step], bins=np.arange(0.0, np.max(self.occupancy_hist[step]) + 2, 1.0))
                    try:
                        _, idx = hist_quantiles(hist, [0.0, 0.9], return_indices=True)
                    except IndexError:
                        idx = [0, 1]
                    bins = np.arange(0, np.maximum(bin_edges[idx[1]], stats.poisson.ppf(0.9999, mu=self.occupancy_limit * self.n_triggers * self.consecutive_lvl1)) + 2, 1)
                    ax.hist(self.occupancy_hist[step].flatten(), bins=bins, align='left', alpha=0.5, label="Measured occupancy")
                    ax.bar(x=bins[:-1], height=stats.poisson.pmf(k=bins[:-1], mu=self.occupancy_limit * self.n_triggers * self.consecutive_lvl1) * self.enable_mask[step].sum(), alpha=0.5, width=1.0, color="r", label="Expected occupancy (Poisson statistics)")
                    # ax.hist(stats.poisson.rvs(mu=self.occupancy_limit * self.n_triggers * self.consecutive_lvl1, size=self.enable_mask[step].sum()), bins=bins, align='left', alpha=0.5, label="Expected occupancy (Poisson statistics)")
                    ax.set_xlabel('#Hits')
                    ax.set_ylabel('#Pixels')
                    ax.legend()
                    analyze_raw_data.output_pdf.savefig(fig)
                    last_step = step
            if last_step is not None:
                plot_three_way(self.new_tdac[last_step].T, title='Final TDAC after Vthin_AltFine %d Step %d' % (self.threshold[last_step], self.tdac_step[last_step]), x_axis_title="TDAC", filename=analyze_raw_data.output_pdf, maximum=31, bins=32)
                plot_occupancy(self.new_tdac[last_step].T, title='Final TDAC after Vthin_AltFine %d Step %d' % (self.threshold[last_step], self.tdac_step[last_step]), z_max=31, filename=analyze_raw_data.output_pdf)
                plot_occupancy(self.new_enable_mask[last_step].T, title='Final Enable mask after Vthin_AltFine %d Step %d' % (self.threshold[last_step], self.tdac_step[last_step]), z_max=1, filename=analyze_raw_data.output_pdf)
Code example #23
File: tune_stuck_pixel.py Project: ljthink/pyBAR
    def analyze(self):
        with AnalyzeRawData(raw_data_file=self.output_filename,
                            create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.interpreter.set_warning_output(False)
            analyze_raw_data.create_tot_hist = False
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.plot_histograms()
            analyze_raw_data.interpreter.print_summary()

            occ_hist = analyze_raw_data.out_file_h5.root.HistOcc[:, :, 0].T
            occ_mask = np.zeros(shape=occ_hist.shape, dtype=np.dtype('>u1'))
            # stuck pixels (fewer hits than injections) are set to 1
            occ_mask[occ_hist < self.n_injections] = 1
            # make inverse
            inv_occ_mask = invert_pixel_mask(occ_mask)
            if self.overwrite_mask:
                for mask in self.disable_for_mask:
                    self.register.set_pixel_register_value(mask, inv_occ_mask)
            else:
                for mask in self.disable_for_mask:
                    enable_mask = np.logical_and(
                        inv_occ_mask,
                        self.register.get_pixel_register_value(mask))
                    self.register.set_pixel_register_value(mask, enable_mask)

            if self.overwrite_mask:
                for mask in self.enable_for_mask:
                    self.register.set_pixel_register_value(mask, occ_mask)
            else:
                for mask in self.enable_for_mask:
                    disable_mask = np.logical_or(
                        occ_mask, self.register.get_pixel_register_value(mask))
                    self.register.set_pixel_register_value(mask, disable_mask)

            plot_occupancy(occ_mask.T,
                           title='Stuck Pixels',
                           z_max=1,
                           filename=analyze_raw_data.output_pdf)
            for mask in self.disable_for_mask:
                mask_name = self.register.pixel_registers[mask]['name']
                plot_occupancy(self.register.get_pixel_register_value(mask).T,
                               title='%s Mask' % mask_name,
                               z_max=1,
                               filename=analyze_raw_data.output_pdf)
            for mask in self.enable_for_mask:
                mask_name = self.register.pixel_registers[mask]['name']
                plot_occupancy(self.register.get_pixel_register_value(mask).T,
                               title='%s Mask' % mask_name,
                               z_max=1,
                               filename=analyze_raw_data.output_pdf)
Code example #24
 def analyze(self):
     with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
         analyze_raw_data.create_source_scan_hist = True
         analyze_raw_data.create_cluster_size_hist = True
         analyze_raw_data.create_cluster_tot_hist = True
         analyze_raw_data.align_at_trigger = True
         if self.enable_tdc:
             analyze_raw_data.create_tdc_counter_hist = True  # histogram all TDC words
             analyze_raw_data.create_tdc_hist = True  # histogram the hit TDC information
             analyze_raw_data.align_at_tdc = False  # keep event alignment at the trigger word, not at the TDC word
         analyze_raw_data.interpreter.set_warning_output(False)
         analyze_raw_data.interpret_word_table()
         analyze_raw_data.interpreter.print_summary()
         analyze_raw_data.plot_histograms()
Code example #25
 def analyze(self):
     with AnalyzeRawData(raw_data_file=self.output_filename,
                         create_pdf=True) as analyze_raw_data:
         analyze_raw_data.create_hit_table = True
         analyze_raw_data.n_bcid = self.bcid_window
         analyze_raw_data.create_source_scan_hist = True
         analyze_raw_data.use_trigger_time_stamp = True
         analyze_raw_data.set_stop_mode = True
         analyze_raw_data.align_at_trigger = True
         analyze_raw_data.create_cluster_size_hist = True
         analyze_raw_data.interpreter.set_warning_output(False)
         analyze_raw_data.clusterizer.set_warning_output(False)
         analyze_raw_data.interpret_word_table(use_settings_from_file=False)
         analyze_raw_data.interpreter.print_summary()
         analyze_raw_data.plot_histograms()
Code example #26
def analyze_hits(input_file_hits):
    with AnalyzeRawData(
            raw_data_file=None,
            analyzed_data_file=input_file_hits) as analyze_raw_data:
        analyze_raw_data.create_source_scan_hist = True
        analyze_raw_data.create_cluster_hit_table = True
        analyze_raw_data.create_cluster_table = True
        analyze_raw_data.create_cluster_size_hist = True
        analyze_raw_data.create_cluster_tot_hist = True
        analyze_raw_data.create_tdc_hist = True
        analyze_raw_data.analyze_hit_table(
            analyzed_data_out_file=input_file_hits[:-3] + '_analyzed.h5')
        analyze_raw_data.plot_histograms(
            pdf_filename=input_file_hits[:-3],
            analyzed_data_file=input_file_hits[:-3] + '_analyzed.h5')
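
A usage sketch; the hit file name is a placeholder for an '_interpreted.h5' file produced by a previous raw data analysis:

analyze_hits('tdc_source_scan_interpreted.h5')  # placeholder; the '_analyzed.h5' output name is derived by slicing off '.h5'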
Code example #27
 def analyze(self):
     with AnalyzeRawData(raw_data_file=self.output_filename,
                         create_pdf=True) as analyze_raw_data:
         analyze_raw_data.create_hit_table = True
         analyze_raw_data.n_bcid = self.bcid_window
         analyze_raw_data.create_source_scan_hist = True
         analyze_raw_data.use_trigger_time_stamp = True
         analyze_raw_data.set_stop_mode = True
         analyze_raw_data.interpreter.use_trigger_number(True)
         analyze_raw_data.create_cluster_size_hist = True
         analyze_raw_data.interpreter.set_warning_output(False)
         analyze_raw_data.clusterizer.set_warning_output(False)
         #             analyze_raw_data.interpreter.debug_events(0, 10, True)  # events to be printed onto the console for debugging, usually deactivated
         analyze_raw_data.interpret_word_table(use_settings_from_file=False)
         analyze_raw_data.interpreter.print_summary()
         analyze_raw_data.plot_histograms()
Code example #28
def analyse_selected_hits(input_file_hits,
                          output_file_hits,
                          output_file_hits_analyzed,
                          scan_data_filenames,
                          cluster_size_condition='cluster_size==1',
                          n_cluster_condition='n_cluster==1'):
    logging.info('Analyze selected hits with ' + cluster_size_condition +
                 ' and ' + n_cluster_condition + ' in ' + input_file_hits)
    if os.path.isfile(output_file_hits) and not analysis_configuration[
            "overwrite_output_files"]:  # skip analysis if already done
        logging.warning('Selected hit data file ' + output_file_hits +
                        ' already exists. Skip analysis for this file.')
    else:
        analysis.select_hits_from_cluster_info(
            input_file_hits=input_file_hits,
            output_file_hits=output_file_hits,
            cluster_size_condition=cluster_size_condition,
            n_cluster_condition=n_cluster_condition
        )  # select hits and copy them to a new file
    if os.path.isfile(
            output_file_hits_analyzed) and not analysis_configuration[
                "overwrite_output_files"]:  # skip analysis if already done
        logging.warning('Analyzed selected hit data file ' +
                        output_file_hits_analyzed +
                        ' already exists. Skip analysis for this file.')
    else:
        logging.info('Analyze selected hits in ' + output_file_hits)
        with AnalyzeRawData(
                raw_data_file=None,
                analyzed_data_file=output_file_hits) as analyze_raw_data:
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.create_tot_hist = False
            analyze_raw_data.create_cluster_size_hist = True
            analyze_raw_data.create_cluster_tot_hist = True
            analyze_raw_data.analyze_hit_table(
                analyzed_data_out_file=output_file_hits_analyzed)
            analyze_raw_data.plot_histograms(
                scan_data_filename=output_file_hits_analyzed,
                analyzed_data_file=output_file_hits_analyzed)
        with tb.open_file(
                input_file_hits, mode="r"
        ) as in_hit_file_h5:  # copy meta data to the new analyzed file
            with tb.open_file(output_file_hits_analyzed,
                              mode="r+") as output_hit_file_h5:
                in_hit_file_h5.root.meta_data.copy(
                    output_hit_file_h5.root)  # copy meta_data node to new file
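
A usage sketch with placeholder file names (assumptions, not from the source); note that the module-level analysis_configuration dict must provide 'overwrite_output_files':

analyse_selected_hits(input_file_hits='source_scan_interpreted.h5',  # placeholder
                      output_file_hits='source_scan_selected.h5',  # placeholder
                      output_file_hits_analyzed='source_scan_selected_analyzed.h5',  # placeholder
                      scan_data_filenames='source_scan')  # placeholder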
Code example #29
    def analyze(self):
        self.register.set_global_register_value("Vthin_AltFine",
                                                self.last_good_threshold)
        self.register.set_pixel_register_value('TDAC', self.last_good_tdac)
        self.register.set_pixel_register_value('Enable',
                                               self.last_good_enable_mask)

        with AnalyzeRawData(raw_data_file=self.output_filename,
                            create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.interpreter.set_warning_output(False)
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.interpreter.print_summary()
            analyze_raw_data.plot_histograms()
            plot_occupancy(self.last_occupancy_hist.T,
                           title='Noisy Pixels at Vthin_AltFine %d Step %d' %
                           (self.last_reg_val, self.last_step),
                           filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_hist.T,
                                 filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_occupancy_mask.T,
                           title='Occupancy Mask at Vthin_AltFine %d Step %d' %
                           (self.last_reg_val, self.last_step),
                           z_max=1,
                           filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_mask.T,
                                 filename=analyze_raw_data.output_pdf)
            plotThreeWay(self.last_tdac_distribution.T,
                         title='TDAC at Vthin_AltFine %d Step %d' %
                         (self.last_reg_val, self.last_step),
                         x_axis_title="TDAC",
                         filename=analyze_raw_data.output_pdf,
                         maximum=31,
                         bins=32)
            plot_occupancy(self.last_tdac_distribution.T,
                           title='TDAC at Vthin_AltFine %d Step %d' %
                           (self.last_reg_val, self.last_step),
                           z_max=31,
                           filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.register.get_pixel_register_value('Enable').T,
                           title='Enable Mask',
                           z_max=1,
                           filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(
                self.register.get_pixel_register_value('Enable').T,
                filename=analyze_raw_data.output_pdf)
Code example #30
    def analyze(self):
        with AnalyzeRawData(raw_data_file=self.output_filename,
                            create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_tot_hist = True
            if self.enable_tdc:
                analyze_raw_data.create_tdc_counter_hist = True  # histogram all TDC words
                analyze_raw_data.create_tdc_hist = True  # histogram the hit TDC information
                analyze_raw_data.interpreter.use_tdc_word(True)  # align events at the TDC word
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.plot_histograms()
            analyze_raw_data.interpreter.print_summary()

            with tb.open_file(analyze_raw_data._analyzed_data_file,
                              'r') as out_file_h5:
                occ_hist = out_file_h5.root.HistOcc[:, :, 0].T
            occ_mask = np.zeros(shape=occ_hist.shape, dtype=np.dtype('>u1'))
            occ_mask[occ_hist > 0] = 1
            plot_occupancy(occ_mask.T,
                           title='Merged Pixels',
                           z_max=1,
                           filename=analyze_raw_data.output_pdf)

            inv_occ_mask = invert_pixel_mask(occ_mask)
            if self.overwrite_mask:
                for mask in self.disable_for_mask:
                    self.register.set_pixel_register_value(mask, inv_occ_mask)
            else:
                for mask in self.disable_for_mask:
                    enable_mask = np.logical_and(
                        inv_occ_mask,
                        self.register.get_pixel_register_value(mask))
                    self.register.set_pixel_register_value(mask, enable_mask)

            if self.overwrite_mask:
                for mask in self.enable_for_mask:
                    self.register.set_pixel_register_value(mask, occ_mask)
            else:
                for mask in self.enable_for_mask:
                    disable_mask = np.logical_or(
                        occ_mask, self.register.get_pixel_register_value(mask))
                    self.register.set_pixel_register_value(mask, disable_mask)
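The mask update above is plain NumPy logic: merged pixels are removed from the masks in disable_for_mask with a logical AND against the inverted occupancy mask, and added to the masks in enable_for_mask with a logical OR. A minimal sketch of the two non-overwrite branches; shapes and register contents are illustrative, and invert_pixel_mask is assumed to simply flip 0 and 1:

import numpy as np

occ_mask = np.zeros((336, 80), dtype=np.uint8)  # 1 = pixel fired
occ_mask[17, 42] = 1                            # hypothetical merged pixel
inv_occ_mask = 1 - occ_mask                     # assumed behavior of invert_pixel_mask

enable = np.ones((336, 80), dtype=np.uint8)     # hypothetical current 'Enable' pixel register
imon = np.zeros((336, 80), dtype=np.uint8)      # hypothetical current 'Imon' pixel register

# disable_for_mask branch: a pixel stays enabled only if it was enabled AND did not fire
new_enable = np.logical_and(inv_occ_mask, enable)
# enable_for_mask branch: a pixel is set if it fired OR was already set
new_imon = np.logical_or(occ_mask, imon)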
Code example #31
File: analysis.py Project: makoc/pyBAR
def analyze_beam_spot(
    scan_base,
    combine_n_readouts=1000,
    chunk_size=10000000,
    plot_occupancy_hists=False,
    output_pdf=None,
    output_file=None,
):
    """ Determines the mean x and y beam spot position as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). The occupancy is determined
    for the given combined events and stored into a pdf file. At the end the beam x and y is plotted into a scatter plot with absolute positions in um.

     Parameters
    ----------
    scan_base: list of str
        scan base names (e.g.:  ['//data//SCC_50_fei4_self_trigger_scan_390', ]
    combine_n_readouts: int
        the number of read outs to combine (e.g. 1000)
    max_chunk_size: int
        the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer
    output_pdf: PdfPages
        PdfPages file object, if none the plot is printed to screen
    """
    time_stamp = []
    x = []
    y = []

    for data_file in scan_base:
        with tb.open_file(data_file + "_interpreted.h5", mode="r+") as in_hit_file_h5:
            # get data and data pointer
            meta_data_array = in_hit_file_h5.root.meta_data[:]
            hit_table = in_hit_file_h5.root.Hits

            # determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number)
            parameter_ranges = np.column_stack(
                (
                    analysis_utils.get_ranges_from_array(meta_data_array["timestamp_start"][::combine_n_readouts]),
                    analysis_utils.get_ranges_from_array(meta_data_array["event_number"][::combine_n_readouts]),
                )
            )

            # create an event_number index (important for speed)
            analysis_utils.index_event_number(hit_table)

            # initialize the analysis and set settings
            analyze_data = AnalyzeRawData()
            analyze_data.create_tot_hist = False
            analyze_data.create_bcid_hist = False
            analyze_data.histograming.set_no_scan_parameter()

            # variables for read speed up
            index = 0  # index where to start the read out, 0 at the beginning, increased during looping
            best_chunk_size = chunk_size

            progress_bar = progressbar.ProgressBar(
                widgets=[
                    "",
                    progressbar.Percentage(),
                    " ",
                    progressbar.Bar(marker="*", left="|", right="|"),
                    " ",
                    analysis_utils.ETA(),
                ],
                maxval=hit_table.shape[0],
                term_width=80,
            )
            progress_bar.start()

            # loop over the selected events
            for parameter_index, parameter_range in enumerate(parameter_ranges):
                logging.debug(
                    "Analyze time stamp "
                    + str(parameter_range[0])
                    + " and data from events = ["
                    + str(parameter_range[2])
                    + ","
                    + str(parameter_range[3])
                    + "[ "
                    + str(int(100.0 * parameter_index / len(parameter_ranges)))
                    + "%"
                )
                analyze_data.reset()  # resets the data of the last analysis

                # loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given
                readout_hit_len = 0  # used to estimate an optimal chunk size from the number of hits, for speed-up
                for hits, index in analysis_utils.data_aligned_at_events(
                    hit_table,
                    start_event_number=parameter_range[2],
                    stop_event_number=parameter_range[3],
                    start=index,
                    chunk_size=best_chunk_size,
                ):
                    analyze_data.analyze_hits(hits)  # analyze the selected hits in chunks
                    readout_hit_len += hits.shape[0]
                    progress_bar.update(index)
                best_chunk_size = (
                    int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size else chunk_size
                )  # estimate the number of hits of the next read to increase the readout speed

                # get and store results
                occupancy_array = analyze_data.histograming.get_occupancy()
                projection_x = np.sum(occupancy_array, axis=0).ravel()
                projection_y = np.sum(occupancy_array, axis=1).ravel()
                x.append(analysis_utils.get_mean_from_histogram(projection_x, bin_positions=range(0, 80)))
                y.append(analysis_utils.get_mean_from_histogram(projection_y, bin_positions=range(0, 336)))
                time_stamp.append(parameter_range[0])
                if plot_occupancy_hists:
                    plotting.plot_occupancy(
                        occupancy_array[:, :, 0],
                        title="Occupancy for events between "
                        + time.strftime("%H:%M:%S", time.localtime(parameter_range[0]))
                        + " and "
                        + time.strftime("%H:%M:%S", time.localtime(parameter_range[1])),
                        filename=output_pdf,
                    )
            progress_bar.finish()
    plotting.plot_scatter(
        [i * 250 for i in x],
        [i * 50 for i in y],
        title="Mean beam position",
        x_label="x [um]",
        y_label="y [um]",
        marker_style="-o",
        filename=output_pdf,
    )
    if output_file:
        with tb.openFile(output_file, mode="a") as out_file_h5:
            rec_array = np.array(list(zip(time_stamp, x, y)), dtype=[("time_stamp", float), ("x", float), ("y", float)])
            try:
                beam_spot_table = out_file_h5.create_table(
                    out_file_h5.root,
                    name="Beamspot",
                    description=rec_array,
                    title="Beam spot position",
                    filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
                )
                beam_spot_table[:] = rec_array
            except tb.exceptions.NodeError:
                logging.warning(output_file + " has already a Beamspot note, do not overwrite existing.")
    return time_stamp, x, y
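A hedged usage sketch for analyze_beam_spot(); the import and the scan base name are assumptions:

from matplotlib.backends.backend_pdf import PdfPages

with PdfPages('beam_spot.pdf') as output_pdf:  # collects the occupancy and scatter plots
    time_stamp, x, y = analyze_beam_spot(
        scan_base=['data/SCC_50_fei4_self_trigger_scan_390'],  # hypothetical scan base name
        combine_n_readouts=1000,
        plot_occupancy_hists=False,
        output_pdf=output_pdf,
        output_file='beam_spot.h5')  # the 'Beamspot' table is appended here

Note that the returned x and y are mean column and row positions in pixel units; the 250 um and 50 um FE-I4 pixel pitches are only applied for the scatter plot.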
Code example #32
File: analysis.py Project: makoc/pyBAR
def analyse_n_cluster_per_event(
    scan_base,
    include_no_cluster=False,
    time_line_absolute=True,
    combine_n_readouts=1000,
    chunk_size=10000000,
    plot_n_cluster_hists=False,
    output_pdf=None,
    output_file=None,
):
    """ Determines the number of cluster per event as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts').

    Parameters
    ----------
    scan_base: list of str
        scan base names (e.g.:  ['//data//SCC_50_fei4_self_trigger_scan_390', ]
    include_no_cluster: bool
        Set to true to also consider all events without any hit.
    combine_n_readouts: int
        the number of read outs to combine (e.g. 1000)
    max_chunk_size: int
        the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer
    output_pdf: PdfPages
        PdfPages file object, if none the plot is printed to screen
    """

    time_stamp = []
    n_cluster = []

    start_time_set = False

    for data_file in scan_base:
        with tb.open_file(data_file + "_interpreted.h5", mode="r+") as in_cluster_file_h5:
            # get data and data pointer
            meta_data_array = in_cluster_file_h5.root.meta_data[:]
            cluster_table = in_cluster_file_h5.root.Cluster

            # determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number)
            parameter_ranges = np.column_stack(
                (
                    analysis_utils.get_ranges_from_array(meta_data_array["timestamp_start"][::combine_n_readouts]),
                    analysis_utils.get_ranges_from_array(meta_data_array["event_number"][::combine_n_readouts]),
                )
            )

            # create an event_number index (important for speed)
            analysis_utils.index_event_number(cluster_table)

            # initialize the analysis and set settings
            analyze_data = AnalyzeRawData()
            analyze_data.create_tot_hist = False
            analyze_data.create_bcid_hist = False

            # variables for read speed up
            index = 0  # index where to start the read out, 0 at the beginning, increased during looping
            best_chunk_size = chunk_size

            total_cluster = cluster_table.shape[0]

            progress_bar = progressbar.ProgressBar(
                widgets=[
                    "",
                    progressbar.Percentage(),
                    " ",
                    progressbar.Bar(marker="*", left="|", right="|"),
                    " ",
                    analysis_utils.ETA(),
                ],
                maxval=total_cluster,
                term_width=80,
            )
            progress_bar.start()

            # loop over the selected events
            for parameter_index, parameter_range in enumerate(parameter_ranges):
                logging.debug(
                    "Analyze time stamp "
                    + str(parameter_range[0])
                    + " and data from events = ["
                    + str(parameter_range[2])
                    + ","
                    + str(parameter_range[3])
                    + "[ "
                    + str(int(100.0 * parameter_index / len(parameter_ranges)))
                    + "%"
                )
                analyze_data.reset()  # resets the data of the last analysis

                # loop over the cluster in the actual selected events with optimizations: determine best chunk size, start word index given
                readout_cluster_len = 0  # used to estimate an optimal chunk size from the number of clusters, for speed-up
                hist = None
                for clusters, index in analysis_utils.data_aligned_at_events(
                    cluster_table,
                    start_event_number=parameter_range[2],
                    stop_event_number=parameter_range[3],
                    start=index,
                    chunk_size=best_chunk_size,
                ):
                    n_cluster_per_event = analysis_utils.get_n_cluster_in_events(clusters["event_number"])[
                        :, 1
                    ]  # array with the number of cluster per event, cluster per event are at least 1
                    if hist is None:
                        hist = np.histogram(n_cluster_per_event, bins=10, range=(0, 10))[0]
                    else:
                        hist = np.add(hist, np.histogram(n_cluster_per_event, bins=10, range=(0, 10))[0])
                    if include_no_cluster and parameter_range[3] is not None:  # the stop event number is None for the last readout
                        hist[0] = (parameter_range[3] - parameter_range[2]) - len(n_cluster_per_event)  # add the events without any cluster
                    readout_cluster_len += clusters.shape[0]
                    total_cluster -= len(clusters)
                    progress_bar.update(index)
                best_chunk_size = (
                    int(1.5 * readout_cluster_len) if int(1.05 * readout_cluster_len) < chunk_size else chunk_size
                )  # estimate the number of clusters of the next read to increase the readout speed

                if plot_n_cluster_hists:
                    plotting.plot_1d_hist(
                        hist,
                        title="Number of cluster per event at " + str(parameter_range[0]),
                        x_axis_title="Number of cluster",
                        y_axis_title="#",
                        log_y=True,
                        filename=output_pdf,
                    )
                hist = hist.astype("f4") / np.sum(hist)  # calculate fraction from total numbers

                if time_line_absolute:
                    time_stamp.append(parameter_range[0])
                else:
                    if not start_time_set:
                        start_time = parameter_ranges[0, 0]
                        start_time_set = True
                    time_stamp.append((parameter_range[0] - start_time) / 60.0)
                n_cluster.append(hist)
            progress_bar.finish()
            if total_cluster != 0:
                logging.warning("Not all clusters were selected during analysis. Analysis is therefore not exact")

    if time_line_absolute:
        plotting.plot_scatter_time(
            time_stamp,
            n_cluster,
            title="Number of cluster per event as a function of time",
            marker_style="o",
            filename=output_pdf,
            legend=("0 cluster", "1 cluster", "2 cluster", "3 cluster")
            if include_no_cluster
            else ("0 cluster not plotted", "1 cluster", "2 cluster", "3 cluster"),
        )
    else:
        plotting.plot_scatter(
            time_stamp,
            n_cluster,
            title="Number of cluster per event as a function of time",
            x_label="time [min.]",
            marker_style="o",
            filename=output_pdf,
            legend=("0 cluster", "1 cluster", "2 cluster", "3 cluster")
            if include_no_cluster
            else ("0 cluster not plotted", "1 cluster", "2 cluster", "3 cluster"),
        )
    if output_file:
        with tb.openFile(output_file, mode="a") as out_file_h5:
            cluster_array = np.array(n_cluster)
            rec_array = np.array(
                list(zip(
                    time_stamp,
                    cluster_array[:, 0],
                    cluster_array[:, 1],
                    cluster_array[:, 2],
                    cluster_array[:, 3],
                    cluster_array[:, 4],
                    cluster_array[:, 5],
                )),
                dtype=[
                    ("time_stamp", float),
                    ("cluster_0", float),
                    ("cluster_1", float),
                    ("cluster_2", float),
                    ("cluster_3", float),
                    ("cluster_4", float),
                    ("cluster_5", float),
                ],
            ).view(np.recarray)
            try:
                n_cluster_table = out_file_h5.create_table(
                    out_file_h5.root,
                    name="n_cluster",
                    description=rec_array,
                    title="Cluster per event",
                    filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
                )
                n_cluster_table[:] = rec_array
            except tb.exceptions.NodeError:
                logging.warning(output_file + " already has an n_cluster node, do not overwrite existing data.")
    return time_stamp, n_cluster
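A matching usage sketch for analyse_n_cluster_per_event(); file names are assumptions:

from matplotlib.backends.backend_pdf import PdfPages

with PdfPages('n_cluster.pdf') as output_pdf:
    time_stamp, n_cluster = analyse_n_cluster_per_event(
        scan_base=['data/SCC_50_fei4_self_trigger_scan_390'],  # hypothetical scan base name
        include_no_cluster=True,       # also count events without any cluster
        time_line_absolute=False,      # time axis in minutes since the scan start
        output_pdf=output_pdf,
        output_file='n_cluster.h5')

Each entry of the returned n_cluster list is a 10-bin histogram of the cluster multiplicity, normalized to the number of events in its time slice.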
Code example #33
File: analysis.py Project: makoc/pyBAR
def analyze_cluster_size_per_scan_parameter(
    input_file_hits,
    output_file_cluster_size,
    parameter="GDAC",
    max_chunk_size=10000000,
    overwrite_output_files=False,
    output_pdf=None,
):
    """ This method takes multiple hit files and determines the cluster size for different scan parameter values of

     Parameters
    ----------
    input_files_hits: string
    output_file_cluster_size: string
        The data file with the results
    parameter: string
        The name of the parameter to separate the data into (e.g.: PlsrDAC)
    max_chunk_size: int
        the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer
    overwrite_output_files: bool
        Set to true to overwrite the output file if it already exists
    output_pdf: PdfPages
        PdfPages file object, if none the plot is printed to screen, if False nothing is printed
    """
    logging.info("Analyze the cluster sizes for different " + parameter + " settings for " + input_file_hits)
    if os.path.isfile(output_file_cluster_size) and not overwrite_output_files:  # skip analysis if already done
        logging.info(
            "Analyzed cluster size file " + output_file_cluster_size + " already exists. Skip cluster size analysis."
        )
    else:
        with tb.open_file(output_file_cluster_size, mode="w") as out_file_h5:  # file to write the data into
            filter_table = tb.Filters(complib="blosc", complevel=5, fletcher32=False)  # compression of the written data
            parameter_group = out_file_h5.create_group(
                out_file_h5.root, parameter, title=parameter
            )  # node to store the data
            cluster_size_total = None  # final array for the cluster size per scan parameter setting
            with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5:  # open the actual hit file
                meta_data_array = in_hit_file_h5.root.meta_data[:]
                scan_parameter = analysis_utils.get_scan_parameter(meta_data_array)  # get the scan parameters
                if scan_parameter:  # if a scan parameter was used, analyze the cluster size per parameter setting
                    scan_parameter_values = scan_parameter[parameter]  # scan parameter settings used
                    if len(scan_parameter_values) == 1:  # only analyze per scan step if there is more than one scan step
                        logging.warning(
                            "The file "
                            + str(input_file_hits)
                            + " has no different "
                            + str(parameter)
                            + " parameter values. Omit analysis."
                        )
                    else:
                        logging.info(
                            "Analyze "
                            + input_file_hits
                            + " per scan parameter "
                            + parameter
                            + " for "
                            + str(len(scan_parameter_values))
                            + " values from "
                            + str(np.amin(scan_parameter_values))
                            + " to "
                            + str(np.amax(scan_parameter_values))
                        )
                        event_numbers = analysis_utils.get_meta_data_at_scan_parameter(meta_data_array, parameter)[
                            "event_number"
                        ]  # get the event numbers in meta_data where the scan parameter changes
                        parameter_ranges = np.column_stack(
                            (scan_parameter_values, analysis_utils.get_ranges_from_array(event_numbers))
                        )
                        hit_table = in_hit_file_h5.root.Hits
                        analysis_utils.index_event_number(hit_table)
                        total_hits, total_hits_2, index = 0, 0, 0
                        chunk_size = max_chunk_size
                        # initialize the analysis and set settings
                        analyze_data = AnalyzeRawData()
                        analyze_data.create_cluster_size_hist = True
                        analyze_data.create_cluster_tot_hist = True
                        analyze_data.histograming.set_no_scan_parameter()  # the histogrammer needs the number of scan parameters for correct occupancy hist allocation
                        progress_bar = progressbar.ProgressBar(
                            widgets=[
                                "",
                                progressbar.Percentage(),
                                " ",
                                progressbar.Bar(marker="*", left="|", right="|"),
                                " ",
                                analysis_utils.ETA(),
                            ],
                            maxval=hit_table.shape[0],
                            term_width=80,
                        )
                        progress_bar.start()
                        for parameter_index, parameter_range in enumerate(
                            parameter_ranges
                        ):  # loop over the selected events
                            analyze_data.reset()  # resets the data of the last analysis
                            logging.debug(
                                "Analyze GDAC = "
                                + str(parameter_range[0])
                                + " "
                                + str(int(100.0 * parameter_index / len(parameter_ranges)))
                                + "%"
                            )
                            start_event_number = parameter_range[1]
                            stop_event_number = parameter_range[2]
                            logging.debug(
                                "Data from events = [" + str(start_event_number) + "," + str(stop_event_number) + "["
                            )
                            actual_parameter_group = out_file_h5.create_group(
                                parameter_group,
                                name=parameter + "_" + str(parameter_range[0]),
                                title=parameter + "_" + str(parameter_range[0]),
                            )
                            # loop over the hits in the actual selected events with optimizations: variable chunk size, start word index given
                            readout_hit_len = 0  # used to estimate an optimal chunk size from the number of hits, for speed-up
                            for hits, index in analysis_utils.data_aligned_at_events(
                                hit_table,
                                start_event_number=start_event_number,
                                stop_event_number=stop_event_number,
                                start=index,
                                chunk_size=chunk_size,
                            ):
                                total_hits += hits.shape[0]
                                analyze_data.analyze_hits(hits)  # analyze the selected hits in chunks
                                readout_hit_len += hits.shape[0]
                                progress_bar.update(index)
                            chunk_size = (
                                int(1.05 * readout_hit_len)
                                if int(1.05 * readout_hit_len) < max_chunk_size
                                else max_chunk_size
                            )  # estimate the number of hits of the next read to increase the readout speed
                            if chunk_size < 50:  # limit the lower chunk size; there can always be a crazy event with more than 20 hits
                                chunk_size = 50
                            # get occupancy hist
                            occupancy = analyze_data.histograming.get_occupancy()  # just here to check that the histograming is consistent

                            # store and plot cluster size hist
                            cluster_size_hist = analyze_data.clusterizer.get_cluster_size_hist()
                            cluster_size_hist_table = out_file_h5.create_carray(
                                actual_parameter_group,
                                name="HistClusterSize",
                                title="Cluster Size Histogram",
                                atom=tb.Atom.from_dtype(cluster_size_hist.dtype),
                                shape=cluster_size_hist.shape,
                                filters=filter_table,
                            )
                            cluster_size_hist_table[:] = cluster_size_hist
                            if output_pdf is not False:
                                plotting.plot_cluster_size(
                                    hist=cluster_size_hist,
                                    title="Cluster size ("
                                    + str(np.sum(cluster_size_hist))
                                    + " entries) for "
                                    + parameter
                                    + " = "
                                    + str(scan_parameter_values[parameter_index]),
                                    filename=output_pdf,
                                )
                            if cluster_size_total is None:  # true if no data was appended to the array yet
                                cluster_size_total = cluster_size_hist
                            else:
                                cluster_size_total = np.vstack([cluster_size_total, cluster_size_hist])

                            total_hits_2 += np.sum(occupancy)
                        progress_bar.finish()
                        if total_hits != total_hits_2:
                            logging.warning("Analysis shows inconsistent number of hits. Check needed!")
                        logging.info("Analyzed %d hits!", total_hits)
            cluster_size_total_out = out_file_h5.create_carray(
                out_file_h5.root,
                name="AllHistClusterSize",
                title="All Cluster Size Histograms",
                atom=tb.Atom.from_dtype(cluster_size_total.dtype),
                shape=cluster_size_total.shape,
                filters=filter_table,
            )
            cluster_size_total_out[:] = cluster_size_total
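A short sketch of reading the results back; the node names follow the code above (default parameter "GDAC"), the file name is an assumption:

import tables as tb

with tb.open_file('cluster_size_per_gdac.h5', mode='r') as in_file_h5:
    all_hists = in_file_h5.root.AllHistClusterSize[:]  # one row per scan parameter value, stacked with np.vstack
    # per-setting histograms live under /<parameter>/<parameter>_<value>/HistClusterSize
    for group in in_file_h5.root.GDAC:
        print(group._v_name, group.HistClusterSize[:].sum())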