Code example #1
    def analyze(self):
        self.register.set_global_register_value("Vthin_AltFine", self.last_good_threshold[self.increase_threshold])
        self.register.set_pixel_register_value('TDAC', self.last_good_tdac[self.increase_threshold])
        self.register.set_pixel_register_value('Enable', self.last_good_enable_mask[0])  # use enable mask from the lowest point to mask bad pixels
        # write configuration to avoid high current states
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        commands.extend(self.register.get_commands("WrRegister", name=["Vthin_AltFine"]))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="TDAC"))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="Enable"))
        self.register_utils.send_commands(commands)

        with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.interpreter.set_warning_output(False)
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.interpreter.print_summary()
            analyze_raw_data.plot_histograms()
            plot_occupancy(self.last_occupancy_hist[self.increase_threshold].T, title='Noisy Pixels at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_hist[self.increase_threshold].T, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_occupancy_mask[self.increase_threshold].T, title='Occupancy Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_mask[self.increase_threshold].T, filename=analyze_raw_data.output_pdf)
            plot_three_way(self.last_good_tdac[self.increase_threshold].T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), x_axis_title="TDAC", filename=analyze_raw_data.output_pdf, maximum=31, bins=32)
            plot_occupancy(self.last_good_tdac[self.increase_threshold].T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), z_max=31, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_good_enable_mask[self.increase_threshold].T, title='Intermediate Enable Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val[self.increase_threshold], self.last_step[self.increase_threshold]), z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_good_enable_mask[self.increase_threshold].T, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_good_enable_mask[0].T, title='Final Enable Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val[0], self.last_step[0]), z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_good_enable_mask[0].T, filename=analyze_raw_data.output_pdf)
Code example #2
File: tune_fdac.py  Project: PatrickAhl/pyBAR
    def analyze(self):
        self.register.set_pixel_register_value("FDAC", self.fdac_mask_best)

        plot_three_way(hist=self.tot_mean_best.transpose(), title="Mean ToT after FDAC tuning", x_axis_title="Mean ToT", filename=self.plots_filename, minimum=0, maximum=15)
        plot_three_way(hist=self.fdac_mask_best.transpose(), title="FDAC distribution after tuning", x_axis_title="FDAC", filename=self.plots_filename, minimum=0, maximum=15)
        if self.close_plots:
            self.plots_filename.close()
Code example #3
File: tune_gdac_standard.py  Project: Filou-dlp/pyBAR
    def analyze(self):
        # set here because original value is restored after scan()
        self.register_utils.set_gdac(self.gdac_best, send_command=False)
        # write configuration to avoid high current states
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        commands.extend(
            self.register.get_commands(
                "WrRegister", name=["Vthin_AltCoarse", "Vthin_AltFine"]))
        self.register_utils.send_commands(commands)

        plot_three_way(
            self.occ_array_sel_pixels_best.transpose(),
            title="Occupancy after GDAC tuning of selected pixels (GDAC " +
            str(self.gdac_best) + ")",
            x_axis_title='Occupancy',
            filename=self.plots_filename,
            maximum=self.n_injections_gdac)
        plot_three_way(
            self.occ_array_desel_pixels_best.transpose(),
            title="Occupancy after GDAC tuning of not selected pixels (GDAC " +
            str(self.gdac_best) + ")",
            x_axis_title='Occupancy',
            filename=self.plots_filename,
            maximum=self.n_injections_gdac)
        if self.close_plots:
            self.plots_filename.close()
Code example #4
    def analyze(self):
        # set here because original value is restored after scan()
        self.register.set_pixel_register_value("FDAC", self.fdac_mask_best)
        # write configuration to avoid high current states
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        commands.extend(
            self.register.get_commands("WrFrontEnd",
                                       same_mask_for_all_dc=False,
                                       name="FDAC"))
        self.register_utils.send_commands(commands)

        plot_three_way(hist=self.tot_mean_best.transpose(),
                       title="Mean ToT after FDAC tuning",
                       x_axis_title="Mean ToT",
                       filename=self.plots_filename,
                       minimum=0,
                       maximum=15)
        plot_three_way(hist=self.fdac_mask_best.transpose(),
                       title="FDAC distribution after tuning",
                       x_axis_title="FDAC",
                       filename=self.plots_filename,
                       minimum=0,
                       maximum=15)
        if self.close_plots:
            self.plots_filename.close()
Code example #5
File: tune_tdac.py  Project: CARIBOuSystem/pyBAR
    def analyze(self):
        self.register.set_pixel_register_value("TDAC", self.tdac_mask_best)

        plot_three_way(hist=self.occupancy_best.transpose(), title="Occupancy after TDAC tuning", x_axis_title="Occupancy", filename=self.plots_filename, maximum=self.n_injections_tdac)
        plot_three_way(hist=self.tdac_mask_best.transpose(), title="TDAC distribution after tuning", x_axis_title="TDAC", filename=self.plots_filename, maximum=32)
        if self.close_plots:
            self.plots_filename.close()
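The tuning examples above share one pattern: a single PdfPages object is passed as `filename` to several plot_three_way() calls and closed once all pages are written. The following is a minimal standalone sketch of that pattern, not pyBAR code; the dummy histograms and the import path are assumptions (the module location may differ between pyBAR versions).

import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from pybar.analysis.plotting.plotting import plot_three_way  # assumed import path

plots_filename = PdfPages('tdac_tuning_example.pdf')
occupancy_best = np.random.randint(0, 101, size=(80, 336))  # dummy (column, row) occupancy
tdac_mask_best = np.random.randint(0, 32, size=(80, 336))   # dummy (column, row) TDAC values
# arrays are transposed to (row, column) before plotting, as in the examples above
plot_three_way(hist=occupancy_best.transpose(), title="Occupancy after TDAC tuning", x_axis_title="Occupancy", filename=plots_filename, maximum=100)
plot_three_way(hist=tdac_mask_best.transpose(), title="TDAC distribution after tuning", x_axis_title="TDAC", filename=plots_filename, maximum=32)
plots_filename.close()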
Code example #6
    def analyze(self):
        self.register.set_global_register_value("Vthin_AltFine", self.last_good_threshold + self.increase_threshold)
        self.register.set_pixel_register_value('TDAC', self.last_good_tdac)
        self.register.set_pixel_register_value('Enable', self.last_good_enable_mask)
        # write configuration to avoid high current states
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        commands.extend(self.register.get_commands("WrRegister", name=["Vthin_AltFine"]))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="TDAC"))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="Enable"))
        self.register_utils.send_commands(commands)

        with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.interpreter.set_warning_output(False)
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.interpreter.print_summary()
            analyze_raw_data.plot_histograms()
            plot_occupancy(self.last_occupancy_hist.T, title='Noisy Pixels at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_hist.T, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_occupancy_mask.T, title='Occupancy Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_mask.T, filename=analyze_raw_data.output_pdf)
            plot_three_way(self.last_tdac_distribution.T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), x_axis_title="TDAC", filename=analyze_raw_data.output_pdf, maximum=31, bins=32)
            plot_occupancy(self.last_tdac_distribution.T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), z_max=31, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.register.get_pixel_register_value('Enable').T, title='Enable Mask', z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.register.get_pixel_register_value('Enable').T, filename=analyze_raw_data.output_pdf)
Code example #7
File: tune_tdac.py  Project: experimentAccount0/pyBAR
    def analyze(self):
        # set here because original value is restored after scan()
        self.register.set_pixel_register_value("TDAC", self.tdac_mask_best)

        plot_three_way(hist=self.occupancy_best.transpose(), title="Occupancy after TDAC tuning", x_axis_title="Occupancy", filename=self.plots_filename, maximum=self.n_injections_tdac)
        plot_three_way(hist=self.tdac_mask_best.transpose(), title="TDAC distribution after tuning", x_axis_title="TDAC", filename=self.plots_filename, maximum=32)
        if self.close_plots:
            self.plots_filename.close()
Code example #8
    def analyze(self):
        # set here because original value is restored after scan()
        self.register_utils.set_gdac(self.gdac_best, send_command=False)

        plot_three_way(self.occ_array_sel_pixels_best.transpose(), title="Occupancy after GDAC tuning of selected pixels (GDAC " + str(self.gdac_best) + ")", x_axis_title='Occupancy', filename=self.plots_filename, maximum=self.n_injections_gdac)

        plot_three_way(self.occ_array_desel_pixels_best.transpose(), title="Occupancy after GDAC tuning of not selected pixels (GDAC " + str(self.gdac_best) + ")", x_axis_title='Occupancy', filename=self.plots_filename, maximum=self.n_injections_gdac)
        if self.close_plots:
            self.plots_filename.close()
Code example #9
File: scan_ileak.py  Project: ljthink/pyBAR
    def analyze(self):
        with tb.open_file(self.output_filename + '.h5', 'r') as in_file_h5:
            data = in_file_h5.root.Ileak_map[:]
            data = np.ma.masked_where(data == 0, data)
            plot_three_way(hist=data.transpose(),
                           title="Ileak",
                           x_axis_title="Ileak",
                           filename=self.output_filename + '.pdf')  # , minimum=0, maximum=np.amax(data))
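Code example #9 masks pixels without a measurement before plotting so that zeros do not distort the color scale. Below is a minimal standalone sketch of that step with a dummy Ileak map; the pyBAR import path is an assumption.

import numpy as np
from pybar.analysis.plotting.plotting import plot_three_way  # assumed import path

data = np.zeros((80, 336))  # dummy Ileak map in (column, row) layout
data[10:70, 20:300] = np.random.uniform(1.0, 5.0, size=(60, 280))
data = np.ma.masked_where(data == 0, data)  # hide pixels that were never measured
plot_three_way(hist=data.transpose(), title="Ileak", x_axis_title="Ileak", filename='ileak_example.pdf')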
Code example #10
File: plot_occupancy.py  Project: rggama/pyBAR
def draw_hit_map_from_raw_data(raw_data_file, front_ends):
    with PdfPages(os.path.splitext(raw_data_file)[0] + '.pdf') as output_pdf:
        with tb.open_file(raw_data_file, 'r') as in_file_h5:
            raw_data = in_file_h5.root.raw_data[:]
            for front_end in range(front_ends):
                print 'Create occupancy hist of front end %d' % front_end
                occupancy_array, _, _ = np.histogram2d(*readout_utils.convert_data_array(raw_data,
                                                                                         filter_func=readout_utils.logical_and(readout_utils.is_data_record, readout_utils.is_data_from_channel(4 - front_end)),
                                                                                         converter_func=readout_utils.get_col_row_array_from_data_record_array), bins=(80, 336), range=[[1, 80], [1, 336]])
                plotting.plot_three_way(hist=occupancy_array.T, title="Occupancy of chip %d" % front_end, x_axis_title="Occupancy", filename=output_pdf)
Code example #11
File: plot_occupancy.py  Project: PatrickAhl/pyBAR
def draw_hit_map_from_raw_data(raw_data_file, front_ends):
    with PdfPages(raw_data_file[:-3] + '.pdf') as output_pdf:
        with tb.open_file(raw_data_file, 'r') as in_file_h5:
            raw_data = in_file_h5.root.raw_data[:]
            for front_end in range(front_ends):
                print 'Create occupancy hist of front end %d' % front_end
                occupancy_array, _, _ = np.histogram2d(*readout_utils.convert_data_array(raw_data,
                                                                                         filter_func=readout_utils.logical_and(readout_utils.is_data_record, readout_utils.is_data_from_channel(4 - front_end)),
                                                                                         converter_func=readout_utils.get_col_row_array_from_data_record_array), bins=(80, 336), range=[[1, 80], [1, 336]])
                plotting.plot_three_way(hist=occupancy_array.T, title="Occupancy of chip %d" % front_end, x_axis_title="Occupancy", filename=output_pdf)
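Both plot_occupancy.py variants build the occupancy histogram the same way: hit column/row arrays are binned into an 80 x 336 (column, row) array with np.histogram2d and transposed to (row, column) before plotting. The sketch below reproduces that step with synthetic hits; the pyBAR import path is an assumption.

import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from pybar.analysis.plotting.plotting import plot_three_way  # assumed import path

col = np.random.randint(1, 81, size=10000)   # dummy hit columns (1..80)
row = np.random.randint(1, 337, size=10000)  # dummy hit rows (1..336)
occupancy_array, _, _ = np.histogram2d(col, row, bins=(80, 336), range=[[1, 80], [1, 336]])
with PdfPages('occupancy_example.pdf') as output_pdf:
    plot_three_way(hist=occupancy_array.T, title="Occupancy of chip 0", x_axis_title="Occupancy", filename=output_pdf)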
Code example #12
File: tune_gdac.py  Project: CARIBOuSystem/pyBAR
    def analyze(self):
        self.register_utils.set_gdac(self.gdac_best, send_command=False)

        plot_three_way(self.occ_array_sel_pixel.transpose(),
                       title="Occupancy after GDAC tuning (GDAC " +
                       str(self.scan_parameters.GDAC) + ")",
                       x_axis_title='Occupancy',
                       filename=self.plots_filename,
                       maximum=self.n_injections_gdac)
        if self.close_plots:
            self.plots_filename.close()
Code example #13
File: tune_gdac.py  Project: CARIBOuSystem/pyBAR
    def analyze(self):
        self.register_utils.set_gdac(self.gdac_best, send_command=False)

        plot_three_way(
            self.occ_array_sel_pixel.transpose(),
            title="Occupancy after GDAC tuning (GDAC " + str(self.scan_parameters.GDAC) + ")",
            x_axis_title="Occupancy",
            filename=self.plots_filename,
            maximum=self.n_injections_gdac,
        )
        if self.close_plots:
            self.plots_filename.close()
Code example #14
    def analyze(self):
        self.register.set_global_register_value("Vthin_AltFine", self.threshold[0])
        self.register.set_pixel_register_value('TDAC', self.new_tdac[0])
        self.register.set_pixel_register_value('Enable', self.new_enable_mask[0])  # use enable mask from the lowest point to mask bad pixels
        # write configuration to avoid high current states
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        commands.extend(self.register.get_commands("WrRegister", name=["Vthin_AltFine"]))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="TDAC"))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="Enable"))
        self.register_utils.send_commands(commands)

        with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.interpreter.set_warning_output(False)
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.interpreter.print_summary()
            analyze_raw_data.plot_histograms()
            last_step = None
            for step in range(self.plot_n_steps, -1, -1):
                if self.threshold[step] is not None:
                    plot_occupancy(self.occupancy_hist[step].T, title='Occupancy at Vthin_AltFine %d Step %d' % (self.threshold[step], self.tdac_step[step]), filename=analyze_raw_data.output_pdf)
                    plot_fancy_occupancy(self.occupancy_hist[step].T, filename=analyze_raw_data.output_pdf)
                    plot_occupancy(self.occupancy_mask[step].T, title='Noisy pixels at Vthin_AltFine %d Step %d' % (self.threshold[step], self.tdac_step[step]), z_max=1, filename=analyze_raw_data.output_pdf)
                    plot_fancy_occupancy(self.occupancy_mask[step].T, filename=analyze_raw_data.output_pdf)
                    plot_three_way(self.tdac[step].T, title='TDAC at Vthin_AltFine %d Step %d' % (self.threshold[step], self.tdac_step[step]), x_axis_title="TDAC", filename=analyze_raw_data.output_pdf, maximum=31, bins=32)
                    plot_occupancy(self.tdac[step].T, title='TDAC at Vthin_AltFine %d Step %d' % (self.threshold[step], self.tdac_step[step]), z_max=31, filename=analyze_raw_data.output_pdf)
                    plot_occupancy(self.enable_mask[step].T, title='Enable mask at Vthin_AltFine %d Step %d' % (self.threshold[step], self.tdac_step[step]), z_max=1, filename=analyze_raw_data.output_pdf)
                    # adding Poisson statistics plots
                    fig = Figure()
                    FigureCanvas(fig)
                    ax = fig.add_subplot(111)
                    ax.set_title("Hit statistics")
                    hist, bin_edges = np.histogram(self.occupancy_hist[step], bins=np.arange(0.0, np.max(self.occupancy_hist[step]) + 2, 1.0))
                    try:
                        _, idx = hist_quantiles(hist, [0.0, 0.9], return_indices=True)
                    except IndexError:
                        idx = [0, 1]
                    bins = np.arange(0, np.maximum(bin_edges[idx[1]], stats.poisson.ppf(0.9999, mu=self.occupancy_limit * self.n_triggers * self.consecutive_lvl1)) + 2, 1)
                    ax.hist(self.occupancy_hist[step].flatten(), bins=bins, align='left', alpha=0.5, label="Measured occupancy")
                    ax.bar(x=bins[:-1], height=stats.poisson.pmf(k=bins[:-1], mu=self.occupancy_limit * self.n_triggers * self.consecutive_lvl1) * self.enable_mask[step].sum(), alpha=0.5, width=1.0, color="r", label="Expected occupancy (Poisson statistics)")
                    # ax.hist(stats.poisson.rvs(mu=self.occupancy_limit * self.n_triggers * self.consecutive_lvl1, size=self.enable_mask[step].sum()), bins=bins, align='left', alpha=0.5, label="Expected occupancy (Poisson statistics)")
                    ax.set_xlabel('#Hits')
                    ax.set_ylabel('#Pixels')
                    ax.legend()
                    analyze_raw_data.output_pdf.savefig(fig)
                    last_step = step
            if last_step is not None:
                plot_three_way(self.new_tdac[last_step].T, title='Final TDAC after Vthin_AltFine %d Step %d' % (self.threshold[last_step], self.tdac_step[last_step]), x_axis_title="TDAC", filename=analyze_raw_data.output_pdf, maximum=31, bins=32)
                plot_occupancy(self.new_tdac[last_step].T, title='Final TDAC after Vthin_AltFine %d Step %d' % (self.threshold[last_step], self.tdac_step[last_step]), z_max=31, filename=analyze_raw_data.output_pdf)
                plot_occupancy(self.new_enable_mask[last_step].T, title='Final Enable mask after Vthin_AltFine %d Step %d' % (self.threshold[last_step], self.tdac_step[last_step]), z_max=1, filename=analyze_raw_data.output_pdf)
Code example #15
    def analyze(self):
        if self.global_iterations:
            GdacTuning.analyze(self)
            FeedbackTuning.analyze(self)
        if self.local_iterations:
            TdacTuning.analyze(self)
            FdacTuning.analyze(self)

        if self.make_plots:
            if self.local_iterations:
                plot_three_way(hist=self.tot_mean_best.transpose(),
                               title="Mean ToT after last FDAC tuning",
                               x_axis_title='Mean ToT',
                               filename=self.plots_filename)
                plot_three_way(
                    hist=self.register.get_pixel_register_value(
                        "FDAC").transpose(),
                    title="FDAC distribution after last FDAC tuning",
                    x_axis_title='FDAC',
                    filename=self.plots_filename,
                    maximum=16)
            if self.local_iterations >= 0:
                plot_three_way(hist=self.occupancy_best.transpose(),
                               title="Occupancy after tuning",
                               x_axis_title='Occupancy',
                               filename=self.plots_filename,
                               maximum=100)
                plot_three_way(hist=self.register.get_pixel_register_value(
                    "TDAC").transpose(),
                               title="TDAC distribution after complete tuning",
                               x_axis_title='TDAC',
                               filename=self.plots_filename,
                               maximum=32)

            self.plots_filename.close()
Code example #16
    def analyze(self):
        if self.global_iterations > 0:
            FeedbackTuning.analyze(self)
        if self.global_iterations >= 0:
            GdacTuning.analyze(self)

        if self.local_iterations > 0:
            FdacTuning.analyze(self)
        if self.local_iterations >= 0:
            TdacTuning.analyze(self)

        # write configuration to avoid high current states
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        commands.extend(self.register.get_commands("WrRegister", name=["Vthin_AltCoarse", "Vthin_AltFine", "PrmpVbpf"]))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="TDAC"))
        commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="FDAC"))
        self.register_utils.send_commands(commands)

        if self.local_iterations > 0:
            plot_three_way(hist=self.tot_mean_best.transpose(), title="Mean ToT after last FDAC tuning", x_axis_title='Mean ToT', filename=self.plots_filename, maximum=15)
            plot_three_way(hist=self.register.get_pixel_register_value("FDAC").transpose(), title="FDAC distribution after last FDAC tuning", x_axis_title='FDAC', filename=self.plots_filename, maximum=15)
        if self.local_iterations >= 0:
            plot_three_way(hist=self.occupancy_best.transpose(), title="Occupancy after last TDAC tuning", x_axis_title='Occupancy', filename=self.plots_filename, maximum=self.n_injections_tdac)
            plot_three_way(hist=self.register.get_pixel_register_value("TDAC").transpose(), title="TDAC distribution after last TDAC tuning", x_axis_title='TDAC', filename=self.plots_filename, maximum=31)

        self.plots_filename.close()
Code example #17
    def analyze(self):
        self.register.set_pixel_register_value("FDAC", self.fdac_mask_best)

        plot_three_way(hist=self.tot_mean_best.transpose(),
                       title="Mean ToT after FDAC tuning",
                       x_axis_title="Mean ToT",
                       filename=self.plots_filename,
                       minimum=0,
                       maximum=15)
        plot_three_way(hist=self.fdac_mask_best.transpose(),
                       title="FDAC distribution after tuning",
                       x_axis_title="FDAC",
                       filename=self.plots_filename,
                       minimum=0,
                       maximum=15)
        if self.close_plots:
            self.plots_filename.close()
Code example #18
    def analyze(self):
        self.register.set_global_register_value("Vthin_AltFine", self.last_good_threshold + self.increase_threshold)
        self.register.set_pixel_register_value('TDAC', self.last_good_tdac)
        self.register.set_pixel_register_value('Enable', self.last_good_enable_mask)

        with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_source_scan_hist = True
            analyze_raw_data.interpreter.set_warning_output(False)
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.interpreter.print_summary()
            analyze_raw_data.plot_histograms()
            plot_occupancy(self.last_occupancy_hist.T, title='Noisy Pixels at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_hist.T, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.last_occupancy_mask.T, title='Occupancy Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.last_occupancy_mask.T, filename=analyze_raw_data.output_pdf)
            plot_three_way(self.last_tdac_distribution.T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), x_axis_title="TDAC", filename=analyze_raw_data.output_pdf, maximum=31, bins=32)
            plot_occupancy(self.last_tdac_distribution.T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), z_max=31, filename=analyze_raw_data.output_pdf)
            plot_occupancy(self.register.get_pixel_register_value('Enable').T, title='Enable Mask', z_max=1, filename=analyze_raw_data.output_pdf)
            plot_fancy_occupancy(self.register.get_pixel_register_value('Enable').T, filename=analyze_raw_data.output_pdf)
Code example #19
File: test_register.py  Project: makoc/pyBAR
    def test_pixel_register(self,
                            pix_regs=[
                                "EnableDigInj", "Imon", "Enable", "C_High",
                                "C_Low", "TDAC", "FDAC"
                            ],
                            dcs=range(40)):
        '''Test Pixel Register
        '''
        logging.info('Running Pixel Register Test for %s', str(pix_regs))
        self.register_utils.configure_pixel()
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        self.register_utils.send_commands(commands)
        self.fifo_readout.reset_sram_fifo()

        pixel_register_errors = 0

        plots = PdfPages(self.output_filename + ".pdf")

        for i, result in enumerate(
                read_pixel_register(self, pix_regs=pix_regs, dcs=dcs)):
            result_array = np.ones_like(result)
            result_array.data[result == self.register.get_pixel_register_value(
                pix_regs[i])] = 0
            pixel_register_errors += np.count_nonzero(result_array == 1)
            logging.info("Pixel register %s: %d pixel error", pix_regs[i],
                         np.count_nonzero(result_array == 1))
            plotting.plot_three_way(
                result_array.T,
                title=str(pix_regs[i]) + " register test with " +
                str(np.count_nonzero(result_array == 1)) + '/' +
                str(26880 - np.ma.count_masked(result_array)) +
                " pixel failing",
                x_axis_title="0:OK, 1:FAIL",
                maximum=1,
                filename=plots)

        plots.close()
        return pixel_register_errors
Code example #20
File: test_register.py  Project: PatrickAhl/pyBAR
    def test_pixel_register(self, pix_regs=["EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"], dcs=range(40)):
        '''Test Pixel Register
        '''
        logging.info('Running Pixel Register Test for %s', str(pix_regs))
        self.register_utils.configure_pixel()
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        self.register_utils.send_commands(commands)
        self.fifo_readout.reset_sram_fifo()

        pixel_register_errors = 0

        plots = PdfPages(self.output_filename + ".pdf")

        for i, result in enumerate(read_pixel_register(self, pix_regs=pix_regs, dcs=dcs)):
            result_array = np.ones_like(result)
            result_array.data[result == self.register.get_pixel_register_value(pix_regs[i])] = 0
            pixel_register_errors += np.count_nonzero(result_array == 1)
            logging.info("Pixel register %s: %d pixel error", pix_regs[i], np.count_nonzero(result_array == 1))
            plotting.plot_three_way(result_array.T, title=str(pix_regs[i]) + " register test with " + str(np.count_nonzero(result_array == 1)) + '/' + str(26880 - np.ma.count_masked(result_array)) + " pixel failing", x_axis_title="0:OK, 1:FAIL", maximum=1, filename=plots)

        plots.close()
        return pixel_register_errors
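The register test builds a per-pixel fail map by comparing read-back pixel register values with the expected ones and then plots that map. A minimal sketch of the comparison on synthetic data follows; the dummy values and the pyBAR import path are assumptions.

import numpy as np
from pybar.analysis.plotting.plotting import plot_three_way  # assumed import path

expected = np.random.randint(0, 32, size=(80, 336))  # expected TDAC values
read_back = expected.copy()
read_back[5, 100] ^= 1                               # inject a single read-back error
result_array = np.ones_like(read_back)
result_array[read_back == expected] = 0              # 0: OK, 1: FAIL
n_errors = np.count_nonzero(result_array == 1)
plot_three_way(result_array.T, title="TDAC register test with %d pixels failing" % n_errors,
               x_axis_title="0:OK, 1:FAIL", maximum=1, filename='register_test_example.pdf')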
Code example #21
File: tune_fei4.py  Project: CARIBOuSystem/pyBAR
    def analyze(self):
        if self.global_iterations:
            GdacTuning.analyze(self)
            FeedbackTuning.analyze(self)
        if self.local_iterations:
            TdacTuning.analyze(self)
            FdacTuning.analyze(self)

        if self.make_plots:
            if self.local_iterations:
                plot_three_way(hist=self.tot_mean_best.transpose(), title="Mean ToT after last FDAC tuning", x_axis_title='Mean ToT', filename=self.plots_filename)
                plot_three_way(hist=self.register.get_pixel_register_value("FDAC").transpose(), title="FDAC distribution after last FDAC tuning", x_axis_title='FDAC', filename=self.plots_filename, maximum=16)
            if self.local_iterations >= 0:
                plot_three_way(hist=self.occupancy_best.transpose(), title="Occupancy after tuning", x_axis_title='Occupancy', filename=self.plots_filename, maximum=100)
                plot_three_way(hist=self.register.get_pixel_register_value("TDAC").transpose(), title="TDAC distribution after complete tuning", x_axis_title='TDAC', filename=self.plots_filename, maximum=32)

            self.plots_filename.close()
Code example #22
    def scan(self):
        enable_mask_steps = []
        cal_lvl1_command = self.register.get_commands(
            "CAL")[0] + self.register.get_commands(
                "zeros", length=40)[0] + self.register.get_commands("LV1")[0]

        self.write_target_threshold()
        additional_scan = True
        lastBitResult = np.zeros(
            shape=self.register.get_pixel_register_value("TDAC").shape,
            dtype=self.register.get_pixel_register_value("TDAC").dtype)

        self.set_start_tdac()

        self.occupancy_best = np.full(
            shape=(80, 336), fill_value=self.n_injections_tdac
        )  # array to store the best occupancy (closest to Ninjections/2) of the pixel
        self.tdac_mask_best = self.register.get_pixel_register_value("TDAC")
        tdac_tune_bits = self.tdac_tune_bits[:]
        for scan_parameter_value, tdac_bit in enumerate(tdac_tune_bits):
            if self.stop_run.is_set():
                break
            if additional_scan:
                self.set_tdac_bit(tdac_bit)
                logging.info('TDAC setting: bit %d = 1', tdac_bit)
            else:
                self.set_tdac_bit(tdac_bit, bit_value=0)
                logging.info('TDAC setting: bit %d = 0', tdac_bit)

            self.write_tdac_config()

            with self.readout(TDAC=scan_parameter_value, fill_buffer=True):
                scan_loop(self,
                          command=cal_lvl1_command,
                          repeat_command=self.n_injections_tdac,
                          mask_steps=self.mask_steps,
                          enable_mask_steps=enable_mask_steps,
                          enable_double_columns=None,
                          same_mask_for_all_dc=self.same_mask_for_all_dc,
                          eol_function=None,
                          digital_injection=False,
                          enable_shift_masks=self.enable_shift_masks,
                          disable_shift_masks=self.disable_shift_masks,
                          restore_shift_masks=True,
                          mask=None,
                          double_column_correction=self.pulser_dac_correction)

            data = convert_data_array(
                array=self.read_data(),
                filter_func=is_data_record,
                converter_func=get_col_row_array_from_data_record_array)
            occupancy_array, _, _ = np.histogram2d(*data,
                                                   bins=(80, 336),
                                                   range=[[1, 80], [1, 336]])
            select_better_pixel_mask = abs(occupancy_array -
                                           self.n_injections_tdac / 2) <= abs(
                                               self.occupancy_best -
                                               self.n_injections_tdac / 2)
            pixel_with_too_high_occupancy_mask = occupancy_array > self.n_injections_tdac / 2
            self.occupancy_best[select_better_pixel_mask] = occupancy_array[
                select_better_pixel_mask]

            if self.plot_intermediate_steps:
                plot_three_way(occupancy_array.transpose(),
                               title="Occupancy (TDAC tuning bit " +
                               str(tdac_bit) + ")",
                               x_axis_title='Occupancy',
                               filename=self.plots_filename,
                               maximum=self.n_injections_tdac)

            tdac_mask = self.register.get_pixel_register_value("TDAC")
            self.tdac_mask_best[select_better_pixel_mask] = tdac_mask[
                select_better_pixel_mask]

            if tdac_bit > 0:
                tdac_mask[pixel_with_too_high_occupancy_mask] = tdac_mask[
                    pixel_with_too_high_occupancy_mask] & ~(1 << tdac_bit)
                self.register.set_pixel_register_value("TDAC", tdac_mask)

            if tdac_bit == 0:
                if additional_scan:  # scan bit = 0 with the correct value again
                    additional_scan = False
                    lastBitResult = occupancy_array.copy()
                    tdac_tune_bits.append(0)  # bit 0 has to be scanned twice
                else:
                    tdac_mask[
                        abs(occupancy_array - self.n_injections_tdac / 2) >
                        abs(lastBitResult -
                            self.n_injections_tdac / 2)] = tdac_mask[
                                abs(occupancy_array - self.n_injections_tdac /
                                    2) > abs(lastBitResult -
                                             self.n_injections_tdac / 2)] | (
                                                 1 << tdac_bit)
                    occupancy_array[
                        abs(occupancy_array - self.n_injections_tdac / 2) >
                        abs(lastBitResult -
                            self.n_injections_tdac / 2)] = lastBitResult[
                                abs(occupancy_array - self.n_injections_tdac /
                                    2) > abs(lastBitResult -
                                             self.n_injections_tdac / 2)]
                    self.occupancy_best[
                        abs(occupancy_array - self.n_injections_tdac / 2) <=
                        abs(self.occupancy_best -
                            self.n_injections_tdac / 2)] = occupancy_array[
                                abs(occupancy_array - self.n_injections_tdac /
                                    2) <= abs(self.occupancy_best -
                                              self.n_injections_tdac / 2)]
                    self.tdac_mask_best[
                        abs(occupancy_array - self.n_injections_tdac / 2) <=
                        abs(self.occupancy_best -
                            self.n_injections_tdac / 2)] = tdac_mask[
                                abs(occupancy_array - self.n_injections_tdac /
                                    2) <= abs(self.occupancy_best -
                                              self.n_injections_tdac / 2)]

        self.register.set_pixel_register_value(
            "TDAC", self.tdac_mask_best)  # set value for meta scan
        self.write_tdac_config()
Code example #23
def create_threshold_calibration(scan_base_file_name, create_plots=True):  # Create calibration function, can be called stand alone
    def analyze_raw_data_file(file_name):
        if os.path.isfile(file_name[:-3] + '_interpreted.h5'):  # skip analysis if already done
            logging.warning('Analyzed data file ' + file_name + ' already exists. Skip analysis for this file.')
        else:
            with AnalyzeRawData(raw_data_file=file_name, create_pdf=False) as analyze_raw_data:
                analyze_raw_data.create_tot_hist = False
                analyze_raw_data.create_tot_pixel_hist = False
                analyze_raw_data.create_fitted_threshold_hists = True
                analyze_raw_data.create_threshold_mask = True
                analyze_raw_data.interpreter.set_warning_output(False)  # RX errors would fill the console
                analyze_raw_data.interpret_word_table()

    def store_calibration_data_as_table(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_values):
        logging.info("Storing calibration data in a table...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_table = out_file_h5.createTable(out_file_h5.root, name='MeanThresholdCalibration', description=data_struct.MeanThresholdCalibrationTable, title='mean_threshold_calibration', filters=filter_table)
        threshold_calib_table = out_file_h5.createTable(out_file_h5.root, name='ThresholdCalibration', description=data_struct.ThresholdCalibrationTable, title='threshold_calibration', filters=filter_table)
        for column in range(80):
            for row in range(336):
                for parameter_value_index, parameter_value in enumerate(parameter_values):
                    threshold_calib_table.row['column'] = column
                    threshold_calib_table.row['row'] = row
                    threshold_calib_table.row['parameter_value'] = parameter_value
                    threshold_calib_table.row['threshold'] = threshold_calibration[column, row, parameter_value_index]
                    threshold_calib_table.row.append()
        for parameter_value_index, parameter_value in enumerate(parameter_values):
            mean_threshold_calib_table.row['parameter_value'] = parameter_value
            mean_threshold_calib_table.row['mean_threshold'] = mean_threshold_calibration[parameter_value_index]
            mean_threshold_calib_table.row['threshold_rms'] = mean_threshold_rms_calibration[parameter_value_index]
            mean_threshold_calib_table.row.append()
        threshold_calib_table.flush()
        mean_threshold_calib_table.flush()
        logging.info("done")

    def store_calibration_data_as_array(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_name, parameter_values):
        logging.info("Storing calibration data in an array...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdMeanCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_calibration', filters=filter_table)
        mean_threshold_calib_rms_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdRMSCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_rms_calibration', filters=filter_table)
        threshold_calib_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdCalibration', atom=tb.Atom.from_dtype(threshold_calibration.dtype), shape=threshold_calibration.shape, title='threshold_calibration', filters=filter_table)
        mean_threshold_calib_array[:] = mean_threshold_calibration
        mean_threshold_calib_rms_array[:] = mean_threshold_rms_calibration
        threshold_calib_array[:] = threshold_calibration
        mean_threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name]
        mean_threshold_calib_rms_array.attrs.dimensions = ['column', 'row', parameter_name]
        threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name]
        mean_threshold_calib_array.attrs.scan_parameter_values = parameter_values
        mean_threshold_calib_rms_array.attrs.scan_parameter_values = parameter_values
        threshold_calib_array.attrs.scan_parameter_values = parameter_values

        logging.info("done")

    def mask_columns(pixel_array, ignore_columns):
        idx = np.array(ignore_columns) - 1  # from FE to Array columns
        m = np.zeros_like(pixel_array)
        m[:, idx] = 1
        return np.ma.masked_array(pixel_array, m)

    raw_data_files = analysis_utils.get_data_file_names_from_scan_base(scan_base_file_name, filter_file_words=['interpreted', 'calibration_calibration'])
    first_scan_base_file_name = scan_base_file_name if isinstance(scan_base_file_name, basestring) else scan_base_file_name[0]  # multiple scan_base_file_names for multiple runs

    with tb.openFile(first_scan_base_file_name + '.h5', mode="r") as in_file_h5:  # deduce scan parameters from the first (and often only) scan base file name
        ignore_columns = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'ignore_columns')]['value'][0]
        parameter_name = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'scan_parameters')]['value'][0]
        ignore_columns = ast.literal_eval(ignore_columns)
        parameter_name = ast.literal_eval(parameter_name)[1][0]

    calibration_file = first_scan_base_file_name + '_calibration'

    for raw_data_file in raw_data_files:  # analyze each raw data file; multithreading is not used here because it is already used in the S-curve fit
        analyze_raw_data_file(raw_data_file)

    files_per_parameter = analysis_utils.get_parameter_value_from_file_names([file_name[:-3] + '_interpreted.h5' for file_name in raw_data_files], parameter_name, unique=True, sort=True)

    logging.info("Create calibration from data")
    mean_threshold_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    mean_threshold_rms_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    threshold_calibration = np.empty(shape=(80, 336, len(raw_data_files)), dtype='<f8')

    if create_plots:
        logging.info('Saving calibration plots in: %s', calibration_file + '.pdf')
        output_pdf = PdfPages(calibration_file + '.pdf')

    progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(files_per_parameter.items()), term_width=80)
    progress_bar.start()
    parameter_values = []
    for index, (analyzed_data_file, parameters) in enumerate(files_per_parameter.items()):
        parameter_values.append(parameters.values()[0][0])
        with tb.openFile(analyzed_data_file, mode="r") as in_file_h5:
            occupancy_masked = mask_columns(pixel_array=in_file_h5.root.HistOcc[:], ignore_columns=ignore_columns)  # mask the not scanned columns for analysis and plotting
            thresholds_masked = mask_columns(pixel_array=in_file_h5.root.HistThresholdFitted[:], ignore_columns=ignore_columns)
            if create_plots:
                plot_three_way(hist=thresholds_masked, title='Threshold Fitted for ' + parameters.keys()[0] + ' = ' + str(parameters.values()[0][0]), filename=output_pdf)
                plsr_dacs = analysis_utils.get_scan_parameter(meta_data_array=in_file_h5.root.meta_data[:])['PlsrDAC']
                plot_scurves(occupancy_hist=occupancy_masked, scan_parameters=plsr_dacs, scan_parameter_name='PlsrDAC', filename=output_pdf)
            # fill the calibration data arrays
            mean_threshold_calibration[index] = np.ma.mean(thresholds_masked)
            mean_threshold_rms_calibration[index] = np.ma.std(thresholds_masked)
            threshold_calibration[:, :, index] = thresholds_masked.T
        progress_bar.update(index)
    progress_bar.finish()

    with tb.openFile(calibration_file + '.h5', mode="w") as out_file_h5:
        store_calibration_data_as_array(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_name=parameter_name, parameter_values=parameter_values)
        store_calibration_data_as_table(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_values=parameter_values)

    if create_plots:
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=False, filename=output_pdf)
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=True, filename=output_pdf)
        output_pdf.close()
Code example #24
def histogram_tdc_hits(input_file_hits, hit_selection_conditions, event_status_select_mask, event_status_condition, calibation_file=None, max_tdc=analysis_configuration['max_tdc'], n_bins=analysis_configuration['n_bins']):
    for condition in hit_selection_conditions:
        logging.info('Histogram tdc hits with %s', condition)

    def get_charge(max_tdc, tdc_calibration_values, tdc_pixel_calibration):  # return the charge from calibration
        charge_calibration = np.zeros(shape=(80, 336, max_tdc))
        for column in range(80):
            for row in range(336):
                actual_pixel_calibration = tdc_pixel_calibration[column, row, :]
                if np.any(actual_pixel_calibration != 0) and np.all(np.isfinite(actual_pixel_calibration)):
                    interpolation = interp1d(x=actual_pixel_calibration, y=tdc_calibration_values, kind='slinear', bounds_error=False, fill_value=0)
                    charge_calibration[column, row, :] = interpolation(np.arange(max_tdc))
        return charge_calibration

    def plot_tdc_tot_correlation(data, condition, output_pdf):
        logging.info('Plot correlation histogram for %s', condition)
        plt.clf()
        data = np.ma.array(data, mask=(data <= 0))
        if np.ma.any(data > 0):
            cmap = cm.get_cmap('jet', 200)
            cmap.set_bad('w')
            plt.title('Correlation with %s' % condition)
            norm = colors.LogNorm()
            z_max = data.max(fill_value=0)
            plt.xlabel('TDC')
            plt.ylabel('TOT')
            im = plt.imshow(data, cmap=cmap, norm=norm, aspect='auto', interpolation='nearest')  # , norm=norm)
            divider = make_axes_locatable(plt.gca())
            plt.gca().invert_yaxis()
            cax = divider.append_axes("right", size="5%", pad=0.1)
            plt.colorbar(im, cax=cax, ticks=np.linspace(start=0, stop=z_max, num=9, endpoint=True))
            output_pdf.savefig()
        else:
            logging.warning('No data for correlation plotting for %s', condition)

    def plot_hits_per_condition(output_pdf):
        logging.info('Plot hits selection efficiency histogram for %d conditions', len(hit_selection_conditions) + 2)
        labels = ['All Hits', 'Hits of\ngood events']
        for condition in hit_selection_conditions:
            condition = re.sub('[&]', '\n', condition)
            condition = re.sub('[()]', '', condition)
            labels.append(condition)
        plt.bar(range(len(n_hits_per_condition)), n_hits_per_condition, align='center')
        plt.xticks(range(len(n_hits_per_condition)), labels, size=8)
        plt.title('Number of hits for different cuts')
        plt.yscale('log')
        plt.ylabel('#')
        plt.grid()
        for x, y in zip(np.arange(len(n_hits_per_condition)), n_hits_per_condition):
            plt.annotate('%d' % (float(y) / float(n_hits_per_condition[0]) * 100.) + r'%', xy=(x, y / 2.), xycoords='data', color='grey', size=15)
        output_pdf.savefig()

    def plot_corrected_tdc_hist(x, y, title, output_pdf, point_style='-'):
        logging.info('Plot TDC hist with TDC calibration')
        plt.clf()
        y /= np.amax(y) if y.shape[0] > 0 else y
        plt.plot(x, y, point_style)
        plt.title(title, size=10)
        plt.xlabel('Charge [PlsrDAC]')
        plt.ylabel('Count [a.u.]')
        plt.grid()
        output_pdf.savefig()

    # Create data
    with tb.openFile(input_file_hits, mode="r") as in_hit_file_h5:
        cluster_hit_table = in_hit_file_h5.root.ClusterHits

        # Result hists, initialized per condition
        pixel_tdc_hists_per_condition = [np.zeros(shape=(80, 336, max_tdc), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        pixel_tdc_timestamp_hists_per_condition = [np.zeros(shape=(80, 336, 256), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        mean_pixel_tdc_hists_per_condition = [np.zeros(shape=(80, 336), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        mean_pixel_tdc_timestamp_hists_per_condition = [np.zeros(shape=(80, 336), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        tdc_hists_per_condition = [np.zeros(shape=(max_tdc), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        tdc_corr_hists_per_condition = [np.zeros(shape=(max_tdc, 16), dtype=np.uint32) for _ in hit_selection_conditions] if hit_selection_conditions else []

        n_hits_per_condition = [0 for _ in range(len(hit_selection_conditions) + 2)]  # conditions 1 and 2 are all hits and hits of good events

        logging.info('Select hits and create TDC histograms for %d cut conditions', len(hit_selection_conditions))
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_hit_table.shape[0], term_width=80)
        progress_bar.start()
        for cluster_hits, _ in analysis_utils.data_aligned_at_events(cluster_hit_table, chunk_size=1e8):
            n_hits_per_condition[0] += cluster_hits.shape[0]
            selected_events_cluster_hits = cluster_hits[np.logical_and(cluster_hits['TDC'] < max_tdc, (cluster_hits['event_status'] & event_status_select_mask) == event_status_condition)]
            n_hits_per_condition[1] += selected_events_cluster_hits.shape[0]
            for index, condition in enumerate(hit_selection_conditions):
                selected_cluster_hits = analysis_utils.select_hits(selected_events_cluster_hits, condition)
                n_hits_per_condition[2 + index] += selected_cluster_hits.shape[0]
                column, row, tdc = selected_cluster_hits['column'] - 1, selected_cluster_hits['row'] - 1, selected_cluster_hits['TDC']
                pixel_tdc_hists_per_condition[index] += analysis_utils.hist_3d_index(column, row, tdc, shape=(80, 336, max_tdc))
                mean_pixel_tdc_hists_per_condition[index] = np.average(pixel_tdc_hists_per_condition[index], axis=2, weights=range(0, max_tdc)) * np.sum(np.arange(0, max_tdc)) / pixel_tdc_hists_per_condition[index].sum(axis=2)
                tdc_timestamp = selected_cluster_hits['TDC_time_stamp']
                pixel_tdc_timestamp_hists_per_condition[index] += analysis_utils.hist_3d_index(column, row, tdc_timestamp, shape=(80, 336, 256))
                mean_pixel_tdc_timestamp_hists_per_condition[index] = np.average(pixel_tdc_timestamp_hists_per_condition[index], axis=2, weights=range(0, 256)) * np.sum(np.arange(0, 256)) / pixel_tdc_timestamp_hists_per_condition[index].sum(axis=2)
                tdc_hists_per_condition[index] = pixel_tdc_hists_per_condition[index].sum(axis=(0, 1))
                tdc_corr_hists_per_condition[index] += analysis_utils.hist_2d_index(tdc, selected_cluster_hits['tot'], shape=(max_tdc, 16))
            progress_bar.update(n_hits_per_condition[0])
        progress_bar.finish()

        # Take TDC calibration if available and calculate charge for each TDC value and pixel
        if calibation_file is not None:
            with tb.openFile(calibation_file, mode="r") as in_file_calibration_h5:
                tdc_calibration = in_file_calibration_h5.root.HitOrCalibration[:, :, :, 1]
                tdc_calibration_values = in_file_calibration_h5.root.HitOrCalibration.attrs.scan_parameter_values[:]
            charge_calibration = get_charge(max_tdc, tdc_calibration_values, tdc_calibration)
        else:
            charge_calibration = None

        # Store data of result histograms
        with tb.open_file(input_file_hits[:-3] + '_tdc_hists.h5', mode="w") as out_file_h5:
            for index, condition in enumerate(hit_selection_conditions):
                pixel_tdc_hist_result = np.swapaxes(pixel_tdc_hists_per_condition[index], 0, 1)
                pixel_tdc_timestamp_hist_result = np.swapaxes(pixel_tdc_timestamp_hists_per_condition[index], 0, 1)
                mean_pixel_tdc_hist_result = np.swapaxes(mean_pixel_tdc_hists_per_condition[index], 0, 1)
                mean_pixel_tdc_timestamp_hist_result = np.swapaxes(mean_pixel_tdc_timestamp_hists_per_condition[index], 0, 1)
                tdc_hists_per_condition_result = tdc_hists_per_condition[index]
                tdc_corr_hist_result = np.swapaxes(tdc_corr_hists_per_condition[index], 0, 1)
                # Create result hists
                out_1 = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTdcCondition_%d' % index, title='Hist Pixel Tdc with %s' % condition, atom=tb.Atom.from_dtype(pixel_tdc_hist_result.dtype), shape=pixel_tdc_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_2 = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTdcTimestampCondition_%d' % index, title='Hist Pixel Tdc Timestamp with %s' % condition, atom=tb.Atom.from_dtype(pixel_tdc_timestamp_hist_result.dtype), shape=pixel_tdc_timestamp_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_3 = out_file_h5.createCArray(out_file_h5.root, name='HistMeanPixelTdcCondition_%d' % index, title='Hist Mean Pixel Tdc with %s' % condition, atom=tb.Atom.from_dtype(mean_pixel_tdc_hist_result.dtype), shape=mean_pixel_tdc_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_4 = out_file_h5.createCArray(out_file_h5.root, name='HistMeanPixelTdcTimestampCondition_%d' % index, title='Hist Mean Pixel Tdc Timestamp with %s' % condition, atom=tb.Atom.from_dtype(mean_pixel_tdc_timestamp_hist_result.dtype), shape=mean_pixel_tdc_timestamp_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_5 = out_file_h5.createCArray(out_file_h5.root, name='HistTdcCondition_%d' % index, title='Hist Tdc with %s' % condition, atom=tb.Atom.from_dtype(tdc_hists_per_condition_result.dtype), shape=tdc_hists_per_condition_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_6 = out_file_h5.createCArray(out_file_h5.root, name='HistTdcCorrCondition_%d' % index, title='Hist Correlation Tdc/Tot with %s' % condition, atom=tb.Atom.from_dtype(tdc_corr_hist_result.dtype), shape=tdc_corr_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                # Add result hists information
                out_1.attrs.dimensions, out_1.attrs.condition, out_1.attrs.tdc_values = 'column, row, TDC value', condition, range(max_tdc)
                out_2.attrs.dimensions, out_2.attrs.condition, out_2.attrs.tdc_values = 'column, row, TDC time stamp value', condition, range(256)
                out_3.attrs.dimensions, out_3.attrs.condition = 'column, row, mean TDC value', condition
                out_4.attrs.dimensions, out_4.attrs.condition = 'column, row, mean TDC time stamp value', condition
                out_5.attrs.dimensions, out_5.attrs.condition = 'PlsrDAC', condition
                out_6.attrs.dimensions, out_6.attrs.condition = 'TDC, TOT', condition
                out_1[:], out_2[:], out_3[:], out_4[:], out_5[:], out_6[:] = pixel_tdc_hist_result, pixel_tdc_timestamp_hist_result, mean_pixel_tdc_hist_result, mean_pixel_tdc_timestamp_hist_result, tdc_hists_per_condition_result, tdc_corr_hist_result

                if charge_calibration is not None:
                    # Select only valid pixels for histogramming: they have data and a calibration (that is, any charge(TDC) calibration != 0)
                    valid_pixel = np.where(np.logical_and(charge_calibration[:, :, :max_tdc].sum(axis=2) > 0, pixel_tdc_hist_result[:, :, :max_tdc].swapaxes(0, 1).sum(axis=2) > 0))

                    mean_charge_calibration = charge_calibration[valid_pixel][:, :max_tdc].mean(axis=0)
                    mean_tdc_hist = pixel_tdc_hist_result.swapaxes(0, 1)[valid_pixel][:, :max_tdc].mean(axis=0)
                    result_array = np.rec.array(np.column_stack((mean_charge_calibration, mean_tdc_hist)), dtype=[('charge', float), ('count', float)])
                    out_6 = out_file_h5.create_table(out_file_h5.root, name='HistMeanTdcCalibratedCondition_%d' % index, description=result_array.dtype, title='Hist Tdc with mean charge calibration and %s' % condition, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                    out_6.attrs.condition = condition
                    out_6.attrs.n_pixel = valid_pixel[0].shape[0]
                    out_6.append(result_array)
                    # Create charge histogram with per pixel TDC(charge) calibration
                    x, y = charge_calibration[valid_pixel][:, :max_tdc].ravel(), np.ravel(pixel_tdc_hist_result.swapaxes(0, 1)[valid_pixel][:, :max_tdc].ravel())
                    y, x = y[x > 0], x[x > 0]  # remove hit TDCs without a proper PlsrDAC(TDC) calibration
                    x, y, yerr = analysis_utils.get_profile_histogram(x, y, n_bins=n_bins)
                    result_array = np.rec.array(np.column_stack((x, y, yerr)), dtype=[('charge', float), ('count', float), ('count_error', float)])
                    out_7 = out_file_h5.create_table(out_file_h5.root, name='HistTdcCalibratedCondition_%d' % index, description=result_array.dtype, title='Hist Tdc with per pixel charge calibration and %s' % condition, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                    out_7.attrs.condition = condition
                    out_7.attrs.n_pixel = valid_pixel[0].shape[0]
                    out_7.append(result_array)

    # Plot Data
    with PdfPages(input_file_hits[:-3] + '_calibrated_tdc_hists.pdf') as output_pdf:
        plot_hits_per_condition(output_pdf)
        with tb.open_file(input_file_hits[:-3] + '_tdc_hists.h5', mode="r") as in_file_h5:
            for node in in_file_h5.root:  # go through the data and plot them
                if 'MeanPixel' in node.name:
                    try:
                        plot_three_way(np.ma.masked_invalid(node[:]) * 1.5625, title='Mean TDC delay, hits with\n%s' % node._v_attrs.condition if 'Timestamp' in node.name else 'Mean TDC, hits with\n%s' % node._v_attrs.condition, filename=output_pdf)
                    except ValueError:
                        logging.warning('Cannot plot TDC delay')
                elif 'HistTdcCondition' in node.name:
                    hist_1d = node[:]
                    entry_index = np.where(hist_1d != 0)
                    if entry_index[0].shape[0] != 0:
                        max_index = np.amax(entry_index)
                    else:
                        max_index = max_tdc
                    plot_1d_hist(hist_1d[:max_index + 10], title='TDC histogram, hits with\n%s' % node._v_attrs.condition if 'Timestamp' not in node.name else 'TDC time stamp histogram, hits with\n%s' % node._v_attrs.condition, x_axis_title='TDC' if 'Timestamp' not in node.name else 'TDC time stamp', filename=output_pdf)
                elif 'HistPixelTdc' in node.name:
                    hist_3d = node[:]
                    entry_index = np.where(hist_3d.sum(axis=(0, 1)) != 0)
                    if entry_index[0].shape[0] != 0:
                        max_index = np.amax(entry_index)
                    else:
                        max_index = max_tdc
                    best_pixel_index = np.where(hist_3d.sum(axis=2) == np.amax(node[:].sum(axis=2)))
                    if best_pixel_index[0].shape[0] == 1:  # there could be more than one pixel with most hits
                        plot_1d_hist(hist_3d[best_pixel_index][0, :max_index], title='TDC histogram of pixel %d, %d\n%s' % (best_pixel_index[1] + 1, best_pixel_index[0] + 1, node._v_attrs.condition) if 'Timestamp' not in node.name else 'TDC time stamp histogram, hits of pixel %d, %d' % (best_pixel_index[1] + 1, best_pixel_index[0] + 1), x_axis_title='TDC' if 'Timestamp' not in node.name else 'TDC time stamp', filename=output_pdf)
                elif 'HistTdcCalibratedCondition' in node.name:
                    plot_corrected_tdc_hist(node[:]['charge'], node[:]['count'], title='TDC histogram, %d pixel, per pixel TDC calib.\n%s' % (node._v_attrs.n_pixel, node._v_attrs.condition), output_pdf=output_pdf)
                elif 'HistMeanTdcCalibratedCondition' in node.name:
                    plot_corrected_tdc_hist(node[:]['charge'], node[:]['count'], title='TDC histogram, %d pixel, mean TDC calib.\n%s' % (node._v_attrs.n_pixel, node._v_attrs.condition), output_pdf=output_pdf)
                elif 'HistTdcCorr' in node.name:
                    plot_tdc_tot_correlation(node[:], node._v_attrs.condition, output_pdf)
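
A recurring idiom in the histogramming above is computing the mean of a histogram with np.average: np.average(counts, weights=bins) equals sum(counts*bins)/sum(bins), so multiplying by the weight sum and dividing by the bin-content sum yields the ordinary histogram mean sum(bins*counts)/sum(counts). A minimal standalone sketch of that identity (toy data, not part of pyBAR):

import numpy as np

# Toy 1D TDC histogram: counts per TDC bin 0..9
counts = np.array([0, 2, 5, 9, 4, 1, 0, 0, 0, 0], dtype=float)
bins = np.arange(counts.shape[0])

# Straightforward histogram mean: sum(bin * count) / sum(count)
mean_direct = np.sum(bins * counts) / np.sum(counts)

# Same result via the np.average identity used in the analysis code above
mean_via_average = np.average(counts, weights=bins) * bins.sum() / counts.sum()

assert np.isclose(mean_direct, mean_via_average)
print(mean_direct)  # ~2.857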
コード例 #25
0
ファイル: scan_hit_delay.py プロジェクト: PatrickAhl/pyBAR
def analyze_hit_delay(raw_data_file):
    # Interpret data and create hit table
    with AnalyzeRawData(raw_data_file=raw_data_file, create_pdf=False) as analyze_raw_data:
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameters to histogram in RAM
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.interpreter.set_warning_output(False)  # a lot of data produces unknown words
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()
        vcal_c0 = analyze_raw_data.vcal_c0
        vcal_c1 = analyze_raw_data.vcal_c1
        c_high = analyze_raw_data.c_high

    # Create relative BCID and mean relative BCID histogram for each pixel / injection delay / PlsrDAC setting
    with tb.open_file(raw_data_file + '_analyzed.h5', mode="w") as out_file_h5:
        hists_folder = out_file_h5.create_group(out_file_h5.root, 'PixelHistsMeanRelBcid')
        hists_folder_2 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsRelBcid')
        hists_folder_3 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsTot')
        hists_folder_4 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsMeanTot')
        hists_folder_5 = out_file_h5.create_group(out_file_h5.root, 'HistsTot')

        def store_bcid_histograms(bcid_array, tot_array, tot_pixel_array):
            logging.debug('Store histograms for PlsrDAC ' + str(old_plsr_dac))
            bcid_mean_array = np.average(bcid_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(bcid_array, axis=3).astype('f4')  # calculate the mean BCID per pixel and scan parameter
            tot_pixel_mean_array = np.average(tot_pixel_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(tot_pixel_array, axis=3).astype('f4')  # calculate the mean tot per pixel and scan parameter
            bcid_mean_result = np.swapaxes(bcid_mean_array, 0, 1)
            bcid_result = np.swapaxes(bcid_array, 0, 1)
            tot_pixel_result = np.swapaxes(tot_pixel_array, 0, 1)
            tot_mean_pixel_result = np.swapaxes(tot_pixel_mean_array, 0, 1)

            out = out_file_h5.createCArray(hists_folder, name='HistPixelMeanRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac, title='Mean relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_mean_result.dtype), shape=bcid_mean_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out.attrs.dimensions = 'column, row, injection delay'
            out.attrs.injection_delay_values = injection_delay
            out[:] = bcid_mean_result
            out_2 = out_file_h5.createCArray(hists_folder_2, name='HistPixelRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac, title='Relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_result.dtype), shape=bcid_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_2.attrs.dimensions = 'column, row, injection delay, relative bcid'
            out_2.attrs.injection_delay_values = injection_delay
            out_2[:] = bcid_result
            out_3 = out_file_h5.createCArray(hists_folder_3, name='HistPixelTotPerDelayPlsrDac_%03d' % old_plsr_dac, title='Tot hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_pixel_result.dtype), shape=tot_pixel_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_3.attrs.dimensions = 'column, row, injection delay'
            out_3.attrs.injection_delay_values = injection_delay
            out_3[:] = tot_pixel_result
            out_4 = out_file_h5.createCArray(hists_folder_4, name='HistPixelMeanTotPerDelayPlsrDac_%03d' % old_plsr_dac, title='Mean tot hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_mean_pixel_result.dtype), shape=tot_mean_pixel_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_4.attrs.dimensions = 'column, row, injection delay'
            out_4.attrs.injection_delay_values = injection_delay
            out_4[:] = tot_mean_pixel_result
            out_5 = out_file_h5.createCArray(hists_folder_5, name='HistTotPlsrDac_%03d' % old_plsr_dac, title='Tot histogram for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_array.dtype), shape=tot_array.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_5.attrs.injection_delay_values = injection_delay
            out_5[:] = tot_array

        old_plsr_dac = None

        # Get scan parameters from interpreted file
        with tb.open_file(raw_data_file + '_interpreted.h5', 'r') as in_file_h5:
            scan_parameters_dict = get_scan_parameter(in_file_h5.root.meta_data[:])
            plsr_dac = scan_parameters_dict['PlsrDAC']
            hists_folder._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_2._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_3._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_4._v_attrs.plsr_dac_values = plsr_dac
            injection_delay = scan_parameters_dict[scan_parameters_dict.keys()[1]]  # the injection delay parameter name is unknown; it should be the inner loop parameter
            scan_parameters = scan_parameters_dict.keys()

        bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16)  # bcid array of actual PlsrDAC
        tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16)  # tot pixel array of actual PlsrDAC
        tot_array = np.zeros((16,), dtype=np.uint32)  # tot array of actual PlsrDAC

        logging.info('Store histograms for PlsrDAC values ' + str(plsr_dac))
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=max(plsr_dac) - min(plsr_dac), term_width=80)

        for index, (parameters, hits) in enumerate(get_hits_of_scan_parameter(raw_data_file + '_interpreted.h5', scan_parameters, try_speedup=True, chunk_size=10000000)):
            if index == 0:
                progress_bar.start()  # start after the event index is created to get reasonable ETA
            actual_plsr_dac, actual_injection_delay = parameters[0], parameters[1]
            column, row, rel_bcid, tot = hits['column'] - 1, hits['row'] - 1, hits['relative_BCID'], hits['tot']
            bcid_array_fast = hist_3d_index(column, row, rel_bcid, shape=(80, 336, 16))
            tot_pixel_array_fast = hist_3d_index(column, row, tot, shape=(80, 336, 16))
            tot_array_fast = hist_1d_index(tot, shape=(16,))

            if old_plsr_dac != actual_plsr_dac:  # Store the data of the current PlsrDAC value
                if old_plsr_dac:  # Do not store before the first PlsrDAC setting has been processed
                    store_bcid_histograms(bcid_array, tot_array, tot_pixel_array)
                    progress_bar.update(old_plsr_dac - min(plsr_dac))
                # Reset the histograms for the next PlsrDAC setting
                bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16)
                tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16)
                tot_array = np.zeros((16,), dtype=np.uint32)
                old_plsr_dac = actual_plsr_dac
            injection_delay_index = np.where(np.array(injection_delay) == actual_injection_delay)[0][0]
            bcid_array[:, :, injection_delay_index, :] += bcid_array_fast
            tot_pixel_array[:, :, injection_delay_index, :] += tot_pixel_array_fast
            tot_array += tot_array_fast
        store_bcid_histograms(bcid_array, tot_array, tot_pixel_array)  # save histograms of last PlsrDAC setting
        progress_bar.finish()

    # Take the mean relative BCID histogram of each PlsrDAC value and calculate the delay for each pixel
    with tb.open_file(raw_data_file + '_analyzed.h5', mode="r+") as in_file_h5:
        hists_folder = in_file_h5.create_group(in_file_h5.root, 'PixelHistsBcidJumps')
        plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values

        # Info output with progressbar
        logging.info('Detect BCID jumps with pixel based S-Curve fits for PlsrDACs ' + str(plsr_dac_values))
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(plsr_dac_values), term_width=80)
        progress_bar.start()

        for index, node in enumerate(in_file_h5.root.PixelHistsMeanRelBcid):  # loop over all mean relative BCID hists for all PlsrDAC values and determine the BCID jumps
            actual_plsr_dac = int(re.search(r'\d+', node.name).group())  # actual node plsr dac value
            # Select the S-curves and interpolate Nans
            pixel_data = node[:, :, :]
            pixel_data_fixed = pixel_data.reshape(pixel_data.shape[0] * pixel_data.shape[1] * pixel_data.shape[2])  # Reshape for interpolation of Nans
            nans, x = ~np.isfinite(pixel_data_fixed), lambda z: z.nonzero()[0]
            pixel_data_fixed[nans] = np.interp(x(nans), x(~nans), pixel_data_fixed[~nans])  # interpolate Nans
            pixel_data_fixed = pixel_data_fixed.reshape(pixel_data.shape[0], pixel_data.shape[1], pixel_data.shape[2])  # Reshape after interpolation of Nans

            # Fit all BCID jumps per pixel (1 - 2 jumps expected) with multithreading
            pixel_data_shaped = pixel_data_fixed.reshape(pixel_data_fixed.shape[0] * pixel_data_fixed.shape[1], pixel_data_fixed.shape[2]).tolist()
            pool = mp.Pool()  # create as many workers as CPU cores are available
            result_array = np.array(pool.map(fit_bcid_jumps, pixel_data_shaped))
            pool.close()
            pool.join()
            result_array = result_array.reshape(pixel_data_fixed.shape[0], pixel_data_fixed.shape[1], 4)

            # Store array to file
            out = in_file_h5.createCArray(hists_folder, name='PixelHistsBcidJumpsPlsrDac_%03d' % actual_plsr_dac, title='BCID jumps per pixel for PlsrDAC ' + str(actual_plsr_dac), atom=tb.Atom.from_dtype(result_array.dtype), shape=result_array.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out.attrs.dimensions = 'column, row, BCID first jump, delay first jump, BCID second jump, delay second jump'
            out[:] = result_array
            progress_bar.update(index)

    # Calibrate the step size of the injection delay and create absolute and relative (=time walk) hit delay histograms
    with tb.open_file(raw_data_file + '_analyzed.h5', mode="r+") as out_file_h5:
        # Calculate the injection delay step size from the average difference between the two S-curves of all pixels and PlsrDAC settings, and take the minimum BCID to fix the absolute time scale
        differences = []
        min_bcid = 15
        for node in out_file_h5.root.PixelHistsBcidJumps:
            pixel_data = node[:, :, :]
            selection = (np.logical_and(pixel_data[:, :, 0] > 0, pixel_data[:, :, 2] > 0))  # select pixels with two S-curve fits
            difference = pixel_data[selection, 3] - pixel_data[selection, 1]  # difference in delay settings between the S-curves
            difference = difference[np.logical_and(difference > 15, difference < 60)]  # get rid of bad data
            differences.extend(difference.tolist())
            if np.amin(pixel_data[selection, 0]) < min_bcid:
                min_bcid = np.amin(pixel_data[selection, 0])
        step_size = np.median(differences)  # delay steps needed for 25 ns
        step_size_error = np.std(differences)  # delay steps needed for 25 ns

        # Calculate the hit delay per pixel
        plsr_dac_values = out_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
        hit_delay = np.zeros(shape=(336, 80, len(plsr_dac_values)))  # result array
        for node in out_file_h5.root.PixelHistsBcidJumps:  # loop over all BCID jump hists for all PlsrDAC values to calculate the hit delay
            actual_plsr_dac = int(re.search(r'\d+', node.name).group())  # actual node plsr dac value
            plsr_dac_index = np.where(plsr_dac_values == actual_plsr_dac)[0][0]
            pixel_data = node[:, :, :]
            actual_hit_delay = (pixel_data[:, :, 0] - min_bcid + 1) * 25. - pixel_data[:, :, 1] * 25. / step_size
            hit_delay[:, :, plsr_dac_index] = actual_hit_delay
        hit_delay = np.ma.masked_less(hit_delay, 0)
        timewalk = hit_delay - np.amin(hit_delay, axis=2)[:, :, np.newaxis]  # time walk calc. by normalization to minimum for every pixel

        # Calculate the mean TOT per PlsrDAC (additional information, not needed for hit delay)
        tot = np.zeros(shape=(len(plsr_dac_values),), dtype=np.float16)  # result array
        for node in out_file_h5.root.HistsTot:  # loop over tot hist for all PlsrDAC values
            plsr_dac = int(re.search(r'\d+', node.name).group())
            plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0]
            tot_data = node[:]
            tot[plsr_dac_index] = get_mean_from_histogram(tot_data, range(16))

        # Store the data
        out = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTimewalkPerPlsrDac', title='Time walk per pixel and PlsrDAC', atom=tb.Atom.from_dtype(timewalk.dtype), shape=timewalk.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        out_2 = out_file_h5.createCArray(out_file_h5.root, name='HistPixelHitDelayPerPlsrDac', title='Hit delay per pixel and PlsrDAC', atom=tb.Atom.from_dtype(hit_delay.dtype), shape=hit_delay.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        out_3 = out_file_h5.createCArray(out_file_h5.root, name='HistTotPerPlsrDac', title='Tot per PlsrDAC', atom=tb.Atom.from_dtype(tot.dtype), shape=tot.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        out.attrs.dimensions = 'column, row, PlsrDAC'
        out.attrs.delay_calibration = step_size
        out.attrs.delay_calibration_error = step_size_error
        out.attrs.plsr_dac_values = plsr_dac_values
        out_2.attrs.dimensions = 'column, row, PlsrDAC'
        out_2.attrs.delay_calibration = step_size
        out_2.attrs.delay_calibration_error = step_size_error
        out_2.attrs.plsr_dac_values = plsr_dac_values
        out_3.attrs.dimensions = 'PlsrDAC'
        out_3.attrs.plsr_dac_values = plsr_dac_values
        out[:] = timewalk.filled(fill_value=np.NaN)
        out_2[:] = hit_delay.filled(fill_value=np.NaN)
        out_3[:] = tot

    # Mask the pixels that have non valid data and create plot with the time walk and hit delay for all pixels
    with tb.open_file(raw_data_file + '_analyzed.h5', mode="r") as in_file_h5:
        def plsr_dac_to_charge(plsr_dac, vcal_c0, vcal_c1, c_high):  # TODO: take PlsrDAC calib from file
            voltage = vcal_c0 + vcal_c1 * plsr_dac
            return voltage * c_high / 0.16022

        def plot_hit_delay(hist_3d, charge_values, title, xlabel, ylabel, filename, threshold=None, tot_values=None):
            # Interpolate tot values for second tot axis
            interpolation = interp1d(tot_values, charge_values, kind='slinear', bounds_error=True)
            tot = np.arange(16)
            tot = tot[np.logical_and(tot >= np.amin(tot_values), tot <= np.amax(tot_values))]

            array = np.transpose(hist_3d, axes=(2, 1, 0)).reshape(hist_3d.shape[2], hist_3d.shape[0] * hist_3d.shape[1])
            y = np.mean(array, axis=1)
            y_err = np.std(array, axis=1)

            fig = Figure()
            FigureCanvas(fig)
            ax = fig.add_subplot(111)
            fig.patch.set_facecolor('white')
            ax.grid(True)
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)
            ax.set_xlim((0, np.amax(charge_values)))
            ax.set_ylim((np.amin(y - y_err), np.amax(y + y_err)))
            ax.plot(charge_values, y, '.-', color='black', label=title)
            if threshold is not None:
                ax.plot([threshold, threshold], [np.amin(y - y_err), np.amax(y + y_err)], linestyle='--', color='black', label='Threshold\n%d e' % (threshold))
            ax.fill_between(charge_values, y - y_err, y + y_err, color='gray', alpha=0.5, facecolor='gray', label='RMS')
            ax2 = ax.twiny()
            ax2.set_xlabel("ToT")

            ticklab = ax2.xaxis.get_ticklabels()[0]
            trans = ticklab.get_transform()
            ax2.xaxis.set_label_coords(np.amax(charge_values), 1, transform=trans)
            ax2.set_xlim(ax.get_xlim())
            ax2.set_xticks(interpolation(tot))
            ax2.set_xticklabels([str(int(i)) for i in tot])
            ax.text(0.5, 1.07, title, horizontalalignment='center', fontsize=18, transform=ax2.transAxes)
            ax.legend()
            filename.savefig(fig)

        plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
        charge_values = plsr_dac_to_charge(np.array(plsr_dac_values), vcal_c0, vcal_c1, c_high)
        hist_timewalk = in_file_h5.root.HistPixelTimewalkPerPlsrDac[:, :, :]
        hist_hit_delay = in_file_h5.root.HistPixelHitDelayPerPlsrDac[:, :, :]
        tot = in_file_h5.root.HistTotPerPlsrDac[:]

        hist_timewalk = np.ma.masked_invalid(hist_timewalk)
        hist_hit_delay = np.ma.masked_invalid(hist_hit_delay)

        output_pdf = PdfPages(raw_data_file + '_analyzed.pdf')
        plot_hit_delay(np.swapaxes(hist_timewalk, 0, 1), charge_values=charge_values, title='Time walk', xlabel='Charge [e]', ylabel='Time walk [ns]', filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot)
        plot_hit_delay(np.swapaxes(hist_hit_delay, 0, 1), charge_values=charge_values, title='Hit delay', xlabel='Charge [e]', ylabel='Hit delay [ns]', filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot)
        plot_scurves(np.swapaxes(hist_timewalk, 0, 1), scan_parameters=charge_values, title='Timewalk of the FE-I4', scan_parameter_name='Charge [e]', ylabel='Timewalk [ns]', min_x=0, filename=output_pdf)
        plot_scurves(np.swapaxes(hist_hit_delay[:, :, :], 0, 1), scan_parameters=charge_values, title='Hit delay (T0) with internal charge injection\nof the FE-I4', scan_parameter_name='Charge [e]', ylabel='Hit delay [ns]', min_x=0, filename=output_pdf)

        for i in [0, 1, len(plsr_dac_values) / 4, len(plsr_dac_values) / 2, -1]:  # plot 2d hist at min, 1/4, 1/2, max PlsrDAC setting
            plot_three_way(hist_timewalk[:, :, i], title='Time walk at %.0f e' % (charge_values[i]), x_axis_title='Time walk [ns]', filename=output_pdf)
            plot_three_way(hist_hit_delay[:, :, i], title='Hit delay (T0) with internal charge injection at %.0f e' % (charge_values[i]), x_axis_title='Hit delay [ns]', minimum=np.amin(hist_hit_delay[:, :, i]), maximum=np.amax(hist_hit_delay[:, :, i]), filename=output_pdf)
        output_pdf.close()
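
The S-curve preparation in analyze_hit_delay fills NaN entries by linear interpolation over the flattened mean-BCID array before fitting. The same idiom in isolation, as a small sketch on a plain 1D array (toy data, not part of pyBAR):

import numpy as np

def interpolate_nans(a):
    """Replace NaN/inf entries of a 1D float array by linear interpolation
    between the neighbouring finite entries (same idiom as in the code above)."""
    a = np.asarray(a, dtype=float).copy()
    nans = ~np.isfinite(a)
    x = lambda z: z.nonzero()[0]  # indices where the mask is True
    a[nans] = np.interp(x(nans), x(~nans), a[~nans])
    return a

print(interpolate_nans([1.0, np.nan, 3.0, np.nan, np.nan, 6.0]))
# -> [1. 2. 3. 4. 5. 6.]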
コード例 #26
0
ファイル: tune_fdac.py プロジェクト: PatrickAhl/pyBAR
    def scan(self):
        if not self.plots_filename:
            self.plots_filename = PdfPages(self.output_filename + '.pdf')
            self.close_plots = True
        else:
            self.close_plots = False

        enable_mask_steps = []

        cal_lvl1_command = self.register.get_commands("CAL")[0] + self.register.get_commands("zeros", length=40)[0] + self.register.get_commands("LV1")[0] + self.register.get_commands("zeros", mask_steps=self.mask_steps)[0]

        self.write_target_charge()
        additional_scan = True
        lastBitResult = np.zeros(shape=self.register.get_pixel_register_value("FDAC").shape, dtype=self.register.get_pixel_register_value("FDAC").dtype)

        self.set_start_fdac()

        self.tot_mean_best = np.empty(shape=(80, 336))  # array to store the best mean ToT (closest to the target ToT) of each pixel
        self.tot_mean_best.fill(0)
        self.fdac_mask_best = self.register.get_pixel_register_value("FDAC")

        for scan_parameter_value, fdac_bit in enumerate(self.fdac_tune_bits):
            if additional_scan:
                self.set_fdac_bit(fdac_bit)
                logging.info('FDAC setting: bit %d = 1', fdac_bit)
            else:
                self.set_fdac_bit(fdac_bit, bit_value=0)
                logging.info('FDAC setting: bit %d = 0', fdac_bit)

            self.write_fdac_config()

            with self.readout(FDAC=scan_parameter_value, reset_sram_fifo=True, fill_buffer=True, clear_buffer=True, callback=self.handle_data):
                scan_loop(self, cal_lvl1_command, repeat_command=self.n_injections_fdac, mask_steps=self.mask_steps, enable_mask_steps=enable_mask_steps, enable_double_columns=None, same_mask_for_all_dc=True, eol_function=None, digital_injection=False, enable_shift_masks=self.enable_shift_masks, disable_shift_masks=self.disable_shift_masks, restore_shift_masks=True, mask=None, double_column_correction=self.pulser_dac_correction)

            col_row_tot = np.column_stack(convert_data_array(data_array_from_data_iterable(self.fifo_readout.data), filter_func=is_data_record, converter_func=get_col_row_tot_array_from_data_record_array))
            tot_array = np.histogramdd(col_row_tot, bins=(80, 336, 16), range=[[1, 80], [1, 336], [0, 15]])[0]
            tot_mean_array = np.average(tot_array, axis=2, weights=range(0, 16)) * sum(range(0, 16)) / self.n_injections_fdac
            select_better_pixel_mask = abs(tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.target_tot)
            pixel_with_too_small_mean_tot_mask = tot_mean_array < self.target_tot
            self.tot_mean_best[select_better_pixel_mask] = tot_mean_array[select_better_pixel_mask]

            if self.plot_intermediate_steps:
                plot_three_way(hist=tot_mean_array.transpose(), title="Mean ToT (FDAC tuning bit " + str(fdac_bit) + ")", x_axis_title='mean ToT', filename=self.plots_filename, minimum=0, maximum=15)

            fdac_mask = self.register.get_pixel_register_value("FDAC")
            self.fdac_mask_best[select_better_pixel_mask] = fdac_mask[select_better_pixel_mask]
            if fdac_bit > 0:
                fdac_mask[pixel_with_too_small_mean_tot_mask] = fdac_mask[pixel_with_too_small_mean_tot_mask] & ~(1 << fdac_bit)
                self.register.set_pixel_register_value("FDAC", fdac_mask)

            if fdac_bit == 0:
                if additional_scan:  # scan bit = 0 with the correct value again
                    additional_scan = False
                    lastBitResult = tot_mean_array.copy()
                    self.fdac_tune_bits.append(0)  # bit 0 has to be scanned twice
                else:
                    fdac_mask[abs(tot_mean_array - self.target_tot) > abs(lastBitResult - self.target_tot)] = fdac_mask[abs(tot_mean_array - self.target_tot) > abs(lastBitResult - self.target_tot)] | (1 << fdac_bit)
                    tot_mean_array[abs(tot_mean_array - self.target_tot) > abs(lastBitResult - self.target_tot)] = lastBitResult[abs(tot_mean_array - self.target_tot) > abs(lastBitResult - self.target_tot)]
                    self.tot_mean_best[abs(tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.n_injections_fdac / 2)] = tot_mean_array[abs(tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.n_injections_fdac / 2)]
                    self.fdac_mask_best[abs(tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.n_injections_fdac / 2)] = fdac_mask[abs(tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.n_injections_fdac / 2)]

        self.register.set_pixel_register_value("FDAC", self.fdac_mask_best)  # set value for meta scan
        self.write_fdac_config()
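
The FDAC tuning above is a successive-approximation search: each DAC bit is set, the mean ToT is measured with an injection scan, and the bit is cleared again where the result overshoots the target; the LSB is scanned twice so the better of the two final settings can be kept. A minimal single-value sketch of that bit walk, with a made-up monotonic measure() in place of a real scan (toy code, not part of pyBAR):

import numpy as np

def tune_dac(measure, target, n_bits=4):
    """Successive-approximation tuning of an n_bits DAC against a monotonically
    increasing response. Returns the DAC value whose response is closest to target.
    measure(dac) is a hypothetical stand-in for an injection scan."""
    dac = 0
    best_dac, best_diff = 0, float('inf')
    for bit in range(n_bits - 1, -1, -1):
        dac |= (1 << bit)                # try the bit set
        value = measure(dac)
        if abs(value - target) < best_diff:
            best_dac, best_diff = dac, abs(value - target)
        if value > target:               # overshoot: clear the bit again
            dac &= ~(1 << bit)
    return best_dac

# Toy response: the measured quantity rises linearly with the DAC setting
response = lambda dac: 0.8 * dac
print(tune_dac(response, target=7.0))    # -> 9 (0.8 * 9 = 7.2, closest to 7.0)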
コード例 #27
0
def histogram_tdc_hits(input_file_hits, hit_selection_conditions, event_status_select_mask, event_status_condition, calibration_file=None, correct_calibration=None, max_tdc=1000, ignore_disabled_regions=True, n_bins=200, plot_data=True):
    for condition in hit_selection_conditions:
        logging.info('Histogram TDC hits with %s', condition)

    def get_charge(max_tdc, tdc_calibration_values, tdc_pixel_calibration):  # return the charge from calibration
        charge_calibration = np.zeros(shape=(80, 336, max_tdc))
        for column in range(80):
            for row in range(336):
                actual_pixel_calibration = tdc_pixel_calibration[column, row, :]
                # Only take pixels with at least 3 valid calibration points
                if np.count_nonzero(actual_pixel_calibration != 0) > 2 and np.count_nonzero(np.isfinite(actual_pixel_calibration)) > 2:
                    selected_measurements = np.isfinite(actual_pixel_calibration)  # Select valid calibration steps
                    selected_actual_pixel_calibration = actual_pixel_calibration[selected_measurements]
                    selected_tdc_calibration_values = tdc_calibration_values[selected_measurements]
                    interpolation = interp1d(x=selected_actual_pixel_calibration, y=selected_tdc_calibration_values, kind='slinear', bounds_error=False, fill_value=0)
                    charge_calibration[column, row, :] = interpolation(np.arange(max_tdc))
        return charge_calibration

    def plot_tdc_tot_correlation(data, condition, output_pdf):
        logging.info('Plot correlation histogram for %s', condition)
        plt.clf()
        data = np.ma.array(data, mask=(data <= 0))
        if np.ma.any(data > 0):
            cmap = cm.get_cmap('jet', 200)
            cmap.set_bad('w')
            plt.title('Correlation with %s' % condition)
            norm = colors.LogNorm()
            z_max = data.max(fill_value=0)
            plt.xlabel('TDC')
            plt.ylabel('TOT')
            im = plt.imshow(data, cmap=cmap, norm=norm, aspect='auto', interpolation='nearest')  # , norm=norm)
            divider = make_axes_locatable(plt.gca())
            plt.gca().invert_yaxis()
            cax = divider.append_axes("right", size="5%", pad=0.1)
            plt.colorbar(im, cax=cax, ticks=np.linspace(start=0, stop=z_max, num=9, endpoint=True))
            output_pdf.savefig()
        else:
            logging.warning('No data for correlation plotting for %s', condition)

    def plot_hits_per_condition(output_pdf):
        logging.info('Plot hits selection efficiency histogram for %d conditions', len(hit_selection_conditions) + 2)
        labels = ['All Hits', 'Hits of\ngood events']
        for condition in hit_selection_conditions:
            condition = re.sub('[&]', '\n', condition)
            condition = re.sub('[()]', '', condition)
            labels.append(condition)
        plt.clf()
        plt.bar(range(len(n_hits_per_condition)), n_hits_per_condition, align='center')
        plt.xticks(range(len(n_hits_per_condition)), labels, size=8)
        plt.title('Number of hits for different cuts')
        plt.yscale('log')
        plt.ylabel('#')
        plt.grid()
        for x, y in zip(np.arange(len(n_hits_per_condition)), n_hits_per_condition):
            plt.annotate('%d' % (float(y) / float(n_hits_per_condition[0]) * 100.) + r'%', xy=(x, y / 2.), xycoords='data', color='grey', size=15)
        output_pdf.savefig()

    def plot_corrected_tdc_hist(x, y, title, output_pdf, point_style='-'):
        logging.info('Plot TDC hist with TDC calibration')
        plt.clf()
        y /= np.amax(y) if y.shape[0] > 0 else y
        plt.plot(x, y, point_style)
        plt.title(title, size=10)
        plt.xlabel('Charge [PlsrDAC]')
        plt.ylabel('Count [a.u.]')
        plt.grid()
        output_pdf.savefig()

    def get_calibration_correction(tdc_calibration, tdc_calibration_values, filename_new_calibration):  # correct the TDC calibration with the TDC calib in filename_new_calibration by shifting the means
        with tb.open_file(filename_new_calibration, 'r') as in_file_2:
            charge_calibration_1, charge_calibration_2 = tdc_calibration, in_file_2.root.HitOrCalibration[:, :, :, 1]

            plsr_dacs = tdc_calibration_values
            if not np.all(plsr_dacs == in_file_2.root.HitOrCalibration._v_attrs.scan_parameter_values):
                raise NotImplementedError('The check calibration file has to have the same PlsrDAC values')

            # Valid pixel have a calibration in the new and the old calibration
            valid_pixel = np.where(~np.all((charge_calibration_1 == 0), axis=2) & ~np.all(np.isnan(charge_calibration_1), axis=2) & ~np.all((charge_calibration_2 == 0), axis=2) & ~np.all(np.isnan(charge_calibration_2), axis=2))
            mean_charge_calibration = np.nanmean(charge_calibration_2[valid_pixel], axis=0)
            offset_mean = np.nanmean((charge_calibration_2[valid_pixel] - charge_calibration_1[valid_pixel]), axis=0)

            dPlsrDAC_dTDC = analysis_utils.smooth_differentiation(plsr_dacs, mean_charge_calibration, order=3, smoothness=0, derivation=1)
            plt.clf()
            plt.plot(plsr_dacs, offset_mean / dPlsrDAC_dTDC, '.-', label='PlsrDAC')
            plt.plot(plsr_dacs, offset_mean, '.-', label='TDC')
            plt.grid()
            plt.xlabel('PlsrDAC')
            plt.ylabel('Mean calibration offset')
            plt.legend(loc=0)
            plt.title('Mean offset between TDC calibration data, new - old ')
            plt.savefig(filename_new_calibration[:-3] + '.pdf')
            plt.show()

            return offset_mean

    def delete_disabled_regions(hits, enable_mask):
        n_hits = hits.shape[0]

        # Treat the case of no hits
        if n_hits == 0:
            return hits

        # Column, row array with True for disabled pixels
        disabled_region = ~enable_mask.astype(np.bool).T.copy()
        n_disabled_pixels = np.count_nonzero(disabled_region)

        # Extend disabled pixel mask by the neighbouring pixels
        neighbour_pixels = [(-1, 0), (1, 0), (0, -1), (0, 1)]  # Disable direct neighbouring pixels
        for neighbour_pixel in neighbour_pixels:
            disabled_region = np.logical_or(disabled_region, shift(disabled_region, shift=neighbour_pixel, cval=0))

        logging.info('Masking %d additional pixel neighbouring %d disabled pixels', np.count_nonzero(disabled_region) - n_disabled_pixels, n_disabled_pixels)

        # Make 1D selection array with disabled pixels
        disabled_pixels = np.where(disabled_region)
        disabled_pixels_1d = (disabled_pixels[0] + 1) * disabled_region.shape[1] + (disabled_pixels[1] + 1)  # + 1 because pixel index 0,0 has column/row = 1

        hits_1d = hits['column'].astype(np.uint32) * disabled_region.shape[1] + hits['row']  # change dtype to fit new number
        hits = hits[np.in1d(hits_1d, disabled_pixels_1d, invert=True)]

        logging.info('Lost %d hits (%d percent) due to disabling neighbours', n_hits - hits.shape[0], (1. - float(hits.shape[0]) / n_hits) * 100)

        return hits

    # Create data
    with tb.open_file(input_file_hits, mode="r") as in_hit_file_h5:
        cluster_hit_table = in_hit_file_h5.root.ClusterHits
        try:
            enabled_pixels = in_hit_file_h5.root.ClusterHits._v_attrs.enabled_pixels[:]
        except AttributeError:  # Old and simulated data do not have this info
            logging.warning('No enabled pixel mask found in data! Assume all pixels are enabled.')
            enabled_pixels = np.ones(shape=(336, 80))

        # Result hists, initialized per condition
        pixel_tdc_hists_per_condition = [np.zeros(shape=(80, 336, max_tdc), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        pixel_tdc_timestamp_hists_per_condition = [np.zeros(shape=(80, 336, 256), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        mean_pixel_tdc_hists_per_condition = [np.zeros(shape=(80, 336), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        mean_pixel_tdc_timestamp_hists_per_condition = [np.zeros(shape=(80, 336), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        tdc_hists_per_condition = [np.zeros(shape=(max_tdc), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        tdc_corr_hists_per_condition = [np.zeros(shape=(max_tdc, 16), dtype=np.uint32) for _ in hit_selection_conditions] if hit_selection_conditions else []

        n_hits_per_condition = [0 for _ in range(len(hit_selection_conditions) + 2)]  # conditions 1 and 2 are all hits and hits of good events

        logging.info('Select hits and create TDC histograms for %d cut conditions', len(hit_selection_conditions))
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_hit_table.shape[0], term_width=80)
        progress_bar.start()
        for cluster_hits, _ in analysis_utils.data_aligned_at_events(cluster_hit_table, chunk_size=10000000):
            n_hits_per_condition[0] += cluster_hits.shape[0]
            selected_events_cluster_hits = cluster_hits[np.logical_and(cluster_hits['TDC'] < max_tdc, (cluster_hits['event_status'] & event_status_select_mask) == event_status_condition)]
            n_hits_per_condition[1] += selected_events_cluster_hits.shape[0]
            for index, condition in enumerate(hit_selection_conditions):
                selected_cluster_hits = analysis_utils.select_hits(selected_events_cluster_hits, condition)
                if ignore_disabled_regions:
                    selected_cluster_hits = delete_disabled_regions(hits=selected_cluster_hits, enable_mask=enabled_pixels)

                n_hits_per_condition[2 + index] += selected_cluster_hits.shape[0]
                column, row, tdc = selected_cluster_hits['column'] - 1, selected_cluster_hits['row'] - 1, selected_cluster_hits['TDC']
                pixel_tdc_hists_per_condition[index] += fast_analysis_utils.hist_3d_index(column, row, tdc, shape=(80, 336, max_tdc))
                mean_pixel_tdc_hists_per_condition[index] = np.average(pixel_tdc_hists_per_condition[index], axis=2, weights=range(0, max_tdc)) * np.sum(np.arange(0, max_tdc)) / pixel_tdc_hists_per_condition[index].sum(axis=2)
                tdc_timestamp = selected_cluster_hits['TDC_time_stamp']
                pixel_tdc_timestamp_hists_per_condition[index] += fast_analysis_utils.hist_3d_index(column, row, tdc_timestamp, shape=(80, 336, 256))
                mean_pixel_tdc_timestamp_hists_per_condition[index] = np.average(pixel_tdc_timestamp_hists_per_condition[index], axis=2, weights=range(0, 256)) * np.sum(np.arange(0, 256)) / pixel_tdc_timestamp_hists_per_condition[index].sum(axis=2)
                tdc_hists_per_condition[index] = pixel_tdc_hists_per_condition[index].sum(axis=(0, 1))
                tdc_corr_hists_per_condition[index] += fast_analysis_utils.hist_2d_index(tdc, selected_cluster_hits['tot'], shape=(max_tdc, 16))
            progress_bar.update(n_hits_per_condition[0])
        progress_bar.finish()

        # Take TDC calibration if available and calculate charge for each TDC value and pixel
        if calibration_file is not None:
            with tb.open_file(calibration_file, mode="r") as in_file_calibration_h5:
                tdc_calibration = in_file_calibration_h5.root.HitOrCalibration[:, :, :, 1]
                tdc_calibration_values = in_file_calibration_h5.root.HitOrCalibration.attrs.scan_parameter_values[:]
                if correct_calibration is not None:
                    tdc_calibration += get_calibration_correction(tdc_calibration, tdc_calibration_values, correct_calibration)
            charge_calibration = get_charge(max_tdc, tdc_calibration_values, tdc_calibration)
        else:
            charge_calibration = None

        # Store data of result histograms
        with tb.open_file(input_file_hits[:-3] + '_tdc_hists.h5', mode="w") as out_file_h5:
            for index, condition in enumerate(hit_selection_conditions):
                pixel_tdc_hist_result = np.swapaxes(pixel_tdc_hists_per_condition[index], 0, 1)
                pixel_tdc_timestamp_hist_result = np.swapaxes(pixel_tdc_timestamp_hists_per_condition[index], 0, 1)
                mean_pixel_tdc_hist_result = np.swapaxes(mean_pixel_tdc_hists_per_condition[index], 0, 1)
                mean_pixel_tdc_timestamp_hist_result = np.swapaxes(mean_pixel_tdc_timestamp_hists_per_condition[index], 0, 1)
                tdc_hists_per_condition_result = tdc_hists_per_condition[index]
                tdc_corr_hist_result = np.swapaxes(tdc_corr_hists_per_condition[index], 0, 1)
                # Create result hists
                out_1 = out_file_h5.create_carray(out_file_h5.root, name='HistPixelTdcCondition_%d' % index, title='Hist Pixel Tdc with %s' % condition, atom=tb.Atom.from_dtype(pixel_tdc_hist_result.dtype), shape=pixel_tdc_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_2 = out_file_h5.create_carray(out_file_h5.root, name='HistPixelTdcTimestampCondition_%d' % index, title='Hist Pixel Tdc Timestamp with %s' % condition, atom=tb.Atom.from_dtype(pixel_tdc_timestamp_hist_result.dtype), shape=pixel_tdc_timestamp_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_3 = out_file_h5.create_carray(out_file_h5.root, name='HistMeanPixelTdcCondition_%d' % index, title='Hist Mean Pixel Tdc with %s' % condition, atom=tb.Atom.from_dtype(mean_pixel_tdc_hist_result.dtype), shape=mean_pixel_tdc_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_4 = out_file_h5.create_carray(out_file_h5.root, name='HistMeanPixelTdcTimestampCondition_%d' % index, title='Hist Mean Pixel Tdc Timestamp with %s' % condition, atom=tb.Atom.from_dtype(mean_pixel_tdc_timestamp_hist_result.dtype), shape=mean_pixel_tdc_timestamp_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_5 = out_file_h5.create_carray(out_file_h5.root, name='HistTdcCondition_%d' % index, title='Hist Tdc with %s' % condition, atom=tb.Atom.from_dtype(tdc_hists_per_condition_result.dtype), shape=tdc_hists_per_condition_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_6 = out_file_h5.create_carray(out_file_h5.root, name='HistTdcCorrCondition_%d' % index, title='Hist Correlation Tdc/Tot with %s' % condition, atom=tb.Atom.from_dtype(tdc_corr_hist_result.dtype), shape=tdc_corr_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                # Add result hists information
                out_1.attrs.dimensions, out_1.attrs.condition, out_1.attrs.tdc_values = 'column, row, TDC value', condition, range(max_tdc)
                out_2.attrs.dimensions, out_2.attrs.condition, out_2.attrs.tdc_values = 'column, row, TDC time stamp value', condition, range(256)
                out_3.attrs.dimensions, out_3.attrs.condition = 'column, row, mean TDC value', condition
                out_4.attrs.dimensions, out_4.attrs.condition = 'column, row, mean TDC time stamp value', condition
                out_5.attrs.dimensions, out_5.attrs.condition = 'TDC value', condition
                out_6.attrs.dimensions, out_6.attrs.condition = 'TDC, TOT', condition
                out_1[:], out_2[:], out_3[:], out_4[:], out_5[:], out_6[:] = pixel_tdc_hist_result, pixel_tdc_timestamp_hist_result, mean_pixel_tdc_hist_result, mean_pixel_tdc_timestamp_hist_result, tdc_hists_per_condition_result, tdc_corr_hist_result

                if charge_calibration is not None:
                    # Select only valid pixels for histogramming: they have data and a calibration (that is, any charge(TDC) calibration != 0)
                    valid_pixel = np.where(np.logical_and(charge_calibration[:, :, :max_tdc].sum(axis=2) > 0, pixel_tdc_hist_result[:, :, :max_tdc].swapaxes(0, 1).sum(axis=2) > 0))
                    # Create charge histogram with mean TDC calibration
                    mean_charge_calibration = charge_calibration[valid_pixel][:, :max_tdc].mean(axis=0)
                    mean_tdc_hist = pixel_tdc_hist_result.swapaxes(0, 1)[valid_pixel][:, :max_tdc].mean(axis=0)
                    result_array = np.rec.array(np.column_stack((mean_charge_calibration, mean_tdc_hist)), dtype=[('charge', float), ('count', float)])
                    out_7 = out_file_h5.create_table(out_file_h5.root, name='HistMeanTdcCalibratedCondition_%d' % index, description=result_array.dtype, title='Hist Tdc with mean charge calibration and %s' % condition, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                    out_7.attrs.condition = condition
                    out_7.attrs.n_pixel = valid_pixel[0].shape[0]
                    out_7.attrs.n_hits = pixel_tdc_hist_result.swapaxes(0, 1)[valid_pixel][:, :max_tdc].sum()
                    out_7.append(result_array)
                    # Create charge histogram with per pixel TDC calibration
                    x, y = charge_calibration[valid_pixel][:, :max_tdc].ravel(), np.ravel(pixel_tdc_hist_result.swapaxes(0, 1)[valid_pixel][:, :max_tdc].ravel())
                    y_hist, x_hist = y[x > 0], x[x > 0]  # remove hit TDCs without a proper PlsrDAC(TDC) calibration
                    x, y, yerr = analysis_utils.get_profile_histogram(x_hist, y_hist, n_bins=n_bins)
                    result_array = np.rec.array(np.column_stack((x, y, yerr)), dtype=[('charge', float), ('count', float), ('count_error', float)])
                    out_8 = out_file_h5.create_table(out_file_h5.root, name='HistTdcCalibratedCondition_%d' % index, description=result_array.dtype, title='Hist Tdc with per pixel charge calibration and %s' % condition, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                    out_8.attrs.condition = condition
                    out_8.attrs.n_pixel = valid_pixel[0].shape[0]
                    out_8.attrs.n_hits = y_hist.sum()
                    out_8.append(result_array)

    # Plot Data
    if plot_data:
        with PdfPages(input_file_hits[:-3] + '_calibrated_tdc_hists.pdf') as output_pdf:
            plot_hits_per_condition(output_pdf)
            with tb.open_file(input_file_hits[:-3] + '_tdc_hists.h5', mode="r") as in_file_h5:
                for node in in_file_h5.root:  # go through the data and plot them
                    if 'MeanPixel' in node.name:
                        try:
                            plot_three_way(np.ma.masked_invalid(node[:]) * 1.5625, title='Mean TDC delay, hits with\n%s' % node._v_attrs.condition[:80] if 'Timestamp' in node.name else 'Mean TDC, hits with\n%s' % node._v_attrs.condition[:80], filename=output_pdf)
                        except ValueError:
                            logging.warning('Cannot plot TDC delay')
                    elif 'HistTdcCondition' in node.name:
                        hist_1d = node[:]
                        entry_index = np.where(hist_1d != 0)
                        if entry_index[0].shape[0] != 0:
                            max_index = np.amax(entry_index)
                        else:
                            max_index = max_tdc
                        plot_1d_hist(hist_1d[:max_index + 10], title='TDC histogram, hits with\n%s' % node._v_attrs.condition[:80] if 'Timestamp' not in node.name else 'TDC time stamp histogram, hits with\n%s' % node._v_attrs.condition[:80], x_axis_title='TDC' if 'Timestamp' not in node.name else 'TDC time stamp', filename=output_pdf)
                    elif 'HistPixelTdc' in node.name:
                        hist_3d = node[:]
                        entry_index = np.where(hist_3d.sum(axis=(0, 1)) != 0)
                        if entry_index[0].shape[0] != 0:
                            max_index = np.amax(entry_index)
                        else:
                            max_index = max_tdc
                        best_pixel_index = np.where(hist_3d.sum(axis=2) == np.amax(node[:].sum(axis=2)))
                        if best_pixel_index[0].shape[0] == 1:  # there could be more than one pixel with most hits
                            try:
                                plot_1d_hist(hist_3d[best_pixel_index][0, :max_index], title='TDC histogram of pixel %d, %d\n%s' % (best_pixel_index[1] + 1, best_pixel_index[0] + 1, node._v_attrs.condition[:80]) if 'Timestamp' not in node.name else 'TDC time stamp histogram, hits of pixel %d, %d' % (best_pixel_index[1] + 1, best_pixel_index[0] + 1), x_axis_title='TDC' if 'Timestamp' not in node.name else 'TDC time stamp', filename=output_pdf)
                            except IndexError:
                                logging.warning('Cannot plot pixel TDC histogram')
                    elif 'HistTdcCalibratedCondition' in node.name:
                        plot_corrected_tdc_hist(node[:]['charge'], node[:]['count'], title='TDC histogram, %d pixel, per pixel TDC calib.\n%s' % (node._v_attrs.n_pixel, node._v_attrs.condition[:80]), output_pdf=output_pdf)
                    elif 'HistMeanTdcCalibratedCondition' in node.name:
                        plot_corrected_tdc_hist(node[:]['charge'], node[:]['count'], title='TDC histogram, %d pixel, mean TDC calib.\n%s' % (node._v_attrs.n_pixel, node._v_attrs.condition[:80]), output_pdf=output_pdf)
                    elif 'HistTdcCorr' in node.name:
                        plot_tdc_tot_correlation(node[:], node._v_attrs.condition, output_pdf)
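
delete_disabled_regions above removes hits that fall on disabled pixels (and their direct neighbours) by mapping (column, row) pairs to a single flattened index and testing membership with np.in1d. A minimal sketch of that flattened-index filter on a toy geometry (toy data, not part of pyBAR):

import numpy as np

n_columns, n_rows = 5, 4  # toy geometry (FE-I4 would be 80 x 336)

# Structured hit array with 1-based column/row, as in the cluster hit table
hits = np.array([(1, 1), (2, 3), (4, 2), (5, 4)],
                dtype=[('column', np.uint16), ('row', np.uint16)])

# Boolean mask of disabled pixels, indexed [row, column]; True = disabled
disabled = np.zeros((n_rows, n_columns), dtype=bool)
disabled[2, 1] = True  # disable the pixel at column 2, row 3 (1-based)

# Map (column, row) to one flattened index; the +1 matches the 1-based hit coordinates
disabled_cols, disabled_rows = np.where(disabled.T)
disabled_1d = (disabled_cols + 1) * (n_rows + 1) + (disabled_rows + 1)
hits_1d = hits['column'].astype(np.uint32) * (n_rows + 1) + hits['row']

good_hits = hits[np.in1d(hits_1d, disabled_1d, invert=True)]
print(good_hits)  # the hit at column 2, row 3 is gone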
コード例 #28
0
    def scan(self):
        if not self.plots_filename:
            self.plots_filename = PdfPages(self.output_filename + '.pdf')
            self.close_plots = True
        else:
            self.close_plots = False

        cal_lvl1_command = self.register.get_commands(
            "CAL")[0] + self.register.get_commands(
                "zeros", length=40)[0] + self.register.get_commands("LV1")[0]

        self.write_target_threshold()

        scan_parameter_range = [
            (2**self.register.global_registers['Vthin_AltFine']['bitlength']),
            0
        ]  # high to low
        if self.scan_parameters.GDAC[0]:
            scan_parameter_range[0] = self.scan_parameters.GDAC[0]
        if self.scan_parameters.GDAC[1]:
            scan_parameter_range[1] = self.scan_parameters.GDAC[1]
        scan_parameter_range = range(scan_parameter_range[0],
                                     scan_parameter_range[1] - 1,
                                     self.step_size)
        logging.info("Scanning %s from %d to %d", 'GDAC',
                     scan_parameter_range[0], scan_parameter_range[-1])

        def bits_set(int_type):
            int_type = int(int_type)
            position = 0
            bits_set = []
            while (int_type):
                if (int_type & 1):
                    bits_set.append(position)
                position += 1
                int_type = int_type >> 1
            return bits_set

        # calculate selected pixels from the mask and the disabled columns
        select_mask_array = np.zeros(shape=(80, 336), dtype=np.uint8)
        self.occ_array_sel_pixels_best = select_mask_array.copy()
        self.occ_array_desel_pixels_best = select_mask_array.copy()
        if not self.enable_mask_steps_gdac:
            self.enable_mask_steps_gdac = range(self.mask_steps)
        for mask_step in self.enable_mask_steps_gdac:
            select_mask_array += make_pixel_mask(steps=self.mask_steps,
                                                 shift=mask_step)
        for column in bits_set(
                self.register.get_global_register_value("DisableColumnCnfg")):
            logging.info('Deselect double column %d' % column)
            select_mask_array[column, :] = 0

        occupancy_best = 0.0
        median_occupancy_last_step = 0.0
        gdac_best = self.register_utils.get_gdac()
        for gdac_scan_step, scan_parameter_value in enumerate(
                scan_parameter_range):
            self.register_utils.set_gdac(scan_parameter_value)
            with self.readout(GDAC=scan_parameter_value,
                              reset_sram_fifo=True,
                              fill_buffer=True,
                              clear_buffer=True,
                              callback=self.handle_data):
                scan_loop(self,
                          command=cal_lvl1_command,
                          repeat_command=self.n_injections_gdac,
                          mask_steps=self.mask_steps,
                          enable_mask_steps=self.enable_mask_steps_gdac,
                          enable_double_columns=None,
                          same_mask_for_all_dc=self.same_mask_for_all_dc,
                          eol_function=None,
                          digital_injection=False,
                          enable_shift_masks=self.enable_shift_masks,
                          disable_shift_masks=self.disable_shift_masks,
                          restore_shift_masks=True,
                          mask=None,
                          double_column_correction=self.pulser_dac_correction)

            occupancy_array, _, _ = np.histogram2d(*convert_data_array(
                data_array_from_data_iterable(self.fifo_readout.data),
                filter_func=logical_and(is_fe_word, is_data_record),
                converter_func=get_col_row_array_from_data_record_array),
                                                   bins=(80, 336),
                                                   range=[[1, 80], [1, 336]])
            occ_array_sel_pixels = np.ma.array(
                occupancy_array,
                mask=np.logical_not(np.ma.make_mask(select_mask_array))
            )  # take only selected pixel into account by using the mask
            occ_array_desel_pixels = np.ma.array(
                occupancy_array, mask=np.ma.make_mask(select_mask_array)
            )  # take only de-selected pixel into account by using the inverted mask
            median_occupancy = np.ma.median(occ_array_sel_pixels)
            noise_occupancy = np.ma.median(occ_array_desel_pixels)
            occupancy_almost_zero = np.allclose(median_occupancy, 0)
            no_noise = np.allclose(noise_occupancy, 0)
            if no_noise and not occupancy_almost_zero and abs(
                    median_occupancy - self.n_injections_gdac /
                    2) < abs(occupancy_best - self.n_injections_gdac / 2):
                occupancy_best = median_occupancy
                gdac_best = self.register_utils.get_gdac()
                self.occ_array_sel_pixels_best = occ_array_sel_pixels.copy()
                self.occ_array_desel_pixels_best = occ_array_desel_pixels.copy(
                )

            if self.plot_intermediate_steps:
                plot_three_way(occ_array_sel_pixels.transpose(),
                               title="Occupancy (GDAC " +
                               str(scan_parameter_value) + ")",
                               x_axis_title='Occupancy',
                               filename=self.plots_filename,
                               maximum=self.n_injections_gdac)

            if no_noise and not occupancy_almost_zero and median_occupancy >= median_occupancy_last_step and median_occupancy >= self.n_injections_gdac / 2:
                break
            if no_noise and not occupancy_almost_zero:
                median_occupancy_last_step = median_occupancy
            else:
                median_occupancy_last_step = 0.0

        self.register_utils.set_gdac(gdac_best, send_command=False)
        median_occupancy = occupancy_best
        self.gdac_best = self.register_utils.get_gdac()

        if abs(median_occupancy -
               self.n_injections_gdac / 2) > self.max_delta_threshold:
            if np.all((((self.gdac_best & (1 << np.arange(
                    self.register.global_registers['Vthin_AltFine']
                ['bitlength'] + self.register.global_registers['Vthin_AltCoarse']
                ['bitlength'])))) > 0).astype(int) == 0):
                if self.fail_on_warning:
                    raise RuntimeWarning(
                        'Selected GDAC bits reached minimum value')
                else:
                    logging.warning('Selected GDAC bits reached minimum value')
            else:
                if self.fail_on_warning:
                    raise RuntimeWarning(
                        'Global threshold tuning failed. Delta threshold = %.2f > %.2f. Vthin_AltCoarse / Vthin_AltFine = %d / %d'
                        % (abs(median_occupancy - self.n_injections_gdac / 2),
                           self.max_delta_threshold,
                           self.register.get_global_register_value(
                               "Vthin_AltCoarse"),
                           self.register.get_global_register_value(
                               "Vthin_AltFine")))
                else:
                    logging.warning(
                        'Global threshold tuning failed. Delta threshold = %.2f > %.2f. Vthin_AltCoarse / Vthin_AltFine = %d / %d',
                        abs(median_occupancy - self.n_injections_gdac / 2),
                        self.max_delta_threshold,
                        self.register.get_global_register_value(
                            "Vthin_AltCoarse"),
                        self.register.get_global_register_value(
                            "Vthin_AltFine"))
        else:
            logging.info(
                'Tuned GDAC to Vthin_AltCoarse / Vthin_AltFine = %d / %d',
                self.register.get_global_register_value("Vthin_AltCoarse"),
                self.register.get_global_register_value("Vthin_AltFine"))
Code Example #29
0
def analyze_hit_delay(raw_data_file):
    # Interpret data and create hit table
    with AnalyzeRawData(raw_data_file=raw_data_file,
                        create_pdf=False) as analyze_raw_data:
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameter combinations for in-RAM histogramming
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.interpreter.set_warning_output(
            False)  # A lot of data produces unknown words
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()
        # Store calibration values in variables
        vcal_c0 = analyze_raw_data.vcal_c0
        vcal_c1 = analyze_raw_data.vcal_c1
        c_high = analyze_raw_data.c_high

    # Create relative BCID and mean relative BCID histogram for each pixel / injection delay / PlsrDAC setting
    with tb.open_file(raw_data_file + '_analyzed.h5', mode="w") as out_file_h5:
        hists_folder = out_file_h5.create_group(out_file_h5.root,
                                                'PixelHistsMeanRelBcid')
        hists_folder_2 = out_file_h5.create_group(out_file_h5.root,
                                                  'PixelHistsRelBcid')
        hists_folder_3 = out_file_h5.create_group(out_file_h5.root,
                                                  'PixelHistsTot')
        hists_folder_4 = out_file_h5.create_group(out_file_h5.root,
                                                  'PixelHistsMeanTot')
        hists_folder_5 = out_file_h5.create_group(out_file_h5.root, 'HistsTot')

        def store_bcid_histograms(bcid_array, tot_array, tot_pixel_array):
            logging.debug('Store histograms for PlsrDAC ' + str(old_plsr_dac))
            bcid_mean_array = np.average(
                bcid_array, axis=3, weights=range(0, 16)
            ) * sum(range(0, 16)) / np.sum(bcid_array, axis=3).astype(
                'f4')  # calculate the mean BCID per pixel and scan parameter
            tot_pixel_mean_array = np.average(
                tot_pixel_array, axis=3, weights=range(0, 16)
            ) * sum(range(0, 16)) / np.sum(tot_pixel_array, axis=3).astype(
                'f4')  # calculate the mean tot per pixel and scan parameter
            bcid_mean_result = np.swapaxes(bcid_mean_array, 0, 1)
            bcid_result = np.swapaxes(bcid_array, 0, 1)
            tot_pixel_result = np.swapaxes(tot_pixel_array, 0, 1)
            tot_mean_pixel_result = np.swapaxes(tot_pixel_mean_array, 0, 1)

            out = out_file_h5.create_carray(
                hists_folder,
                name='HistPixelMeanRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac,
                title=
                'Mean relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC '
                + str(old_plsr_dac),
                atom=tb.Atom.from_dtype(bcid_mean_result.dtype),
                shape=bcid_mean_result.shape,
                filters=tb.Filters(complib='blosc',
                                   complevel=5,
                                   fletcher32=False))
            out.attrs.dimensions = 'column, row, injection delay'
            out.attrs.injection_delay_values = injection_delay
            out[:] = bcid_mean_result
            out_2 = out_file_h5.create_carray(
                hists_folder_2,
                name='HistPixelRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac,
                title=
                'Relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC '
                + str(old_plsr_dac),
                atom=tb.Atom.from_dtype(bcid_result.dtype),
                shape=bcid_result.shape,
                filters=tb.Filters(complib='blosc',
                                   complevel=5,
                                   fletcher32=False))
            out_2.attrs.dimensions = 'column, row, injection delay, relative bcid'
            out_2.attrs.injection_delay_values = injection_delay
            out_2[:] = bcid_result
            out_3 = out_file_h5.create_carray(
                hists_folder_3,
                name='HistPixelTotPerDelayPlsrDac_%03d' % old_plsr_dac,
                title=
                'Tot hist per pixel and different PlsrDAC delays for PlsrDAC '
                + str(old_plsr_dac),
                atom=tb.Atom.from_dtype(tot_pixel_result.dtype),
                shape=tot_pixel_result.shape,
                filters=tb.Filters(complib='blosc',
                                   complevel=5,
                                   fletcher32=False))
            out_3.attrs.dimensions = 'column, row, injection delay'
            out_3.attrs.injection_delay_values = injection_delay
            out_3[:] = tot_pixel_result
            out_4 = out_file_h5.create_carray(
                hists_folder_4,
                name='HistPixelMeanTotPerDelayPlsrDac_%03d' % old_plsr_dac,
                title=
                'Mean tot hist per pixel and different PlsrDAC delays for PlsrDAC '
                + str(old_plsr_dac),
                atom=tb.Atom.from_dtype(tot_mean_pixel_result.dtype),
                shape=tot_mean_pixel_result.shape,
                filters=tb.Filters(complib='blosc',
                                   complevel=5,
                                   fletcher32=False))
            out_4.attrs.dimensions = 'column, row, injection delay'
            out_4.attrs.injection_delay_values = injection_delay
            out_4[:] = tot_mean_pixel_result
            out_5 = out_file_h5.create_carray(
                hists_folder_5,
                name='HistTotPlsrDac_%03d' % old_plsr_dac,
                title='Tot histogram for PlsrDAC ' + str(old_plsr_dac),
                atom=tb.Atom.from_dtype(tot_array.dtype),
                shape=tot_array.shape,
                filters=tb.Filters(complib='blosc',
                                   complevel=5,
                                   fletcher32=False))
            out_5.attrs.injection_delay_values = injection_delay
            out_5[:] = tot_array

        old_plsr_dac = None

        # Get scan parameters from interpreted file
        with tb.open_file(raw_data_file + '_interpreted.h5',
                          'r') as in_file_h5:
            scan_parameters_dict = get_scan_parameter(
                in_file_h5.root.meta_data[:])
            plsr_dac = scan_parameters_dict['PlsrDAC']
            hists_folder._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_2._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_3._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_4._v_attrs.plsr_dac_values = plsr_dac
            injection_delay = scan_parameters_dict[scan_parameters_dict.keys(
            )[1]]  # the injection delay parameter name is not fixed; it is expected to be the inner-loop scan parameter
            scan_parameters = scan_parameters_dict.keys()

        bcid_array = np.zeros((80, 336, len(injection_delay), 16),
                              dtype=np.uint16)  # BCID array for the current PlsrDAC
        tot_pixel_array = np.zeros(
            (80, 336, len(injection_delay), 16),
            dtype=np.uint16)  # ToT pixel array for the current PlsrDAC
        tot_array = np.zeros((16, ),
                             dtype=np.uint32)  # ToT array for the current PlsrDAC

        logging.info('Store histograms for PlsrDAC values ' + str(plsr_dac))
        progress_bar = progressbar.ProgressBar(widgets=[
            '',
            progressbar.Percentage(), ' ',
            progressbar.Bar(marker='*', left='|', right='|'), ' ',
            progressbar.AdaptiveETA()
        ],
                                               maxval=max(plsr_dac) -
                                               min(plsr_dac),
                                               term_width=80)

        for index, (parameters, hits) in enumerate(
                get_hits_of_scan_parameter(raw_data_file + '_interpreted.h5',
                                           scan_parameters,
                                           try_speedup=True,
                                           chunk_size=10000000)):
            if index == 0:
                progress_bar.start(
                )  # Start after the event index is created to get reasonable ETA
            actual_plsr_dac, actual_injection_delay = parameters[
                0], parameters[1]
            column, row, rel_bcid, tot = hits['column'] - 1, hits[
                'row'] - 1, hits['relative_BCID'], hits['tot']
            bcid_array_fast = hist_3d_index(column,
                                            row,
                                            rel_bcid,
                                            shape=(80, 336, 16))
            tot_pixel_array_fast = hist_3d_index(column,
                                                 row,
                                                 tot,
                                                 shape=(80, 336, 16))
            tot_array_fast = hist_1d_index(tot, shape=(16, ))

            if old_plsr_dac != actual_plsr_dac:  # PlsrDAC changed: store the accumulated data of the previous PlsrDAC value
                if old_plsr_dac is not None:  # nothing to store yet for the very first PlsrDAC setting
                    store_bcid_histograms(bcid_array, tot_array,
                                          tot_pixel_array)
                    progress_bar.update(old_plsr_dac - min(plsr_dac))
                # Reset the histograms for the next PlsrDAC setting
                bcid_array = np.zeros((80, 336, len(injection_delay), 16),
                                      dtype=np.uint16)
                tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16),
                                           dtype=np.uint16)
                tot_array = np.zeros((16, ), dtype=np.uint32)
                old_plsr_dac = actual_plsr_dac
            injection_delay_index = np.where(
                np.array(injection_delay) == actual_injection_delay)[0][0]
            bcid_array[:, :, injection_delay_index, :] += bcid_array_fast
            tot_pixel_array[:, :,
                            injection_delay_index, :] += tot_pixel_array_fast
            tot_array += tot_array_fast
        store_bcid_histograms(
            bcid_array, tot_array,
            tot_pixel_array)  # save histograms of last PlsrDAC setting
        progress_bar.finish()

    # Take the mean relative BCID histogram of each PlsrDAC value and calculate the delay for each pixel
    with tb.open_file(raw_data_file + '_analyzed.h5', mode="r+") as in_file_h5:
        hists_folder = in_file_h5.create_group(in_file_h5.root,
                                               'PixelHistsBcidJumps')
        plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values

        # Info output with progressbar
        logging.info(
            'Detect BCID jumps with pixel based S-Curve fits for PlsrDACs ' +
            str(plsr_dac_values))
        progress_bar = progressbar.ProgressBar(widgets=[
            '',
            progressbar.Percentage(), ' ',
            progressbar.Bar(marker='*', left='|', right='|'), ' ',
            progressbar.AdaptiveETA()
        ],
                                               maxval=len(plsr_dac_values),
                                               term_width=80)
        progress_bar.start()

        for index, node in enumerate(
                in_file_h5.root.PixelHistsMeanRelBcid
        ):  # loop over all mean relative BCID hists for all PlsrDAC values and determine the BCID jumps
            actual_plsr_dac = int(re.search(
                r'\d+', node.name).group())  # actual node plsr dac value
            # Select the S-curves and interpolate Nans
            pixel_data = node[:, :, :]
            pixel_data_fixed = pixel_data.reshape(
                pixel_data.shape[0] * pixel_data.shape[1] *
                pixel_data.shape[2])  # Reshape for interpolation of Nans
            nans, x = ~np.isfinite(pixel_data_fixed), lambda z: z.nonzero()[0]
            pixel_data_fixed[nans] = np.interp(
                x(nans), x(~nans), pixel_data_fixed[~nans])  # interpolate Nans
            pixel_data_fixed = pixel_data_fixed.reshape(
                pixel_data.shape[0], pixel_data.shape[1],
                pixel_data.shape[2])  # Reshape after interpolation of Nans

            # Fit all BCID jumps per pixel (1 - 2 jumps expected) with multithreading
            pixel_data_shaped = pixel_data_fixed.reshape(
                pixel_data_fixed.shape[0] * pixel_data_fixed.shape[1],
                pixel_data_fixed.shape[2]).tolist()
            pool = mp.Pool(
            )  # create one worker per available CPU core
            result_array = np.array(pool.map(fit_bcid_jumps,
                                             pixel_data_shaped))
            pool.close()
            pool.join()
            result_array = result_array.reshape(pixel_data_fixed.shape[0],
                                                pixel_data_fixed.shape[1], 4)

            # Store array to file
            out = in_file_h5.create_carray(
                hists_folder,
                name='PixelHistsBcidJumpsPlsrDac_%03d' % actual_plsr_dac,
                title='BCID jumps per pixel for PlsrDAC ' +
                str(actual_plsr_dac),
                atom=tb.Atom.from_dtype(result_array.dtype),
                shape=result_array.shape,
                filters=tb.Filters(complib='blosc',
                                   complevel=5,
                                   fletcher32=False))
            out.attrs.dimensions = 'column, row, BCID first jump, delay first jump, BCID second jump, delay second jump'
            out[:] = result_array
            progress_bar.update(index)

    # Calibrate the step size of the injection delay and create absolute and relative (=time walk) hit delay histograms
    with tb.open_file(raw_data_file + '_analyzed.h5',
                      mode="r+") as out_file_h5:
        # Calculate the injection delay step size from the average distance between the two S-curves of each pixel (over all pixels and PlsrDAC settings); the minimum BCID fixes the absolute time scale
        differences = np.zeros(
            shape=(336, 80,
                   sum(1 for _ in out_file_h5.root.PixelHistsBcidJumps)),
            dtype=np.float)
        min_bcid = 15
        for index, node in enumerate(
                out_file_h5.root.PixelHistsBcidJumps
        ):  # loop over all PlsrDAC nodes to collect the S-curve distances and the minimum BCID
            pixel_data = node[:, :, :]
            selection = (np.logical_and(pixel_data[:, :, 0] > 0,
                                        pixel_data[:, :, 2] > 0)
                         )  # select pixels with two Scurve fits
            difference = np.zeros_like(differences[:, :, 0])
            difference[selection] = pixel_data[selection, 3] - pixel_data[
                selection,
                1]  # Difference in delay settings between the scurves
            difference[np.logical_or(
                difference < 15, difference > 60
            )] = 0  # Get rid of bad data leading to difference that is too small / large
            differences[:, :, index] = difference
            if np.any(pixel_data[selection, 0]) and np.min(
                    pixel_data[selection, 0]
            ) < min_bcid:  # search for the minimum rel. BCID delay (= fastest hits)
                min_bcid = np.amin(pixel_data[selection, 0])

        differences = np.ma.masked_where(
            np.logical_or(differences == 0, ~np.isfinite(differences)),
            differences)

        step_size = np.ma.median(differences)  # delay steps needed for 25 ns
        step_size_error = np.ma.std(differences)  # spread of the delay steps needed for 25 ns

        logging.info(
            'Mean step size for the PlsrDAC delay is %1.2f +- %1.2f ns',
            25. / step_size, 25. / step_size**2 * step_size_error)
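        # The conversion above follows from t(s) = 25 ns / s: one bunch crossing (25 ns) spans
        # step_size injection-delay steps, so a single step corresponds to 25 / step_size ns.
        # Propagating the spread of the step size through t(s) gives
        # sigma_t = |dt/ds| * sigma_s = 25 / step_size**2 * step_size_error, the error logged above.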

        # Calculate the hit delay per pixel
        plsr_dac_values = out_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
        hit_delay = np.zeros(shape=(336, 80, len(plsr_dac_values)),
                             dtype=np.float)  # Result array
        for node in out_file_h5.root.PixelHistsBcidJumps:  # loop over all BCID jump hists for all PlsrDAC values to calculate the hit delay
            actual_plsr_dac = int(re.search(
                r'\d+', node.name).group())  # actual node plsr dac value
            plsr_dac_index = np.where(plsr_dac_values == actual_plsr_dac)[0][0]
            pixel_data = node[:, :, :]
            actual_hit_delay = (pixel_data[:, :, 0] - min_bcid + 1
                                ) * 25. - pixel_data[:, :, 1] * 25. / step_size
            hit_delay[:, :, plsr_dac_index] = actual_hit_delay
        hit_delay = np.ma.masked_less(hit_delay, 0)
        timewalk = hit_delay - np.amin(
            hit_delay, axis=2
        )[:, :, np.
          newaxis]  # Time walk calc. by normalization to minimum hit delay for every pixel

        # Calculate the mean TOT per PlsrDAC (additional information, not needed for hit delay)
        tot = np.zeros(shape=(len(plsr_dac_values), ),
                       dtype=np.float16)  # Result array
        for node in out_file_h5.root.HistsTot:  # Loop over tot hist for all PlsrDAC values
            plsr_dac = int(re.search(r'\d+', node.name).group())
            plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0]
            tot_data = node[:]
            tot[plsr_dac_index] = get_mean_from_histogram(tot_data, range(16))

        # Store the data
        out = out_file_h5.create_carray(
            out_file_h5.root,
            name='HistPixelTimewalkPerPlsrDac',
            title='Time walk per pixel and PlsrDAC',
            atom=tb.Atom.from_dtype(timewalk.dtype),
            shape=timewalk.shape,
            filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        out_2 = out_file_h5.create_carray(
            out_file_h5.root,
            name='HistPixelHitDelayPerPlsrDac',
            title='Hit delay per pixel and PlsrDAC',
            atom=tb.Atom.from_dtype(hit_delay.dtype),
            shape=hit_delay.shape,
            filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        out_3 = out_file_h5.create_carray(out_file_h5.root,
                                          name='HistTotPerPlsrDac',
                                          title='Tot per PlsrDAC',
                                          atom=tb.Atom.from_dtype(tot.dtype),
                                          shape=tot.shape,
                                          filters=tb.Filters(complib='blosc',
                                                             complevel=5,
                                                             fletcher32=False))
        out.attrs.dimensions = 'column, row, PlsrDAC'
        out.attrs.delay_calibration = step_size
        out.attrs.delay_calibration_error = step_size_error
        out.attrs.plsr_dac_values = plsr_dac_values
        out_2.attrs.dimensions = 'column, row, PlsrDAC'
        out_2.attrs.delay_calibration = step_size
        out_2.attrs.delay_calibration_error = step_size_error
        out_2.attrs.plsr_dac_values = plsr_dac_values
        out_3.attrs.dimensions = 'PlsrDAC'
        out_3.attrs.plsr_dac_values = plsr_dac_values
        out[:] = timewalk.filled(fill_value=np.NaN)
        out_2[:] = hit_delay.filled(fill_value=np.NaN)
        out_3[:] = tot

    # Mask the pixels that have non valid data and create plots with the time walk and hit delay for all pixels
    with tb.open_file(raw_data_file + '_analyzed.h5', mode="r") as in_file_h5:

        def plsr_dac_to_charge(
                plsr_dac, vcal_c0, vcal_c1,
                c_high):  # Calibration values are taken from file
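            # Assumed units (not stated in this snippet): vcal_c0 / vcal_c1 give the injection
            # voltage in mV and c_high is the injection capacitance in fF, so voltage * c_high
            # is the injected charge in aC; dividing by the elementary charge of 0.16022 aC
            # converts it to electrons.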
            voltage = vcal_c0 + vcal_c1 * plsr_dac
            return voltage * c_high / 0.16022

        def plot_hit_delay(hist_3d,
                           charge_values,
                           title,
                           xlabel,
                           ylabel,
                           filename,
                           threshold=None,
                           tot_values=None):
            # Interpolate tot values for second tot axis
            interpolation = interp1d(tot_values,
                                     charge_values,
                                     kind='slinear',
                                     bounds_error=True)
            tot = np.arange(16)
            tot = tot[np.logical_and(tot >= np.min(tot_values),
                                     tot <= np.max(tot_values))]

            array = np.transpose(hist_3d, axes=(2, 1, 0)).reshape(
                hist_3d.shape[2], hist_3d.shape[0] * hist_3d.shape[1])
            y = np.mean(array, axis=1)
            y_err = np.std(array, axis=1)

            fig = Figure()
            FigureCanvas(fig)
            ax = fig.add_subplot(111)
            fig.patch.set_facecolor('white')
            ax.grid(True)
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)
            ax.set_xlim((0, np.max(charge_values)))
            ax.set_ylim((np.min(y - y_err), np.max(y + y_err)))
            ax.plot(charge_values, y, '.-', color='black', label=title)
            if threshold is not None:
                ax.plot(
                    [threshold, threshold],
                    [np.min(y - y_err), np.max(y + y_err)],
                    linestyle='--',
                    color='black',
                    label='Threshold\n%d e' % (threshold))
            ax.fill_between(charge_values,
                            y - y_err,
                            y + y_err,
                            color='gray',
                            alpha=0.5,
                            facecolor='gray',
                            label='RMS')
            ax2 = ax.twiny()
            ax2.set_xlabel("ToT")

            ticklab = ax2.xaxis.get_ticklabels()[0]
            trans = ticklab.get_transform()
            ax2.xaxis.set_label_coords(np.max(charge_values),
                                       1,
                                       transform=trans)
            ax2.set_xlim(ax.get_xlim())
            ax2.set_xticks(interpolation(tot))
            ax2.set_xticklabels([str(int(i)) for i in tot])
            ax.text(0.5,
                    1.07,
                    title,
                    horizontalalignment='center',
                    fontsize=18,
                    transform=ax2.transAxes)
            ax.legend()
            filename.savefig(fig)

        plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
        charge_values = plsr_dac_to_charge(np.array(plsr_dac_values), vcal_c0,
                                           vcal_c1, c_high)
        hist_timewalk = in_file_h5.root.HistPixelTimewalkPerPlsrDac[:, :, :]
        hist_hit_delay = in_file_h5.root.HistPixelHitDelayPerPlsrDac[:, :, :]
        tot = in_file_h5.root.HistTotPerPlsrDac[:]

        hist_timewalk = np.ma.masked_invalid(hist_timewalk)
        hist_hit_delay = np.ma.masked_invalid(hist_hit_delay)

        output_pdf = PdfPages(raw_data_file + '_analyzed.pdf')
        plot_hit_delay(np.swapaxes(hist_timewalk, 0, 1),
                       charge_values=charge_values,
                       title='Time walk',
                       xlabel='Charge [e]',
                       ylabel='Time walk [ns]',
                       filename=output_pdf,
                       threshold=np.amin(charge_values),
                       tot_values=tot)
        plot_hit_delay(np.swapaxes(hist_hit_delay, 0, 1),
                       charge_values=charge_values,
                       title='Hit delay',
                       xlabel='Charge [e]',
                       ylabel='Hit delay [ns]',
                       filename=output_pdf,
                       threshold=np.amin(charge_values),
                       tot_values=tot)
        plot_scurves(np.swapaxes(hist_timewalk, 0, 1),
                     scan_parameters=charge_values,
                     title='Timewalk of the FE-I4',
                     scan_parameter_name='Charge [e]',
                     ylabel='Timewalk [ns]',
                     min_x=0,
                     filename=output_pdf)
        plot_scurves(
            np.swapaxes(hist_hit_delay[:, :, :], 0, 1),
            scan_parameters=charge_values,
            title='Hit delay (T0) with internal charge injection\nof the FE-I4',
            scan_parameter_name='Charge [e]',
            ylabel='Hit delay [ns]',
            min_x=0,
            filename=output_pdf)

        for i in [
                0, 1,
                len(plsr_dac_values) / 4,
                len(plsr_dac_values) / 2, -1
        ]:  # Plot 2d hist at min, 1/4, 1/2, max PlsrDAC setting
            plot_three_way(hist_timewalk[:, :, i],
                           title='Time walk at %.0f e' % (charge_values[i]),
                           x_axis_title='Time walk [ns]',
                           filename=output_pdf)
            plot_three_way(
                hist_hit_delay[:, :, i],
                title='Hit delay (T0) with internal charge injection at %.0f e'
                % (charge_values[i]),
                x_axis_title='Hit delay [ns]',
                minimum=np.amin(hist_hit_delay[:, :, i]),
                maximum=np.max(hist_hit_delay[:, :, i]),
                filename=output_pdf)
        output_pdf.close()
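
A trick used repeatedly above (for the mean relative BCID, the mean ToT per pixel and get_mean_from_histogram) is to recover a mean value from a histogram whose last axis holds the bin counts: np.average weighted with the bin values yields sum(count * value) / sum(value), and the extra factor sum(value) / sum(count) rescales this to the actual mean. A small stand-alone sketch with an invented toy histogram:

import numpy as np

# Toy per-pixel histogram: counts of the relative BCID (bins 0..15) for a 2 x 3 pixel array
counts = np.zeros((2, 3, 16), dtype=np.uint16)
counts[:, :, 7] = 4      # every pixel: four hits in BCID bin 7
counts[0, 0, :] = 0
counts[0, 0, 4] = 10     # pixel (0, 0): ten hits in bin 4 ...
counts[0, 0, 5] = 10     # ... and ten hits in bin 5 -> mean BCID 4.5

bin_values = np.arange(16)

# Same construction as above: np.average weighted by the bin values gives
# sum(count * value) / sum(value); rescaling by sum(value) / sum(count) yields the true mean
mean_per_pixel = np.average(counts, axis=2, weights=bin_values) * bin_values.sum() / np.sum(counts, axis=2).astype('f4')

print(mean_per_pixel[0, 0])  # -> 4.5
print(mean_per_pixel[1, 2])  # -> 7.0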
Code Example #30
0
    def scan(self):
        cal_lvl1_command = self.register.get_commands("CAL")[0] + self.register.get_commands("zeros", length=40)[0] + self.register.get_commands("LV1")[0]

        self.write_target_threshold()

        scan_parameter_range = [(2 ** self.register.global_registers['Vthin_AltFine']['bitlength']), 0]  # high to low
        if self.start_gdac:
            scan_parameter_range[0] = self.start_gdac
        if self.gdac_lower_limit:
            scan_parameter_range[1] = self.gdac_lower_limit

        scan_parameter_range = np.arange(scan_parameter_range[0], scan_parameter_range[1] - 1, self.step_size)

        logging.info("Scanning %s from %d to %d", 'GDAC', scan_parameter_range[0], scan_parameter_range[-1])

        def bits_set(int_type):
            int_type = int(int_type)
            position = 0
            bits_set = []
            while(int_type):
                if(int_type & 1):
                    bits_set.append(position)
                position += 1
                int_type = int_type >> 1
            return bits_set

        # calculate selected pixels from the mask and the disabled columns
        select_mask_array = np.zeros(shape=(80, 336), dtype=np.uint8)
        self.occ_array_sel_pixels_best = select_mask_array.copy()
        self.occ_array_desel_pixels_best = select_mask_array.copy()
        if not self.enable_mask_steps_gdac:
            self.enable_mask_steps_gdac = range(self.mask_steps)
        for mask_step in self.enable_mask_steps_gdac:
            select_mask_array += make_pixel_mask(steps=self.mask_steps, shift=mask_step)
        for column in bits_set(self.register.get_global_register_value("DisableColumnCnfg")):
            logging.info('Deselect double column %d' % column)
            select_mask_array[column, :] = 0

        gdac_values = []
        gdac_occupancy = []
        gdac_occ_array_sel_pixels = []
        gdac_occ_array_desel_pixels = []
        median_occupancy_last_step = None
        for scan_parameter_value in scan_parameter_range:
            self.register_utils.set_gdac(scan_parameter_value)
            with self.readout(GDAC=scan_parameter_value, fill_buffer=True):
                scan_loop(self,
                          command=cal_lvl1_command,
                          repeat_command=self.n_injections_gdac,
                          mask_steps=self.mask_steps,
                          enable_mask_steps=self.enable_mask_steps_gdac,
                          enable_double_columns=None,
                          same_mask_for_all_dc=self.same_mask_for_all_dc,
                          eol_function=None,
                          digital_injection=False,
                          enable_shift_masks=self.enable_shift_masks,
                          disable_shift_masks=self.disable_shift_masks,
                          restore_shift_masks=True,
                          mask=None,
                          double_column_correction=self.pulser_dac_correction)

            data = convert_data_array(array=self.read_data(), filter_func=is_data_record, converter_func=get_col_row_array_from_data_record_array)
            occupancy_array, _, _ = np.histogram2d(*data, bins=(80, 336), range=[[1, 80], [1, 336]])
            occ_array_sel_pixels = np.ma.array(occupancy_array, mask=np.logical_not(np.ma.make_mask(select_mask_array)))  # take only selected pixel into account by using the mask
            occ_array_desel_pixels = np.ma.array(occupancy_array, mask=np.ma.make_mask(select_mask_array))  # take only de-selected pixel into account by using the inverted mask
            median_occupancy = np.ma.median(occ_array_sel_pixels)
            noise_occupancy = np.ma.median(occ_array_desel_pixels)
            occupancy_almost_zero = np.allclose(median_occupancy, 0)
            no_noise = np.allclose(noise_occupancy, 0)
            gdac_values.append(self.register_utils.get_gdac())
            gdac_occupancy.append(median_occupancy)
            gdac_occ_array_sel_pixels.append(occ_array_sel_pixels.copy())
            gdac_occ_array_desel_pixels.append(occ_array_desel_pixels.copy())
            self.occ_array_sel_pixels_best = occ_array_sel_pixels.copy()
            self.occ_array_desel_pixels_best = occ_array_desel_pixels.copy()

            if self.plot_intermediate_steps:
                plot_three_way(occ_array_sel_pixels.transpose(), title="Occupancy (GDAC " + str(scan_parameter_value) + ")", x_axis_title='Occupancy', filename=self.plots_filename, maximum=self.n_injections_gdac)

            # abort early if threshold is found
            if no_noise and not occupancy_almost_zero and (median_occupancy_last_step is not None and median_occupancy >= median_occupancy_last_step) and median_occupancy >= self.n_injections_gdac / 2:
                break

            if no_noise and not occupancy_almost_zero:
                median_occupancy_last_step = median_occupancy
            else:
                median_occupancy_last_step = 0.0

        # select best GDAC value
        occupancy_sorted = np.array(gdac_occupancy)[np.argsort(np.array(gdac_values))]
        gdac_sorted = np.sort(gdac_values)
        gdac_min_idx = np.where(occupancy_sorted >= self.n_injections_gdac / 2)[0][-1]
        occupancy_sorted_sel = occupancy_sorted[gdac_min_idx:]
        gdac_sorted_sel = gdac_sorted[gdac_min_idx:]
        gdac_best_idx = np.abs(np.array(occupancy_sorted_sel) - self.n_injections_gdac / 2).argmin()
        gdac_best = gdac_sorted_sel[gdac_best_idx]
        occupancy_best = occupancy_sorted_sel[gdac_best_idx]
        median_occupancy = occupancy_best
        self.register_utils.set_gdac(gdac_best, send_command=False)
        # for plotting
        self.occ_array_sel_pixels_best = np.array(gdac_occ_array_sel_pixels)[np.argsort(np.array(gdac_values))][gdac_best_idx]
        self.occ_array_desel_pixels_best = np.array(gdac_occ_array_desel_pixels)[np.argsort(np.array(gdac_values))][gdac_best_idx]

        self.gdac_best = self.register_utils.get_gdac()

        if abs(median_occupancy - self.n_injections_gdac / 2) > self.max_delta_threshold and not self.stop_run.is_set():
            if np.all((((self.gdac_best & (1 << np.arange(self.register.global_registers['Vthin_AltFine']['bitlength'] + self.register.global_registers['Vthin_AltCoarse']['bitlength'])))) > 0).astype(int) == 0):
                if self.fail_on_warning:
                    raise RuntimeWarning('Selected GDAC bits reached minimum value')
                else:
                    logging.warning('Selected GDAC bits reached minimum value')
            else:
                if self.fail_on_warning:
                    raise RuntimeWarning('Global threshold tuning failed. Delta threshold = %.2f > %.2f. Vthin_AltCoarse / Vthin_AltFine = %d / %d' % (abs(median_occupancy - self.n_injections_gdac / 2), self.max_delta_threshold, self.register.get_global_register_value("Vthin_AltCoarse"), self.register.get_global_register_value("Vthin_AltFine")))
                else:
                    logging.warning('Global threshold tuning failed. Delta threshold = %.2f > %.2f. Vthin_AltCoarse / Vthin_AltFine = %d / %d', abs(median_occupancy - self.n_injections_gdac / 2), self.max_delta_threshold, self.register.get_global_register_value("Vthin_AltCoarse"), self.register.get_global_register_value("Vthin_AltFine"))
        else:
            logging.info('Tuned GDAC to Vthin_AltCoarse / Vthin_AltFine = %d / %d', self.register.get_global_register_value("Vthin_AltCoarse"), self.register.get_global_register_value("Vthin_AltFine"))
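
Once the scan loop has finished, the best GDAC is selected from the recorded (GDAC, median occupancy) pairs: the points are ordered by ascending GDAC, everything below the last GDAC at which at least half of the injections were still seen is discarded, and of the remaining points the one with the occupancy closest to n_injections / 2 wins. A toy reproduction of that selection with invented numbers:

import numpy as np

# Invented scan result: GDAC was scanned from high to low, occupancy rises as GDAC drops
n_injections = 100
gdac_values = [60, 55, 50, 45, 40]
gdac_occupancy = [5.0, 20.0, 48.0, 75.0, 98.0]

order = np.argsort(np.array(gdac_values))
occupancy_sorted = np.array(gdac_occupancy)[order]  # occupancy ordered by ascending GDAC
gdac_sorted = np.sort(gdac_values)

# Last GDAC at which at least half of the injections were still seen ...
gdac_min_idx = np.where(occupancy_sorted >= n_injections / 2)[0][-1]
# ... and from there on the point whose occupancy is closest to n_injections / 2 wins
gdac_best_idx = np.abs(occupancy_sorted[gdac_min_idx:] - n_injections / 2).argmin()
gdac_best = gdac_sorted[gdac_min_idx:][gdac_best_idx]

print(gdac_best)  # -> 50, the scan point with median occupancy 48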
Code Example #31
0
File: tune_tdac.py Project: CARIBOuSystem/pyBAR
    def scan(self):
        if not self.plots_filename:
            self.plots_filename = PdfPages(self.output_filename + '.pdf')
            self.close_plots = True
        else:
            self.close_plots = False
        mask_steps = 3
        enable_mask_steps = []
        cal_lvl1_command = self.register.get_commands("CAL")[0] + self.register.get_commands("zeros", length=40)[0] + self.register.get_commands("LV1")[0] + self.register.get_commands("zeros", mask_steps=mask_steps)[0]

        self.write_target_threshold()
        additional_scan = True
        lastBitResult = np.zeros(shape=self.register.get_pixel_register_value("TDAC").shape, dtype=self.register.get_pixel_register_value("TDAC").dtype)

        self.set_start_tdac()

        self.occupancy_best = np.empty(shape=(80, 336))  # array to store the best occupancy (closest to Ninjections/2) of the pixel
        self.occupancy_best.fill(self.n_injections_tdac)
        self.tdac_mask_best = self.register.get_pixel_register_value("TDAC")

        for scan_parameter_value, tdac_bit in enumerate(self.tdac_tune_bits):
            if additional_scan:
                self.set_tdac_bit(tdac_bit)
                logging.info('TDAC setting: bit %d = 1', tdac_bit)
            else:
                self.set_tdac_bit(tdac_bit, bit_value=0)
                logging.info('TDAC setting: bit %d = 0', tdac_bit)

            self.write_tdac_config()

            with self.readout(TDAC=scan_parameter_value, reset_sram_fifo=True, fill_buffer=True, clear_buffer=True, callback=self.handle_data):
                scan_loop(self, cal_lvl1_command, repeat_command=self.n_injections_tdac, mask_steps=mask_steps, enable_mask_steps=enable_mask_steps, enable_double_columns=None, same_mask_for_all_dc=True, eol_function=None, digital_injection=False, enable_shift_masks=self.enable_shift_masks, disable_shift_masks=self.disable_shift_masks, restore_shift_masks=True, mask=None, double_column_correction=self.pulser_dac_correction)

            occupancy_array, _, _ = np.histogram2d(*convert_data_array(data_array_from_data_iterable(self.fifo_readout.data), filter_func=is_data_record, converter_func=get_col_row_array_from_data_record_array), bins=(80, 336), range=[[1, 80], [1, 336]])
            select_better_pixel_mask = abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)
            pixel_with_too_high_occupancy_mask = occupancy_array > self.n_injections_tdac / 2
            self.occupancy_best[select_better_pixel_mask] = occupancy_array[select_better_pixel_mask]

            if self.plot_intermediate_steps:
                plot_three_way(occupancy_array.transpose(), title="Occupancy (TDAC tuning bit " + str(tdac_bit) + ")", x_axis_title='Occupancy', filename=self.plots_filename, maximum=self.n_injections_tdac)

            tdac_mask = self.register.get_pixel_register_value("TDAC")
            self.tdac_mask_best[select_better_pixel_mask] = tdac_mask[select_better_pixel_mask]

            if tdac_bit > 0:
                tdac_mask[pixel_with_too_high_occupancy_mask] = tdac_mask[pixel_with_too_high_occupancy_mask] & ~(1 << tdac_bit)
                self.register.set_pixel_register_value("TDAC", tdac_mask)

            if tdac_bit == 0:
                if additional_scan:  # scan bit = 0 with the correct value again
                    additional_scan = False
                    lastBitResult = occupancy_array.copy()
                    self.tdac_tune_bits.append(0)  # bit 0 has to be scanned twice
                else:
                    tdac_mask[abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)] = tdac_mask[abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)] | (1 << tdac_bit)
                    occupancy_array[abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)] = lastBitResult[abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)]
                    self.occupancy_best[abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)] = occupancy_array[abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)]
                    self.tdac_mask_best[abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)] = tdac_mask[abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)]

        self.register.set_pixel_register_value("TDAC", self.tdac_mask_best)  # set value for meta scan
        self.write_tdac_config()
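
The TDAC loop above is a per-pixel successive approximation: each TDAC bit is first tried at 1, the occupancy is measured, and the bit is cleared again for pixels that fire more often than half the injections; bit 0 is scanned twice and the better of the two results is kept. The following scalar sketch shows the same idea for a single pixel. It drops the double scan of bit 0 and the best-value bookkeeping, and measure() is a made-up monotonic response, not anything from pyBAR.

# Hypothetical single-pixel response (invented): occupancy grows with the TDAC value
def measure(tdac, n_injections=100):
    return min(n_injections, 4 * tdac)

n_injections = 100
target = n_injections / 2.0
tdac = 0
for bit in [4, 3, 2, 1, 0]:     # successive approximation, most significant bit first
    tdac |= (1 << bit)          # try the bit set to 1 ...
    if measure(tdac) > target:  # ... and clear it again if the pixel fires too often
        tdac &= ~(1 << bit)

print(tdac, measure(tdac))  # -> 12 48: the closest setting at or below the target occupancy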
Code Example #32
0
File: tune_fdac.py Project: experimentAccount0/pyBAR
    def scan(self):
        if not self.plots_filename:
            self.plots_filename = PdfPages(self.output_filename + '.pdf')
            self.close_plots = True
        else:
            self.close_plots = False

        enable_mask_steps = []

        cal_lvl1_command = self.register.get_commands(
            "CAL")[0] + self.register.get_commands(
                "zeros", length=40)[0] + self.register.get_commands("LV1")[0]

        self.write_target_charge()
        additional_scan = True
        lastBitResult = np.zeros(
            shape=self.register.get_pixel_register_value("FDAC").shape,
            dtype=self.register.get_pixel_register_value("FDAC").dtype)

        self.set_start_fdac()

        self.tot_mean_best = np.full(
            shape=(80, 336), fill_value=0
        )  # array to store the best mean ToT (closest to the target ToT) of each pixel
        self.fdac_mask_best = self.register.get_pixel_register_value("FDAC")
        fdac_tune_bits = self.fdac_tune_bits[:]
        for scan_parameter_value, fdac_bit in enumerate(fdac_tune_bits):
            if additional_scan:
                self.set_fdac_bit(fdac_bit)
                logging.info('FDAC setting: bit %d = 1', fdac_bit)
            else:
                self.set_fdac_bit(fdac_bit, bit_value=0)
                logging.info('FDAC setting: bit %d = 0', fdac_bit)

            self.write_fdac_config()

            with self.readout(FDAC=scan_parameter_value,
                              reset_sram_fifo=True,
                              fill_buffer=True,
                              clear_buffer=True,
                              callback=self.handle_data):
                scan_loop(self,
                          command=cal_lvl1_command,
                          repeat_command=self.n_injections_fdac,
                          mask_steps=self.mask_steps,
                          enable_mask_steps=enable_mask_steps,
                          enable_double_columns=None,
                          same_mask_for_all_dc=self.same_mask_for_all_dc,
                          eol_function=None,
                          digital_injection=False,
                          enable_shift_masks=self.enable_shift_masks,
                          disable_shift_masks=self.disable_shift_masks,
                          restore_shift_masks=True,
                          mask=None,
                          double_column_correction=self.pulser_dac_correction)

            col_row_tot = np.column_stack(
                convert_data_array(
                    data_array_from_data_iterable(self.fifo_readout.data),
                    filter_func=logical_and(is_fe_word, is_data_record),
                    converter_func=get_col_row_tot_array_from_data_record_array
                ))
            tot_array = np.histogramdd(col_row_tot,
                                       bins=(80, 336, 16),
                                       range=[[1, 80], [1, 336], [0, 15]])[0]
            tot_mean_array = np.average(
                tot_array, axis=2, weights=range(0, 16)) * sum(range(
                    0, 16)) / self.n_injections_fdac
            select_better_pixel_mask = abs(
                tot_mean_array - self.target_tot) <= abs(self.tot_mean_best -
                                                         self.target_tot)
            pixel_with_too_small_mean_tot_mask = tot_mean_array < self.target_tot
            self.tot_mean_best[select_better_pixel_mask] = tot_mean_array[
                select_better_pixel_mask]

            if self.plot_intermediate_steps:
                plot_three_way(hist=tot_mean_array.transpose(),
                               title="Mean ToT (FDAC tuning bit " +
                               str(fdac_bit) + ")",
                               x_axis_title='mean ToT',
                               filename=self.plots_filename,
                               minimum=0,
                               maximum=15)

            fdac_mask = self.register.get_pixel_register_value("FDAC")
            self.fdac_mask_best[select_better_pixel_mask] = fdac_mask[
                select_better_pixel_mask]
            if fdac_bit > 0:
                fdac_mask[pixel_with_too_small_mean_tot_mask] = fdac_mask[
                    pixel_with_too_small_mean_tot_mask] & ~(1 << fdac_bit)
                self.register.set_pixel_register_value("FDAC", fdac_mask)

            if fdac_bit == 0:
                if additional_scan:  # scan bit = 0 with the correct value again
                    additional_scan = False
                    lastBitResult = tot_mean_array.copy()
                    fdac_tune_bits.append(0)  # bit 0 has to be scanned twice
                else:
                    fdac_mask[abs(tot_mean_array - self.target_tot) > abs(
                        lastBitResult - self.target_tot
                    )] = fdac_mask[abs(tot_mean_array - self.target_tot) > abs(
                        lastBitResult - self.target_tot)] | (1 << fdac_bit)
                    tot_mean_array[abs(tot_mean_array - self.target_tot) > abs(
                        lastBitResult - self.target_tot)] = lastBitResult[
                            abs(tot_mean_array -
                                self.target_tot) > abs(lastBitResult -
                                                       self.target_tot)]
                    better_pixel_mask = abs(tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.target_tot)
                    self.tot_mean_best[better_pixel_mask] = tot_mean_array[better_pixel_mask]
                    self.fdac_mask_best[better_pixel_mask] = fdac_mask[better_pixel_mask]

        self.register.set_pixel_register_value(
            "FDAC", self.fdac_mask_best)  # set value for meta scan
        self.write_fdac_config()
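
Note that the mean ToT above is normalised to the number of injections rather than to the number of recorded hits, so pixels that miss some injections are pulled towards ToT 0. A small sketch of that normalisation for a single pixel, with invented numbers:

import numpy as np

# Invented single-pixel ToT histogram: 80 of 100 injections produced a hit
n_injections = 100
tot_hist = np.zeros(16, dtype=np.uint32)
tot_hist[6] = 40  # 40 hits with ToT 6
tot_hist[7] = 40  # 40 hits with ToT 7

tot_values = np.arange(16)
# Same expression as above: sum(count * ToT) is spread over all injections, not only over the hits
mean_tot = np.average(tot_hist, weights=tot_values) * tot_values.sum() / n_injections

print(mean_tot)  # -> 5.2 instead of 6.5, because the 20 missed injections count as ToT 0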
Code Example #33
0
def histogram_tdc_hits(input_file_hits, hit_selection_conditions, event_status_select_mask, event_status_condition, calibation_file=None, correct_calibration=None, max_tdc=analysis_configuration['max_tdc'], n_bins=analysis_configuration['n_bins']):
    for condition in hit_selection_conditions:
        logging.info('Histogram tdc hits with %s', condition)

    def get_charge(max_tdc, tdc_calibration_values, tdc_pixel_calibration):  # return the charge from calibration
        charge_calibration = np.zeros(shape=(80, 336, max_tdc))
        for column in range(80):
            for row in range(336):
                actual_pixel_calibration = tdc_pixel_calibration[column, row, :]
                if np.any(actual_pixel_calibration != 0) and np.all(np.isfinite(actual_pixel_calibration)):
                    interpolation = interp1d(x=actual_pixel_calibration, y=tdc_calibration_values, kind='slinear', bounds_error=False, fill_value=0)
                    charge_calibration[column, row, :] = interpolation(np.arange(max_tdc))
        return charge_calibration

    def plot_tdc_tot_correlation(data, condition, output_pdf):
        logging.info('Plot correlation histogram for %s', condition)
        plt.clf()
        data = np.ma.array(data, mask=(data <= 0))
        if np.ma.any(data > 0):
            cmap = cm.get_cmap('jet', 200)
            cmap.set_bad('w')
            plt.title('Correlation with %s' % condition)
            norm = colors.LogNorm()
            z_max = data.max(fill_value=0)
            plt.xlabel('TDC')
            plt.ylabel('TOT')
            im = plt.imshow(data, cmap=cmap, norm=norm, aspect='auto', interpolation='nearest')  # , norm=norm)
            divider = make_axes_locatable(plt.gca())
            plt.gca().invert_yaxis()
            cax = divider.append_axes("right", size="5%", pad=0.1)
            plt.colorbar(im, cax=cax, ticks=np.linspace(start=0, stop=z_max, num=9, endpoint=True))
            output_pdf.savefig()
        else:
            logging.warning('No data for correlation plotting for %s', condition)

    def plot_hits_per_condition(output_pdf):
        logging.info('Plot hit selection efficiency histogram for %d conditions', len(hit_selection_conditions) + 2)
        labels = ['All Hits', 'Hits of\ngood events']
        for condition in hit_selection_conditions:
            condition = re.sub('[&]', '\n', condition)
            condition = re.sub('[()]', '', condition)
            labels.append(condition)
        plt.clf()
        plt.bar(range(len(n_hits_per_condition)), n_hits_per_condition, align='center')
        plt.xticks(range(len(n_hits_per_condition)), labels, size=8)
        plt.title('Number of hits for different cuts')
        plt.yscale('log')
        plt.ylabel('#')
        plt.grid()
        for x, y in zip(np.arange(len(n_hits_per_condition)), n_hits_per_condition):
            plt.annotate('%d' % (float(y) / float(n_hits_per_condition[0]) * 100.) + r'%', xy=(x, y / 2.), xycoords='data', color='grey', size=15)
        output_pdf.savefig()

    def plot_corrected_tdc_hist(x, y, title, output_pdf, point_style='-'):
        logging.info('Plot TDC hist with TDC calibration')
        plt.clf()
        if y.shape[0] > 0:
            y /= np.amax(y)  # normalize the histogram to its maximum
        plt.plot(x, y, point_style)
        plt.title(title, size=10)
        plt.xlabel('Charge [PlsrDAC]')
        plt.ylabel('Count [a.u.]')
        plt.grid()
        output_pdf.savefig()

    def get_calibration_correction(tdc_calibration, tdc_calibration_values, filename_new_calibration):  # correct the TDC calibration with the TDC calib in filename_new_calibration by shifting the means
        with tb.open_file(filename_new_calibration, 'r') as in_file_2:
            charge_calibration_1, charge_calibration_2 = tdc_calibration, in_file_2.root.HitOrCalibration[:, :, :, 1]

            plsr_dacs = tdc_calibration_values
            if not np.all(plsr_dacs == in_file_2.root.HitOrCalibration._v_attrs.scan_parameter_values):
                raise NotImplementedError('The calibration file used for the correction has to contain the same PlsrDAC values')

            valid_pixel = np.where(np.logical_and(charge_calibration_1.sum(axis=2) > 0, charge_calibration_2.sum(axis=2) > 0))  # valid pixel have a calibration in the new and the old calibration
            mean_charge_calibration = charge_calibration_2[valid_pixel].mean(axis=0)
            offset_mean = (charge_calibration_1[valid_pixel] - charge_calibration_2[valid_pixel]).mean(axis=0)

            dPlsrDAC_dTDC = analysis_utils.smooth_differentiation(plsr_dacs, mean_charge_calibration, order=3, smoothness=0, derivation=1)

            plt.clf()
            plt.plot(plsr_dacs, offset_mean / dPlsrDAC_dTDC, '.-', label='PlsrDAC')
            plt.plot(plsr_dacs, offset_mean, '.-', label='TDC')
            plt.grid()
            plt.xlabel('PlsrDAC')
            plt.ylabel('Mean calibration offset')
            plt.legend(loc=0)
            plt.title('Mean offset between TDC calibration data, old - new ')
            plt.show()
            return offset_mean

    # Create data
    with tb.openFile(input_file_hits, mode="r") as in_hit_file_h5:
        cluster_hit_table = in_hit_file_h5.root.ClusterHits

        # Result hists, initialized per condition
        pixel_tdc_hists_per_condition = [np.zeros(shape=(80, 336, max_tdc), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        pixel_tdc_timestamp_hists_per_condition = [np.zeros(shape=(80, 336, 256), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        mean_pixel_tdc_hists_per_condition = [np.zeros(shape=(80, 336), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        mean_pixel_tdc_timestamp_hists_per_condition = [np.zeros(shape=(80, 336), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        tdc_hists_per_condition = [np.zeros(shape=(max_tdc), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        tdc_corr_hists_per_condition = [np.zeros(shape=(max_tdc, 16), dtype=np.uint32) for _ in hit_selection_conditions] if hit_selection_conditions else []

        n_hits_per_condition = [0 for _ in range(len(hit_selection_conditions) + 2)]  # the first two counters hold all hits and hits of good events

        logging.info('Select hits and create TDC histograms for %d cut conditions', len(hit_selection_conditions))
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_hit_table.shape[0], term_width=80)
        progress_bar.start()
        for cluster_hits, _ in analysis_utils.data_aligned_at_events(cluster_hit_table, chunk_size=10000000):
            n_hits_per_condition[0] += cluster_hits.shape[0]
            selected_events_cluster_hits = cluster_hits[np.logical_and(cluster_hits['TDC'] < max_tdc, (cluster_hits['event_status'] & event_status_select_mask) == event_status_condition)]
            n_hits_per_condition[1] += selected_events_cluster_hits.shape[0]
            for index, condition in enumerate(hit_selection_conditions):
                selected_cluster_hits = analysis_utils.select_hits(selected_events_cluster_hits, condition)
                n_hits_per_condition[2 + index] += selected_cluster_hits.shape[0]
                column, row, tdc = selected_cluster_hits['column'] - 1, selected_cluster_hits['row'] - 1, selected_cluster_hits['TDC']
                pixel_tdc_hists_per_condition[index] += analysis_utils.hist_3d_index(column, row, tdc, shape=(80, 336, max_tdc))
                mean_pixel_tdc_hists_per_condition[index] = np.average(pixel_tdc_hists_per_condition[index], axis=2, weights=range(0, max_tdc)) * np.sum(np.arange(0, max_tdc)) / pixel_tdc_hists_per_condition[index].sum(axis=2)
                tdc_timestamp = selected_cluster_hits['TDC_time_stamp']
                pixel_tdc_timestamp_hists_per_condition[index] += analysis_utils.hist_3d_index(column, row, tdc_timestamp, shape=(80, 336, 256))
                mean_pixel_tdc_timestamp_hists_per_condition[index] = np.average(pixel_tdc_timestamp_hists_per_condition[index], axis=2, weights=range(0, 256)) * np.sum(np.arange(0, 256)) / pixel_tdc_timestamp_hists_per_condition[index].sum(axis=2)
                tdc_hists_per_condition[index] = pixel_tdc_hists_per_condition[index].sum(axis=(0, 1))
                tdc_corr_hists_per_condition[index] += analysis_utils.hist_2d_index(tdc, selected_cluster_hits['tot'], shape=(max_tdc, 16))
            progress_bar.update(n_hits_per_condition[0])
        progress_bar.finish()

        # Take TDC calibration if available and calculate charge for each TDC value and pixel
        if calibation_file is not None:
            with tb.openFile(calibation_file, mode="r") as in_file_calibration_h5:
                tdc_calibration = in_file_calibration_h5.root.HitOrCalibration[:, :, :, 1]
                tdc_calibration_values = in_file_calibration_h5.root.HitOrCalibration.attrs.scan_parameter_values[:]
                if correct_calibration is not None:
                    tdc_calibration += get_calibration_correction(tdc_calibration, tdc_calibration_values, correct_calibration)
            charge_calibration = get_charge(max_tdc, tdc_calibration_values, tdc_calibration)
        else:
            charge_calibration = None

        # Store data of result histograms
        with tb.open_file(input_file_hits[:-3] + '_tdc_hists.h5', mode="w") as out_file_h5:
            for index, condition in enumerate(hit_selection_conditions):
                pixel_tdc_hist_result = np.swapaxes(pixel_tdc_hists_per_condition[index], 0, 1)
                pixel_tdc_timestamp_hist_result = np.swapaxes(pixel_tdc_timestamp_hists_per_condition[index], 0, 1)
                mean_pixel_tdc_hist_result = np.swapaxes(mean_pixel_tdc_hists_per_condition[index], 0, 1)
                mean_pixel_tdc_timestamp_hist_result = np.swapaxes(mean_pixel_tdc_timestamp_hists_per_condition[index], 0, 1)
                tdc_hists_per_condition_result = tdc_hists_per_condition[index]
                tdc_corr_hist_result = np.swapaxes(tdc_corr_hists_per_condition[index], 0, 1)
                # Create result hists
                out_1 = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTdcCondition_%d' % index, title='Hist Pixel Tdc with %s' % condition, atom=tb.Atom.from_dtype(pixel_tdc_hist_result.dtype), shape=pixel_tdc_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_2 = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTdcTimestampCondition_%d' % index, title='Hist Pixel Tdc Timestamp with %s' % condition, atom=tb.Atom.from_dtype(pixel_tdc_timestamp_hist_result.dtype), shape=pixel_tdc_timestamp_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_3 = out_file_h5.createCArray(out_file_h5.root, name='HistMeanPixelTdcCondition_%d' % index, title='Hist Mean Pixel Tdc with %s' % condition, atom=tb.Atom.from_dtype(mean_pixel_tdc_hist_result.dtype), shape=mean_pixel_tdc_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_4 = out_file_h5.createCArray(out_file_h5.root, name='HistMeanPixelTdcTimestampCondition_%d' % index, title='Hist Mean Pixel Tdc Timestamp with %s' % condition, atom=tb.Atom.from_dtype(mean_pixel_tdc_timestamp_hist_result.dtype), shape=mean_pixel_tdc_timestamp_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_5 = out_file_h5.createCArray(out_file_h5.root, name='HistTdcCondition_%d' % index, title='Hist Tdc with %s' % condition, atom=tb.Atom.from_dtype(tdc_hists_per_condition_result.dtype), shape=tdc_hists_per_condition_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_6 = out_file_h5.createCArray(out_file_h5.root, name='HistTdcCorrCondition_%d' % index, title='Hist Correlation Tdc/Tot with %s' % condition, atom=tb.Atom.from_dtype(tdc_corr_hist_result.dtype), shape=tdc_corr_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                # Add result hists information
                out_1.attrs.dimensions, out_1.attrs.condition, out_1.attrs.tdc_values = 'column, row, TDC value', condition, range(max_tdc)
                out_2.attrs.dimensions, out_2.attrs.condition, out_2.attrs.tdc_values = 'column, row, TDC time stamp value', condition, range(256)
                out_3.attrs.dimensions, out_3.attrs.condition = 'column, row, mean TDC value', condition
                out_4.attrs.dimensions, out_4.attrs.condition = 'column, row, mean TDC time stamp value', condition
                out_5.attrs.dimensions, out_5.attrs.condition = 'PlsrDAC', condition
                out_6.attrs.dimensions, out_6.attrs.condition = 'TDC, TOT', condition
                out_1[:], out_2[:], out_3[:], out_4[:], out_5[:], out_6[:] = pixel_tdc_hist_result, pixel_tdc_timestamp_hist_result, mean_pixel_tdc_hist_result, mean_pixel_tdc_timestamp_hist_result, tdc_hists_per_condition_result, tdc_corr_hist_result

                if charge_calibration is not None:
                    # Select only valid pixels for histogramming: they have data and a calibration (i.e. any charge(TDC) calibration != 0)
                    valid_pixel = np.where(np.logical_and(charge_calibration[:, :, :max_tdc].sum(axis=2) > 0, pixel_tdc_hist_result[:, :, :max_tdc].swapaxes(0, 1).sum(axis=2) > 0))

                    mean_charge_calibration = charge_calibration[valid_pixel][:, :max_tdc].mean(axis=0)
                    mean_tdc_hist = pixel_tdc_hist_result.swapaxes(0, 1)[valid_pixel][:, :max_tdc].mean(axis=0)
                    result_array = np.rec.array(np.column_stack((mean_charge_calibration, mean_tdc_hist)), dtype=[('charge', float), ('count', float)])
                    out_6 = out_file_h5.create_table(out_file_h5.root, name='HistMeanTdcCalibratedCondition_%d' % index, description=result_array.dtype, title='Hist Tdc with mean charge calibration and %s' % condition, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                    out_6.attrs.condition = condition
                    out_6.attrs.n_pixel = valid_pixel[0].shape[0]
                    out_6.append(result_array)
                    # Create charge histogram with per pixel TDC(charge) calibration
                    x, y = charge_calibration[valid_pixel][:, :max_tdc].ravel(), pixel_tdc_hist_result.swapaxes(0, 1)[valid_pixel][:, :max_tdc].ravel()
                    y, x = y[x > 0], x[x > 0]  # remove hit TDC values without a proper PlsrDAC(TDC) calibration
                    x, y, yerr = analysis_utils.get_profile_histogram(x, y, n_bins=n_bins)
                    result_array = np.rec.array(np.column_stack((x, y, yerr)), dtype=[('charge', float), ('count', float), ('count_error', float)])
                    out_7 = out_file_h5.create_table(out_file_h5.root, name='HistTdcCalibratedCondition_%d' % index, description=result_array.dtype, title='Hist Tdc with per pixel charge calibration and %s' % condition, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                    out_7.attrs.condition = condition
                    out_7.attrs.n_pixel = valid_pixel[0].shape[0]
                    out_7.append(result_array)

    # Plot Data
    with PdfPages(input_file_hits[:-3] + '_calibrated_tdc_hists.pdf') as output_pdf:
        plot_hits_per_condition(output_pdf)
        with tb.open_file(input_file_hits[:-3] + '_tdc_hists.h5', mode="r") as in_file_h5:
            for node in in_file_h5.root:  # go through the data and plot them
                if 'MeanPixel' in node.name:
                    try:
                        plot_three_way(np.ma.masked_invalid(node[:]) * 1.5625, title='Mean TDC delay, hits with\n%s' % node._v_attrs.condition if 'Timestamp' in node.name else 'Mean TDC, hits with\n%s' % node._v_attrs.condition, filename=output_pdf)
                    except ValueError:
                        logging.warning('Cannot plot TDC delay')
                elif 'HistTdcCondition' in node.name:
                    hist_1d = node[:]
                    entry_index = np.where(hist_1d != 0)
                    if entry_index[0].shape[0] != 0:
                        max_index = np.amax(entry_index)
                    else:
                        max_index = max_tdc
                    plot_1d_hist(hist_1d[:max_index + 10], title='TDC histogram, hits with\n%s' % node._v_attrs.condition if 'Timestamp' not in node.name else 'TDC time stamp histogram, hits with\n%s' % node._v_attrs.condition, x_axis_title='TDC' if 'Timestamp' not in node.name else 'TDC time stamp', filename=output_pdf)
                elif 'HistPixelTdc' in node.name:
                    hist_3d = node[:]
                    entry_index = np.where(hist_3d.sum(axis=(0, 1)) != 0)
                    if entry_index[0].shape[0] != 0:
                        max_index = np.amax(entry_index)
                    else:
                        max_index = max_tdc
                    best_pixel_index = np.where(hist_3d.sum(axis=2) == np.amax(node[:].sum(axis=2)))
                    if best_pixel_index[0].shape[0] == 1:  # there could be more than one pixel with most hits
                        try:
                            plot_1d_hist(hist_3d[best_pixel_index][0, :max_index], title='TDC histogram of pixel %d, %d\n%s' % (best_pixel_index[1] + 1, best_pixel_index[0] + 1, node._v_attrs.condition) if 'Timestamp' not in node.name else 'TDC time stamp histogram, hits of pixel %d, %d' % (best_pixel_index[1] + 1, best_pixel_index[0] + 1), x_axis_title='TDC' if 'Timestamp' not in node.name else 'TDC time stamp', filename=output_pdf)
                        except IndexError:
                            logging.warning('Cannot plot best pixel TDC histogram')
                elif 'HistTdcCalibratedCondition' in node.name:
                    plot_corrected_tdc_hist(node[:]['charge'], node[:]['count'], title='TDC histogram, %d pixel, per pixel TDC calib.\n%s' % (node._v_attrs.n_pixel, node._v_attrs.condition), output_pdf=output_pdf)
                elif 'HistMeanTdcCalibratedCondition' in node.name:
                    plot_corrected_tdc_hist(node[:]['charge'], node[:]['count'], title='TDC histogram, %d pixel, mean TDC calib.\n%s' % (node._v_attrs.n_pixel, node._v_attrs.condition), output_pdf=output_pdf)
                elif 'HistTdcCorr' in node.name:
                    plot_tdc_tot_correlation(node[:], node._v_attrs.condition, output_pdf)
コード例 #34
0
ファイル: scan_hit_delay.py プロジェクト: CARIBOuSystem/pyBAR
    def analyze(self):
        #         plsr_dac_slope = self.register.calibration_parameters['C_Inj_High'] * self.register.calibration_parameters['Vcal_Coeff_1']
        plsr_dac_slope = 55.0

        # Interpret data and create hit table
        with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=False) as analyze_raw_data:
            analyze_raw_data.create_occupancy_hist = False  # too many scan parameters to histogram in RAM
            analyze_raw_data.create_hit_table = True
            analyze_raw_data.interpreter.set_warning_output(False)  # a lot of data produces unknown words
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.interpreter.print_summary()

        # Create relative BCID and mean relative BCID histogram for each pixel / injection delay / PlsrDAC setting
        with tb.open_file(self.output_filename + "_analyzed.h5", mode="w") as out_file_h5:
            hists_folder = out_file_h5.create_group(out_file_h5.root, "PixelHistsMeanRelBcid")
            hists_folder_2 = out_file_h5.create_group(out_file_h5.root, "PixelHistsRelBcid")
            hists_folder_3 = out_file_h5.create_group(out_file_h5.root, "PixelHistsTot")
            hists_folder_4 = out_file_h5.create_group(out_file_h5.root, "PixelHistsMeanTot")
            hists_folder_5 = out_file_h5.create_group(out_file_h5.root, "HistsTot")

            def store_bcid_histograms(bcid_array, tot_array, tot_pixel_array):
                logging.debug("Store histograms for PlsrDAC " + str(old_plsr_dac))
                bcid_mean_array = (
                    np.average(bcid_array, axis=3, weights=range(0, 16))
                    * sum(range(0, 16))
                    / np.sum(bcid_array, axis=3).astype("f4")
                )  # calculate the mean BCID per pixel and scan parameter
                tot_pixel_mean_array = (
                    np.average(tot_pixel_array, axis=3, weights=range(0, 16))
                    * sum(range(0, 16))
                    / np.sum(tot_pixel_array, axis=3).astype("f4")
                )  # calculate the mean tot per pixel and scan parameter
                bcid_mean_result = np.swapaxes(bcid_mean_array, 0, 1)
                bcid_result = np.swapaxes(bcid_array, 0, 1)
                tot_pixel_result = np.swapaxes(tot_pixel_array, 0, 1)
                tot_mean_pixel_result = np.swapaxes(tot_pixel_mean_array, 0, 1)

                out = out_file_h5.createCArray(
                    hists_folder,
                    name="HistPixelMeanRelBcidPerDelayPlsrDac_%03d" % old_plsr_dac,
                    title="Mean relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC "
                    + str(old_plsr_dac),
                    atom=tb.Atom.from_dtype(bcid_mean_result.dtype),
                    shape=bcid_mean_result.shape,
                    filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
                )
                out.attrs.dimensions = "column, row, injection delay"
                out.attrs.injection_delay_values = injection_delay
                out[:] = bcid_mean_result
                out_2 = out_file_h5.createCArray(
                    hists_folder_2,
                    name="HistPixelRelBcidPerDelayPlsrDac_%03d" % old_plsr_dac,
                    title="Relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC " + str(old_plsr_dac),
                    atom=tb.Atom.from_dtype(bcid_result.dtype),
                    shape=bcid_result.shape,
                    filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
                )
                out_2.attrs.dimensions = "column, row, injection delay, relative bcid"
                out_2.attrs.injection_delay_values = injection_delay
                out_2[:] = bcid_result
                out_3 = out_file_h5.createCArray(
                    hists_folder_3,
                    name="HistPixelTotPerDelayPlsrDac_%03d" % old_plsr_dac,
                    title="Tot hist per pixel and different PlsrDAC delays for PlsrDAC " + str(old_plsr_dac),
                    atom=tb.Atom.from_dtype(tot_pixel_result.dtype),
                    shape=tot_pixel_result.shape,
                    filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
                )
                out_3.attrs.dimensions = "column, row, injection delay"
                out_3.attrs.injection_delay_values = injection_delay
                out_3[:] = tot_pixel_result
                out_4 = out_file_h5.createCArray(
                    hists_folder_4,
                    name="HistPixelMeanTotPerDelayPlsrDac_%03d" % old_plsr_dac,
                    title="Mean tot hist per pixel and different PlsrDAC delays for PlsrDAC " + str(old_plsr_dac),
                    atom=tb.Atom.from_dtype(tot_mean_pixel_result.dtype),
                    shape=tot_mean_pixel_result.shape,
                    filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
                )
                out_4.attrs.dimensions = "column, row, injection delay"
                out_4.attrs.injection_delay_values = injection_delay
                out_4[:] = tot_mean_pixel_result
                out_5 = out_file_h5.createCArray(
                    hists_folder_5,
                    name="HistTotPlsrDac_%03d" % old_plsr_dac,
                    title="Tot histogram for PlsrDAC " + str(old_plsr_dac),
                    atom=tb.Atom.from_dtype(tot_array.dtype),
                    shape=tot_array.shape,
                    filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
                )
                out_5.attrs.injection_delay_values = injection_delay
                out_5[:] = tot_array

            old_plsr_dac = None

            # Get scan parameters from interpreted file
            with tb.open_file(self.output_filename + "_interpreted.h5", "r") as in_file_h5:
                scan_parameters_dict = get_scan_parameter(in_file_h5.root.meta_data[:])
                plsr_dac = scan_parameters_dict["PlsrDAC"]
                hists_folder._v_attrs.plsr_dac_values = plsr_dac
                hists_folder_2._v_attrs.plsr_dac_values = plsr_dac
                hists_folder_3._v_attrs.plsr_dac_values = plsr_dac
                hists_folder_4._v_attrs.plsr_dac_values = plsr_dac
                injection_delay = scan_parameters_dict[
                    list(scan_parameters_dict.keys())[1]
                ]  # the injection delay parameter name is not fixed; it is expected to be the inner loop parameter
                scan_parameters = list(scan_parameters_dict.keys())

            bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int16)  # bcid array of actual PlsrDAC
            tot_pixel_array = np.zeros(
                (80, 336, len(injection_delay), 16), dtype=np.int16
            )  # tot pixel array of actual PlsrDAC
            tot_array = np.zeros((16,), dtype=np.int32)  # tot array of actual PlsrDAC

            logging.info("Store histograms for PlsrDAC values " + str(plsr_dac))
            progress_bar = progressbar.ProgressBar(
                widgets=[
                    "",
                    progressbar.Percentage(),
                    " ",
                    progressbar.Bar(marker="*", left="|", right="|"),
                    " ",
                    progressbar.AdaptiveETA(),
                ],
                maxval=max(plsr_dac) - min(plsr_dac),
                term_width=80,
            )

            for index, (parameters, hits) in enumerate(
                get_hits_of_scan_parameter(self.output_filename + "_interpreted.h5", scan_parameters, chunk_size=1.5e7)
            ):
                if index == 0:
                    progress_bar.start()  # start after the event index is created to get reasonable ETA
                actual_plsr_dac, actual_injection_delay = parameters[0], parameters[1]
                column, row, rel_bcid, tot = hits["column"] - 1, hits["row"] - 1, hits["relative_BCID"], hits["tot"]
                bcid_array_fast = hist_3d_index(column, row, rel_bcid, shape=(80, 336, 16))
                tot_pixel_array_fast = hist_3d_index(column, row, tot, shape=(80, 336, 16))
                tot_array_fast = hist_1d_index(tot, shape=(16,))

                if old_plsr_dac != actual_plsr_dac:  # Store the data of the actual PlsrDAC value
                    if old_plsr_dac is not None:  # skip on the very first chunk, nothing has been accumulated yet
                        store_bcid_histograms(bcid_array, tot_array, tot_pixel_array)
                        progress_bar.update(old_plsr_dac - min(plsr_dac))
                    # Reset the histograms for the next PlsrDAC setting
                    bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int8)
                    tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int8)
                    tot_array = np.zeros((16,), dtype=np.int32)
                    old_plsr_dac = actual_plsr_dac
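                # Add the chunk histograms to the slice of the current injection delay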
                injection_delay_index = np.where(np.array(injection_delay) == actual_injection_delay)[0][0]
                bcid_array[:, :, injection_delay_index, :] += bcid_array_fast
                tot_pixel_array[:, :, injection_delay_index, :] += tot_pixel_array_fast
                tot_array += tot_array_fast
            store_bcid_histograms(bcid_array, tot_array, tot_pixel_array)  # save histograms of last PlsrDAC setting
            progress_bar.finish()

        # Take the mean relative BCID histogram of each PlsrDAC value and calculate the delay for each pixel
        with tb.open_file(self.output_filename + "_analyzed.h5", mode="r") as in_file_h5:
            # Create temporary result data structures
            plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
            timewalk = np.zeros(shape=(80, 336, len(plsr_dac_values)), dtype=np.int8)  # result array
            tot = np.zeros(shape=(len(plsr_dac_values),), dtype=np.float16)  # result array
            hit_delay = np.zeros(shape=(80, 336, len(plsr_dac_values)), dtype=np.int8)  # result array
            min_rel_bcid = np.zeros(
                shape=(80, 336), dtype=np.int8
            )  # Temp array to make sure that the Scurve from the same BCID is used
            delay_calibration_data = []
            delay_calibration_data_error = []

            # Calculate the minimum BCID that is used to calculate the hit delay. This calculation is not guaranteed to work for every data set.
            plsr_dac_min = min(plsr_dac_values)
            rel_bcid_min_injection = in_file_h5.get_node(
                in_file_h5.root.PixelHistsMeanRelBcid, "HistPixelMeanRelBcidPerDelayPlsrDac_%03d" % plsr_dac_min
            )
            injection_delays = np.array(rel_bcid_min_injection.attrs.injection_delay_values)
            injection_delay_min = np.where(injection_delays == np.amax(injection_delays))[0][0]
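            # bcid_min: rounded mean relative BCID over all pixels at the largest injection delay of the lowest PlsrDAC, minus one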
            bcid_min = (
                int(
                    round(
                        np.mean(
                            np.ma.masked_array(
                                rel_bcid_min_injection[:, :, injection_delay_min],
                                np.isnan(rel_bcid_min_injection[:, :, injection_delay_min]),
                            )
                        )
                    )
                )
                - 1
            )

            # Info output with progressbar
            logging.info("Create timewalk info for PlsrDACs " + str(plsr_dac_values))
            progress_bar = progressbar.ProgressBar(
                widgets=[
                    "",
                    progressbar.Percentage(),
                    " ",
                    progressbar.Bar(marker="*", left="|", right="|"),
                    " ",
                    progressbar.AdaptiveETA(),
                ],
                maxval=len(plsr_dac_values),
                term_width=80,
            )
            progress_bar.start()

            for index, node in enumerate(
                in_file_h5.root.PixelHistsMeanRelBcid
            ):  # loop over all mean relative BCID hists for all PlsrDAC values
                # Select the S-curves
                pixel_data = node[:, :, :]
                pixel_data_fixed = pixel_data.reshape(
                    pixel_data.shape[0] * pixel_data.shape[1] * pixel_data.shape[2]
                )  # Reshape for interpolation of Nans
                nans, x = np.isnan(pixel_data_fixed), lambda z: z.nonzero()[0]
                pixel_data_fixed[nans] = np.interp(x(nans), x(~nans), pixel_data_fixed[~nans])  # interpolate Nans
                pixel_data_fixed = pixel_data_fixed.reshape(
                    pixel_data.shape[0], pixel_data.shape[1], pixel_data.shape[2]
                )  # Reshape after interpolation of Nans
                pixel_data_round = np.round(pixel_data_fixed)
                pixel_data_round_diff = np.diff(pixel_data_round, axis=2)
                index_sel = np.where(np.logical_and(pixel_data_round_diff > 0.0, np.isfinite(pixel_data_round_diff)))

                # Temporary result histograms to be filled
                first_scurve_mean = np.zeros(
                    shape=(80, 336), dtype=np.int8
                )  # the first S-curve in the data for the lowest injection (for time walk)
                second_scurve_mean = np.zeros(
                    shape=(80, 336), dtype=np.int8
                )  # the second S-curve in the data (to calibrate one inj. delay step)
                a_scurve_mean = np.zeros(
                    shape=(80, 336), dtype=np.int8
                )  # the mean of the S-curve at a given rel. BCID (for hit delay)

                # Loop over the S-curve means
                for (row_index, col_index, delay_index) in np.column_stack(index_sel):
                    delay = injection_delays[delay_index]
                    if first_scurve_mean[col_index, row_index] == 0:
                        if delay_index == 0:  # ignore the first index, can be wrong due to nan filling
                            continue
                        if (
                            pixel_data_round[row_index, col_index, delay] >= min_rel_bcid[col_index, row_index]
                        ):  # make sure to always use the data of the same BCID
                            first_scurve_mean[col_index, row_index] = delay
                            min_rel_bcid[col_index, row_index] = pixel_data_round[row_index, col_index, delay]
                    elif (
                        second_scurve_mean[col_index, row_index] == 0
                        and (delay - first_scurve_mean[col_index, row_index]) > 20
                    ):  # minimum distance of 20, otherwise this can just be data 'jitter'
                        second_scurve_mean[col_index, row_index] = delay
                    if pixel_data_round[row_index, col_index, delay] == bcid_min:
                        if a_scurve_mean[col_index, row_index] == 0:
                            a_scurve_mean[col_index, row_index] = delay

                plsr_dac = int(re.search(r"\d+", node.name).group())
                plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0]
                if (np.count_nonzero(first_scurve_mean) - np.count_nonzero(a_scurve_mean)) > 1e3:
                    logging.warning(
                        "The common BCID to find the absolute hit delay was set wrong! Hit delay calculation will be wrong."
                    )
                selection = (second_scurve_mean - first_scurve_mean)[
                    np.logical_and(second_scurve_mean > 0, first_scurve_mean < second_scurve_mean)
                ]
                delay_calibration_data.append(np.mean(selection))
                delay_calibration_data_error.append(np.std(selection))
                # Store the actual PlsrDAC data into result hist
                timewalk[
                    :, :, plsr_dac_index
                ] = first_scurve_mean  # Save the plsr delay of first s-curve (for time walk calc.)
                hit_delay[
                    :, :, plsr_dac_index
                ] = a_scurve_mean  # Save the plsr delay of s-curve of fixed rel. BCID (for hit delay calc.)
                progress_bar.update(index)

            for index, node in enumerate(in_file_h5.root.HistsTot):  # loop over tot hist for all PlsrDAC values
                plsr_dac = int(re.search(r"\d+", node.name).group())
                plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0]
                tot_data = node[:]
                tot[plsr_dac_index] = get_mean_from_histogram(tot_data, range(16))

            # Calibrate the step size of the injection delay by the average difference of two Scurves of all pixels
            delay_calibration_mean = np.mean(
                np.array(delay_calibration_data[2:])[np.isfinite(np.array(delay_calibration_data[2:]))]
            )
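            # Fitting a constant (weighted by the per-PlsrDAC spread) is effectively an error-weighted mean of the step-size data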
            delay_calibration, delay_calibration_error = curve_fit(
                lambda x, par: (par),
                injection_delays,
                delay_calibration_data,
                p0=delay_calibration_mean,
                sigma=delay_calibration_data_error,
                absolute_sigma=True,
            )
            delay_calibration, delay_calibration_error = delay_calibration[0], delay_calibration_error[0][0]

            progress_bar.finish()

        #  Save time walk / hit delay hists
        with tb.open_file(self.output_filename + "_analyzed.h5", mode="r+") as out_file_h5:
            timewalk_result = np.swapaxes(timewalk, 0, 1)
            hit_delay_result = np.swapaxes(hit_delay, 0, 1)
            out = out_file_h5.createCArray(
                out_file_h5.root,
                name="HistPixelTimewalkPerPlsrDac",
                title="Time walk per pixel and PlsrDAC",
                atom=tb.Atom.from_dtype(timewalk_result.dtype),
                shape=timewalk_result.shape,
                filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
            )
            out_2 = out_file_h5.createCArray(
                out_file_h5.root,
                name="HistPixelHitDelayPerPlsrDac",
                title="Hit delay per pixel and PlsrDAC",
                atom=tb.Atom.from_dtype(hit_delay_result.dtype),
                shape=hit_delay_result.shape,
                filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
            )
            out_3 = out_file_h5.createCArray(
                out_file_h5.root,
                name="HistTotPerPlsrDac",
                title="Tot per PlsrDAC",
                atom=tb.Atom.from_dtype(tot.dtype),
                shape=tot.shape,
                filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
            )
            out.attrs.dimensions = "column, row, PlsrDAC"
            out.attrs.delay_calibration = delay_calibration
            out.attrs.delay_calibration_error = delay_calibration_error
            out.attrs.plsr_dac_values = plsr_dac_values
            out_2.attrs.dimensions = "column, row, PlsrDAC"
            out_2.attrs.delay_calibration = delay_calibration
            out_2.attrs.delay_calibration_error = delay_calibration_error
            out_2.attrs.plsr_dac_values = plsr_dac_values
            out_3.attrs.dimensions = "PlsrDAC"
            out_3.attrs.plsr_dac_values = plsr_dac_values
            out[:] = timewalk_result
            out_2[:] = hit_delay_result
            out_3[:] = tot

        # Mask the pixels that have invalid data and create a plot with the relative time walk for all pixels
        with tb.open_file(self.output_filename + "_analyzed.h5", mode="r") as in_file_h5:

            def plot_hit_delay(
                hist_3d, charge_values, title, xlabel, ylabel, filename, threshold=None, tot_values=None
            ):
                # Interpolate tot values for second tot axis
                interpolation = interp1d(tot_values, charge_values, kind="slinear", bounds_error=True)
                tot = np.arange(16)
                tot = tot[np.logical_and(tot >= np.amin(tot_values), tot <= np.amax(tot_values))]

                array = np.transpose(hist_3d, axes=(2, 1, 0)).reshape(
                    hist_3d.shape[2], hist_3d.shape[0] * hist_3d.shape[1]
                )
                y = np.mean(array, axis=1)
                y_err = np.std(array, axis=1)

                fig = Figure()
                FigureCanvas(fig)
                ax = fig.add_subplot(111)
                fig.patch.set_facecolor("white")
                ax.grid(True)
                ax.set_xlabel(xlabel)
                ax.set_ylabel(ylabel)
                ax.set_xlim((0, np.amax(charge_values)))
                ax.set_ylim((np.amin(y - y_err), np.amax(y + y_err)))
                ax.plot(charge_values, y, ".-", color="black", label=title)
                if threshold is not None:
                    ax.plot(
                        [threshold, threshold],
                        [np.amin(y - y_err), np.amax(y + y_err)],
                        linestyle="--",
                        color="black",
                        label="Threshold\n%d e" % (threshold),
                    )
                ax.fill_between(
                    charge_values, y - y_err, y + y_err, color="gray", alpha=0.5, facecolor="gray", label="RMS"
                )
                ax2 = ax.twiny()
                ax2.set_xlabel("ToT")

                ticklab = ax2.xaxis.get_ticklabels()[0]
                trans = ticklab.get_transform()
                ax2.xaxis.set_label_coords(np.amax(charge_values), 1, transform=trans)
                ax2.set_xlim(ax.get_xlim())
                ax2.set_xticks(interpolation(tot))
                ax2.set_xticklabels([str(int(i)) for i in tot])
                ax.text(0.5, 1.07, title, horizontalalignment="center", fontsize=18, transform=ax2.transAxes)
                ax.legend()
                filename.savefig(fig)

            plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
            delay_calibration = in_file_h5.root.HistPixelHitDelayPerPlsrDac._v_attrs.delay_calibration
            charge_values = np.array(plsr_dac_values)[:] * plsr_dac_slope
            hist_timewalk = in_file_h5.root.HistPixelTimewalkPerPlsrDac[:, :, :]
            hist_hit_delay = in_file_h5.root.HistPixelHitDelayPerPlsrDac[:, :, :]
            tot = in_file_h5.root.HistTotPerPlsrDac[:]

            hist_rel_timewalk = np.amax(hist_timewalk, axis=2)[:, :, np.newaxis] - hist_timewalk  # time walk relative to the per-pixel maximum
            hist_rel_hit_delay = np.mean(hist_hit_delay[:, :, -1]) - hist_hit_delay  # hit delay relative to the mean delay at the highest PlsrDAC

            # Create mask and apply for bad pixels
            mask = np.ones(hist_rel_timewalk.shape, dtype=np.int8)
            for node in in_file_h5.root.PixelHistsMeanRelBcid:
                pixel_data = node[:, :, :]
                a = np.sum(pixel_data, axis=2)
                mask[np.isfinite(a), :] = 0

            hist_rel_timewalk = np.ma.masked_array(hist_rel_timewalk, mask)
            hist_hit_delay = np.ma.masked_array(hist_hit_delay, mask)

            output_pdf = PdfPages(self.output_filename + ".pdf")
            plot_hit_delay(
                np.swapaxes(hist_rel_timewalk, 0, 1) * 25.0 / delay_calibration,
                charge_values=charge_values,
                title="Time walk",
                xlabel="Charge [e]",
                ylabel="Time walk [ns]",
                filename=output_pdf,
                threshold=np.amin(charge_values),
                tot_values=tot,
            )
            plot_hit_delay(
                np.swapaxes(hist_rel_hit_delay, 0, 1) * 25.0 / delay_calibration,
                charge_values=charge_values,
                title="Hit delay",
                xlabel="Charge [e]",
                ylabel="Hit delay [ns]",
                filename=output_pdf,
                threshold=np.amin(charge_values),
                tot_values=tot,
            )
            plot_scurves(
                np.swapaxes(hist_rel_timewalk, 0, 1),
                scan_parameters=charge_values,
                title="Timewalk of the FE-I4",
                scan_parameter_name="Charge [e]",
                ylabel="Timewalk [ns]",
                min_x=0,
                y_scale=25.0 / delay_calibration,
                filename=output_pdf,
            )
            plot_scurves(
                np.swapaxes(hist_hit_delay[:, :, :], 0, 1),
                scan_parameters=charge_values,
                title="Hit delay (T0) with internal charge injection\nof the FE-I4",
                scan_parameter_name="Charge [e]",
                ylabel="Hit delay [ns]",
                min_x=0,
                y_scale=25.0 / delay_calibration,
                filename=output_pdf,
            )

            for i in [
                0,
                1,
                len(plsr_dac_values) // 4,
                len(plsr_dac_values) // 2,
                -1,
            ]:  # plot 2d hist at min, 1/4, 1/2, max PlsrDAC setting
                plot_three_way(
                    hist_rel_timewalk[:, :, i] * 25.0 / delay_calibration,
                    title="Time walk at %.0f e" % (charge_values[i]),
                    x_axis_title="Time walk [ns]",
                    filename=output_pdf,
                )
                plot_three_way(
                    hist_hit_delay[:, :, i] * 25.0 / delay_calibration,
                    title="Hit delay (T0) with internal charge injection at %.0f e" % (charge_values[i]),
                    x_axis_title="Hit delay [ns]",
                    minimum=np.amin(hist_hit_delay[:, :, i]),
                    maximum=np.amax(hist_hit_delay[:, :, i]),
                    filename=output_pdf,
                )
            output_pdf.close()
コード例 #35
0
ファイル: tune_gdac.py プロジェクト: CARIBOuSystem/pyBAR
    def scan(self):
        if not self.plots_filename:
            self.plots_filename = PdfPages(self.output_filename + ".pdf")
            self.close_plots = True
        else:
            self.close_plots = False
        cal_lvl1_command = (
            self.register.get_commands("CAL")[0]
            + self.register.get_commands("zeros", length=40)[0]
            + self.register.get_commands("LV1")[0]
            + self.register.get_commands("zeros", mask_steps=self.mask_steps_gdac)[0]
        )

        self.write_target_threshold()
        for gdac_bit in self.gdac_tune_bits:  # reset all GDAC bits
            self.set_gdac_bit(gdac_bit, bit_value=0, send_command=False)

        last_bit_result = self.n_injections_gdac
        decreased_threshold = False  # needed to determine if the FE is noisy
        all_bits_zero = True

        def bits_set(int_type):
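            # return the positions of all set bits, e.g. bits_set(5) -> [0, 2]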
            int_type = int(int_type)
            position = 0
            bits_set = []
            while int_type:
                if int_type & 1:
                    bits_set.append(position)
                position += 1
                int_type = int_type >> 1
            return bits_set

        # calculate selected pixels from the mask and the disabled columns
        select_mask_array = np.zeros(shape=(80, 336), dtype=np.uint8)
        if not self.enable_mask_steps_gdac:
            self.enable_mask_steps_gdac = range(self.mask_steps_gdac)
        for mask_step in self.enable_mask_steps_gdac:
            select_mask_array += make_pixel_mask(steps=self.mask_steps_gdac, shift=mask_step)
        for column in bits_set(self.register.get_global_register_value("DisableColumnCnfg")):
            logging.info("Deselect double column %d" % column)
            select_mask_array[column, :] = 0

        additional_scan = True
        occupancy_best = 0
        gdac_best = self.register_utils.get_gdac()
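        # Successive approximation: set each GDAC bit from MSB to LSB, measure the occupancy and keep the bit only if the median occupancy does not fall below Ninj/2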
        for gdac_bit in self.gdac_tune_bits:
            if additional_scan:
                self.set_gdac_bit(gdac_bit)
                scan_parameter_value = (
                    self.register.get_global_register_value("Vthin_AltCoarse") << 8
                ) + self.register.get_global_register_value("Vthin_AltFine")
                logging.info("GDAC setting: %d, bit %d = 1", scan_parameter_value, gdac_bit)
            else:
                self.set_gdac_bit(gdac_bit, bit_value=0)
                scan_parameter_value = (
                    self.register.get_global_register_value("Vthin_AltCoarse") << 8
                ) + self.register.get_global_register_value("Vthin_AltFine")
                logging.info("GDAC setting: %d, bit %d = 0", scan_parameter_value, gdac_bit)

            with self.readout(
                GDAC=scan_parameter_value,
                reset_sram_fifo=True,
                fill_buffer=True,
                clear_buffer=True,
                callback=self.handle_data,
            ):
                scan_loop(
                    self,
                    cal_lvl1_command,
                    repeat_command=self.n_injections_gdac,
                    mask_steps=self.mask_steps_gdac,
                    enable_mask_steps=self.enable_mask_steps_gdac,
                    enable_double_columns=None,
                    same_mask_for_all_dc=True,
                    eol_function=None,
                    digital_injection=False,
                    enable_shift_masks=self.enable_shift_masks,
                    disable_shift_masks=self.disable_shift_masks,
                    restore_shift_masks=True,
                    mask=None,
                    double_column_correction=self.pulser_dac_correction,
                )

            occupancy_array, _, _ = np.histogram2d(
                *convert_data_array(
                    data_array_from_data_iterable(self.fifo_readout.data),
                    filter_func=is_data_record,
                    converter_func=get_col_row_array_from_data_record_array,
                ),
                bins=(80, 336),
                range=[[1, 80], [1, 336]]
            )
            self.occ_array_sel_pixel = np.ma.array(
                occupancy_array, mask=np.logical_not(np.ma.make_mask(select_mask_array))
            )  # take only selected pixel into account by creating a mask
            median_occupancy = np.ma.median(self.occ_array_sel_pixel)
            if abs(median_occupancy - self.n_injections_gdac / 2) < abs(occupancy_best - self.n_injections_gdac / 2):
                occupancy_best = median_occupancy
                gdac_best = self.register_utils.get_gdac()

            if self.plot_intermediate_steps:
                plot_three_way(
                    self.occ_array_sel_pixel.transpose(),
                    title="Occupancy (GDAC " + str(scan_parameter_value) + " with tuning bit " + str(gdac_bit) + ")",
                    x_axis_title="Occupancy",
                    filename=self.plots_filename,
                    maximum=self.n_injections_gdac,
                )

            if (
                abs(median_occupancy - self.n_injections_gdac / 2) < self.max_delta_threshold and gdac_bit > 0
            ):  # abort if good value already found to save time
                logging.info(
                    "Median = %f, good result already achieved (median - Ninj/2 < %f), skipping not varied bits",
                    median_occupancy,
                    self.max_delta_threshold,
                )
                break

            if median_occupancy == 0 and decreased_threshold and all_bits_zero:
                logging.info("Chip may be noisy")

            if gdac_bit > 0:
                if (
                    median_occupancy < self.n_injections_gdac / 2
                ):  # set GDAC bit to 0 if the occupancy is too low, thus decrease the threshold
                    logging.info(
                        "Median = %f < %f, set bit %d = 0", median_occupancy, self.n_injections_gdac / 2, gdac_bit
                    )
                    self.set_gdac_bit(gdac_bit, bit_value=0)
                    decreased_threshold = True
                else:  # set GDAC bit to 1 if the occupancy is too high, thus increase threshold
                    logging.info(
                        "Median = %f > %f, leave bit %d = 1", median_occupancy, self.n_injections_gdac / 2, gdac_bit
                    )
                    decreased_threshold = False
                    all_bits_zero = False
            elif gdac_bit == 0:
                if additional_scan:  # scan bit = 0 with the correct value again
                    additional_scan = False
                    last_bit_result = self.occ_array_sel_pixel.copy()
                    self.gdac_tune_bits.append(self.gdac_tune_bits[-1])  # the last tune bit has to be scanned twice
                else:
                    last_bit_result_median = np.median(last_bit_result[select_mask_array > 0])
                    logging.info("Scanned bit 0 = 0 with %f instead of %f", median_occupancy, last_bit_result_median)
                    if abs(median_occupancy - self.n_injections_gdac / 2) > abs(
                        last_bit_result_median - self.n_injections_gdac / 2
                    ):  # bit 0 = 0 is worse than bit 0 = 1, so go back
                        self.set_gdac_bit(gdac_bit, bit_value=1)
                        logging.info("Set bit 0 = 1")
                        self.occ_array_sel_pixel = last_bit_result
                        median_occupancy = np.ma.median(self.occ_array_sel_pixel)
                    else:
                        logging.info("Set bit 0 = 0")
                    if abs(occupancy_best - self.n_injections_gdac / 2) < abs(
                        median_occupancy - self.n_injections_gdac / 2
                    ):
                        logging.info("Binary search converged to non optimal value, take best measured value instead")
                        median_occupancy = occupancy_best
                        self.register_utils.set_gdac(gdac_best, send_command=False)

        self.gdac_best = self.register_utils.get_gdac()

        if np.all((((self.gdac_best & (1 << np.arange(16)))) > 0).astype(int)[self.gdac_tune_bits[:-2]] == 1):
            logging.warning("Selected GDAC bits reached maximum value")
        elif np.all((((self.gdac_best & (1 << np.arange(16)))) > 0).astype(int)[self.gdac_tune_bits] == 0):
            logging.warning("Selected GDAC bits reached minimum value")

        if abs(median_occupancy - self.n_injections_gdac / 2) > 2 * self.max_delta_threshold:
            logging.warning(
                "Global threshold tuning failed. Delta threshold = %f > %f. Vthin_AltCoarse / Vthin_AltFine = %d / %d",
                abs(median_occupancy - self.n_injections_gdac / 2),
                self.max_delta_threshold,
                self.register.get_global_register_value("Vthin_AltCoarse"),
                self.register.get_global_register_value("Vthin_AltFine"),
            )
        else:
            logging.info(
                "Tuned GDAC to Vthin_AltCoarse / Vthin_AltFine = %d / %d",
                self.register.get_global_register_value("Vthin_AltCoarse"),
                self.register.get_global_register_value("Vthin_AltFine"),
            )

        self.gdac_best = self.register_utils.get_gdac()
コード例 #36
0
ファイル: tune_gdac.py プロジェクト: CARIBOuSystem/pyBAR
    def scan(self):
        if not self.plots_filename:
            self.plots_filename = PdfPages(self.output_filename + '.pdf')
            self.close_plots = True
        else:
            self.close_plots = False
        cal_lvl1_command = self.register.get_commands(
            "CAL")[0] + self.register.get_commands(
                "zeros", length=40)[0] + self.register.get_commands(
                    "LV1")[0] + self.register.get_commands(
                        "zeros", mask_steps=self.mask_steps_gdac)[0]

        self.write_target_threshold()
        for gdac_bit in self.gdac_tune_bits:  # reset all GDAC bits
            self.set_gdac_bit(gdac_bit, bit_value=0, send_command=False)

        last_bit_result = self.n_injections_gdac
        decreased_threshold = False  # needed to determine if the FE is noisy
        all_bits_zero = True

        def bits_set(int_type):
            int_type = int(int_type)
            position = 0
            bits_set = []
            while (int_type):
                if (int_type & 1):
                    bits_set.append(position)
                position += 1
                int_type = int_type >> 1
            return bits_set

        # calculate selected pixels from the mask and the disabled columns
        select_mask_array = np.zeros(shape=(80, 336), dtype=np.uint8)
        if not self.enable_mask_steps_gdac:
            self.enable_mask_steps_gdac = range(self.mask_steps_gdac)
        for mask_step in self.enable_mask_steps_gdac:
            select_mask_array += make_pixel_mask(steps=self.mask_steps_gdac,
                                                 shift=mask_step)
        for column in bits_set(
                self.register.get_global_register_value("DisableColumnCnfg")):
            logging.info('Deselect double column %d' % column)
            select_mask_array[column, :] = 0

        additional_scan = True
        occupancy_best = 0
        gdac_best = self.register_utils.get_gdac()
        for gdac_bit in self.gdac_tune_bits:
            if additional_scan:
                self.set_gdac_bit(gdac_bit)
                scan_parameter_value = (
                    self.register.get_global_register_value("Vthin_AltCoarse")
                    << 8
                ) + self.register.get_global_register_value("Vthin_AltFine")
                logging.info('GDAC setting: %d, bit %d = 1',
                             scan_parameter_value, gdac_bit)
            else:
                self.set_gdac_bit(gdac_bit, bit_value=0)
                scan_parameter_value = (
                    self.register.get_global_register_value("Vthin_AltCoarse")
                    << 8
                ) + self.register.get_global_register_value("Vthin_AltFine")
                logging.info('GDAC setting: %d, bit %d = 0',
                             scan_parameter_value, gdac_bit)

            with self.readout(GDAC=scan_parameter_value,
                              reset_sram_fifo=True,
                              fill_buffer=True,
                              clear_buffer=True,
                              callback=self.handle_data):
                scan_loop(self,
                          cal_lvl1_command,
                          repeat_command=self.n_injections_gdac,
                          mask_steps=self.mask_steps_gdac,
                          enable_mask_steps=self.enable_mask_steps_gdac,
                          enable_double_columns=None,
                          same_mask_for_all_dc=True,
                          eol_function=None,
                          digital_injection=False,
                          enable_shift_masks=self.enable_shift_masks,
                          disable_shift_masks=self.disable_shift_masks,
                          restore_shift_masks=True,
                          mask=None,
                          double_column_correction=self.pulser_dac_correction)

            occupancy_array, _, _ = np.histogram2d(*convert_data_array(
                data_array_from_data_iterable(self.fifo_readout.data),
                filter_func=is_data_record,
                converter_func=get_col_row_array_from_data_record_array),
                                                   bins=(80, 336),
                                                   range=[[1, 80], [1, 336]])
            self.occ_array_sel_pixel = np.ma.array(
                occupancy_array,
                mask=np.logical_not(np.ma.make_mask(select_mask_array))
            )  # take only selected pixel into account by creating a mask
            median_occupancy = np.ma.median(self.occ_array_sel_pixel)
            if abs(median_occupancy - self.n_injections_gdac /
                   2) < abs(occupancy_best - self.n_injections_gdac / 2):
                occupancy_best = median_occupancy
                gdac_best = self.register_utils.get_gdac()

            if self.plot_intermediate_steps:
                plot_three_way(self.occ_array_sel_pixel.transpose(),
                               title="Occupancy (GDAC " +
                               str(scan_parameter_value) +
                               " with tuning bit " + str(gdac_bit) + ")",
                               x_axis_title='Occupancy',
                               filename=self.plots_filename,
                               maximum=self.n_injections_gdac)

            if abs(
                    median_occupancy - self.n_injections_gdac / 2
            ) < self.max_delta_threshold and gdac_bit > 0:  # abort if good value already found to save time
                logging.info(
                    'Median = %f, good result already achieved (median - Ninj/2 < %f), skipping not varied bits',
                    median_occupancy, self.max_delta_threshold)
                break

            if median_occupancy == 0 and decreased_threshold and all_bits_zero:
                logging.info('Chip may be noisy')

            if gdac_bit > 0:
                if (
                        median_occupancy < self.n_injections_gdac / 2
                ):  # set GDAC bit to 0 if the occupancy is too low, thus decrease the threshold
                    logging.info('Median = %f < %f, set bit %d = 0',
                                 median_occupancy, self.n_injections_gdac / 2,
                                 gdac_bit)
                    self.set_gdac_bit(gdac_bit, bit_value=0)
                    decreased_threshold = True
                else:  # set GDAC bit to 1 if the occupancy is too high, thus increase threshold
                    logging.info('Median = %f > %f, leave bit %d = 1',
                                 median_occupancy, self.n_injections_gdac / 2,
                                 gdac_bit)
                    decreased_threshold = False
                    all_bits_zero = False
            elif gdac_bit == 0:
                if additional_scan:  # scan bit = 0 with the correct value again
                    additional_scan = False
                    last_bit_result = self.occ_array_sel_pixel.copy()
                    self.gdac_tune_bits.append(
                        self.gdac_tune_bits[-1]
                    )  # the last tune bit has to be scanned twice
                else:
                    last_bit_result_median = np.median(
                        last_bit_result[select_mask_array > 0])
                    logging.info('Scanned bit 0 = 0 with %f instead of %f',
                                 median_occupancy, last_bit_result_median)
                    if abs(median_occupancy - self.n_injections_gdac / 2) > abs(
                            last_bit_result_median - self.n_injections_gdac / 2
                    ):  # bit 0 = 0 is worse than bit 0 = 1, so go back
                        self.set_gdac_bit(gdac_bit, bit_value=1)
                        logging.info('Set bit 0 = 1')
                        self.occ_array_sel_pixel = last_bit_result
                        median_occupancy = np.ma.median(
                            self.occ_array_sel_pixel)
                    else:
                        logging.info('Set bit 0 = 0')
                    if abs(occupancy_best - self.n_injections_gdac / 2) < abs(
                            median_occupancy - self.n_injections_gdac / 2):
                        logging.info(
                            "Binary search converged to non optimal value, take best measured value instead"
                        )
                        median_occupancy = occupancy_best
                        self.register_utils.set_gdac(gdac_best,
                                                     send_command=False)

        self.gdac_best = self.register_utils.get_gdac()

        if np.all((((self.gdac_best & (1 << np.arange(16)))) > 0
                   ).astype(int)[self.gdac_tune_bits[:-2]] == 1):
            logging.warning('Selected GDAC bits reached maximum value')
        elif np.all((((self.gdac_best & (1 << np.arange(16)))) > 0
                     ).astype(int)[self.gdac_tune_bits] == 0):
            logging.warning('Selected GDAC bits reached minimum value')

        if abs(median_occupancy -
               self.n_injections_gdac / 2) > 2 * self.max_delta_threshold:
            logging.warning(
                'Global threshold tuning failed. Delta threshold = %f > %f. Vthin_AltCoarse / Vthin_AltFine = %d / %d',
                abs(median_occupancy - self.n_injections_gdac / 2),
                self.max_delta_threshold,
                self.register.get_global_register_value("Vthin_AltCoarse"),
                self.register.get_global_register_value("Vthin_AltFine"))
        else:
            logging.info(
                'Tuned GDAC to Vthin_AltCoarse / Vthin_AltFine = %d / %d',
                self.register.get_global_register_value("Vthin_AltCoarse"),
                self.register.get_global_register_value("Vthin_AltFine"))

        self.gdac_best = self.register_utils.get_gdac()
コード例 #37
0
    def scan(self):
        if not self.plots_filename:
            self.plots_filename = PdfPages(self.output_filename + '.pdf')
            self.close_plots = True
        else:
            self.close_plots = False
        cal_lvl1_command = self.register.get_commands(
            "CAL")[0] + self.register.get_commands(
                "zeros", length=40)[0] + self.register.get_commands("LV1")[0]

        self.write_target_threshold()

        for gdac_bit in self.gdac_tune_bits:  # reset all GDAC bits
            self.set_gdac_bit(gdac_bit, bit_value=0, send_command=False)

        def bits_set(int_type):
            int_type = int(int_type)
            position = 0
            bits_set = []
            while (int_type):
                if (int_type & 1):
                    bits_set.append(position)
                position += 1
                int_type = int_type >> 1
            return bits_set

        # calculate selected pixels from the mask and the disabled columns
        select_mask_array = np.zeros(shape=(80, 336), dtype=np.uint8)
        self.occ_array_sel_pixels_best = select_mask_array.copy()
        self.occ_array_desel_pixels_best = select_mask_array.copy()
        if not self.enable_mask_steps_gdac:
            self.enable_mask_steps_gdac = range(self.mask_steps)
        for mask_step in self.enable_mask_steps_gdac:
            select_mask_array += make_pixel_mask(steps=self.mask_steps,
                                                 shift=mask_step)
        for column in bits_set(
                self.register.get_global_register_value("DisableColumnCnfg")):
            logging.info('Deselect double column %d' % column)
            select_mask_array[column, :] = 0

        additional_scan = True
        additional_scan_ongoing = False
        occupancy_best = 0.0
        last_good_gdac_bit = self.gdac_tune_bits[0]
        last_good_gdac_scan_step = 0
        gdac_tune_bits_permutation = 0
        gdac_best = self.register_utils.get_gdac()
        gdac_tune_bits = self.gdac_tune_bits[:]
        min_gdac_with_occupancy = None
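        # lowest GDAC value that still produced hits without noise; used below to detect GDAC settings that are too low to see any hits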
        for gdac_scan_step, gdac_bit in enumerate(gdac_tune_bits):
            if additional_scan:
                self.set_gdac_bit(gdac_bit, bit_value=1, send_command=True)
                scan_parameter_value = (
                    self.register.get_global_register_value("Vthin_AltCoarse")
                    << 8
                ) + self.register.get_global_register_value("Vthin_AltFine")
                logging.info('GDAC setting: %d, set bit %d = 1',
                             scan_parameter_value, gdac_bit)
            else:
                self.set_gdac_bit(gdac_bit, bit_value=0, send_command=True)
                scan_parameter_value = (
                    self.register.get_global_register_value("Vthin_AltCoarse")
                    << 8
                ) + self.register.get_global_register_value("Vthin_AltFine")
                logging.info('GDAC setting: %d, set bit %d = 0',
                             scan_parameter_value, gdac_bit)

            with self.readout(GDAC=scan_parameter_value,
                              reset_sram_fifo=True,
                              fill_buffer=True,
                              clear_buffer=True,
                              callback=self.handle_data):
                scan_loop(self,
                          command=cal_lvl1_command,
                          repeat_command=self.n_injections_gdac,
                          mask_steps=self.mask_steps,
                          enable_mask_steps=self.enable_mask_steps_gdac,
                          enable_double_columns=None,
                          same_mask_for_all_dc=self.same_mask_for_all_dc,
                          eol_function=None,
                          digital_injection=False,
                          enable_shift_masks=self.enable_shift_masks,
                          disable_shift_masks=self.disable_shift_masks,
                          restore_shift_masks=True,
                          mask=None,
                          double_column_correction=self.pulser_dac_correction)

            occupancy_array, _, _ = np.histogram2d(*convert_data_array(
                data_array_from_data_iterable(self.fifo_readout.data),
                filter_func=logical_and(is_fe_word, is_data_record),
                converter_func=get_col_row_array_from_data_record_array),
                                                   bins=(80, 336),
                                                   range=[[1, 80], [1, 336]])
            occ_array_sel_pixels = np.ma.array(
                occupancy_array,
                mask=np.logical_not(np.ma.make_mask(select_mask_array))
            )  # take only the selected pixels into account by using the mask
            occ_array_desel_pixels = np.ma.array(
                occupancy_array, mask=np.ma.make_mask(select_mask_array)
            )  # take only the deselected pixels into account by using the inverted mask
            median_occupancy = np.ma.median(occ_array_sel_pixels)
            noise_occupancy = np.ma.median(occ_array_desel_pixels)
            occupancy_almost_zero = np.allclose(median_occupancy, 0)
            no_noise = np.allclose(noise_occupancy, 0)
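            # A pixel whose threshold equals the injected charge responds to ~50 % of the
            # injections, so the steps below compare the median occupancy of the selected
            # pixels against n_injections_gdac / 2.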
            if abs(median_occupancy - self.n_injections_gdac /
                   2) < abs(occupancy_best - self.n_injections_gdac / 2):
                occupancy_best = median_occupancy
                gdac_best = self.register_utils.get_gdac()
                self.occ_array_sel_pixels_best = occ_array_sel_pixels.copy()
                self.occ_array_desel_pixels_best = occ_array_desel_pixels.copy()

            if self.plot_intermediate_steps:
                plot_three_way(occ_array_sel_pixels.transpose(),
                               title="Occupancy (GDAC " +
                               str(scan_parameter_value) +
                               " with tuning bit " + str(gdac_bit) + ")",
                               x_axis_title='Occupancy',
                               filename=self.plots_filename,
                               maximum=self.n_injections_gdac)

            if not occupancy_almost_zero and no_noise:
                if min_gdac_with_occupancy is None:
                    min_gdac_with_occupancy = self.register_utils.get_gdac()
                else:
                    min_gdac_with_occupancy = min(
                        min_gdac_with_occupancy,
                        self.register_utils.get_gdac())

            if gdac_bit > 0:
                # GDAC possibly too low: no hits, no noise, below the lowest GDAC that showed occupancy
                if occupancy_almost_zero and no_noise and self.register_utils.get_gdac() < min_gdac_with_occupancy:
                    logging.info(
                        'Median = %.2f < %.2f, GDAC possibly too low, keep bit %d = 1',
                        median_occupancy, self.n_injections_gdac / 2, gdac_bit)
                # GDAC too high, less hits, decrease GDAC
                elif no_noise and median_occupancy < (
                        self.n_injections_gdac / 2
                ):  # set GDAC bit to 0 if the occupancy is too low, thus decrease threshold
                    try:
                        next_gdac_bit = gdac_tune_bits[gdac_scan_step + 1]
                    except IndexError:
                        next_gdac_bit = None
                    # check if new value is below lower limit
                    if self.gdac_lower_limit and (
                            next_gdac_bit is not None
                            and self.register_utils.get_gdac() - 2**gdac_bit +
                            2**next_gdac_bit < self.gdac_lower_limit) or (
                                next_gdac_bit is None
                                and self.register_utils.get_gdac() -
                                2**gdac_bit < self.gdac_lower_limit):
                        logging.info(
                            'Median = %.2f < %.2f, reaching lower GDAC limit, keep bit %d = 1',
                            median_occupancy, self.n_injections_gdac / 2,
                            gdac_bit)
                    else:
                        logging.info('Median = %.2f < %.2f, set bit %d = 0',
                                     median_occupancy,
                                     self.n_injections_gdac / 2, gdac_bit)
                        self.set_gdac_bit(
                            gdac_bit, bit_value=0, send_command=False
                        )  # do not write, might be too low, do this in next iteration
                # GDAC too low, more hits
                else:
                    logging.info('Median = %.2f > %.2f, keep bit %d = 1',
                                 median_occupancy, self.n_injections_gdac / 2,
                                 gdac_bit)
            elif gdac_bit == 0:
                if not additional_scan_ongoing and (
                    (occupancy_almost_zero and no_noise) or not no_noise
                ) and len(self.gdac_tune_bits) > last_good_gdac_scan_step + 2:
                    self.set_gdac_bit(0, bit_value=0,
                                      send_command=False)  # turn off LSB
                    if len(gdac_tune_bits) == gdac_scan_step + 1 and gdac_tune_bits_permutation == 0:  # min. 2 bits for bin search
                        self.set_gdac_bit(
                            last_good_gdac_bit,
                            bit_value=1,
                            send_command=False)  # always enable highest bit
                        gdac_tune_bits.extend(
                            self.gdac_tune_bits[last_good_gdac_scan_step + 1:]
                        )  # repeat all scan steps from the last good bit
                        for gdac_clear_bit in self.gdac_tune_bits[:last_good_gdac_scan_step]:
                            self.set_gdac_bit(gdac_clear_bit,
                                              bit_value=0,
                                              send_command=False)
                        if 2**last_good_gdac_scan_step == 1:  # last step, cleanup
                            last_good_gdac_bit = self.gdac_tune_bits[
                                last_good_gdac_scan_step + 1]
                            last_good_gdac_scan_step += 1
                        else:
                            gdac_tune_bits_permutation += 1
                    else:
                        gdac_tune_bits_permutation_header = map(
                            int,
                            bin(gdac_tune_bits_permutation)[2:].zfill(
                                last_good_gdac_scan_step))
                        for gdac_permutation_bit, gdac_permutation_bit_value in enumerate(
                                gdac_tune_bits_permutation_header):
                            self.set_gdac_bit(
                                self.gdac_tune_bits[gdac_permutation_bit],
                                bit_value=gdac_permutation_bit_value,
                                send_command=False)
                        gdac_tune_bits.extend(
                            self.gdac_tune_bits[last_good_gdac_scan_step + 1:])
                        if 2**last_good_gdac_scan_step > gdac_tune_bits_permutation + 1:
                            gdac_tune_bits_permutation += 1
                        else:  # last step, cleanup
                            gdac_tune_bits_permutation = 0
                            last_good_gdac_bit = self.gdac_tune_bits[
                                last_good_gdac_scan_step + 1]
                            last_good_gdac_scan_step += 1
                elif additional_scan:  # scan bit = 0 with the correct value again
                    additional_scan = False
                    additional_scan_ongoing = True
                    last_occ_array_sel_pixels = occ_array_sel_pixels.copy()
                    last_occ_array_desel_pixels = occ_array_desel_pixels.copy()
                    gdac_tune_bits.append(
                        0)  # the last tune bit has to be scanned twice
                else:
                    additional_scan_ongoing = False
                    last_median_occupancy = np.ma.median(
                        last_occ_array_sel_pixels)
                    logging.info(
                        'Measured %.2f with bit 0 = 0 and %.2f with bit 0 = 1',
                        median_occupancy, last_median_occupancy)
                    if abs(median_occupancy - self.n_injections_gdac / 2) > abs(
                            last_median_occupancy - self.n_injections_gdac / 2
                    ):  # bit 0 = 0 is worse than bit 0 = 1, so go back
                        logging.info('Set bit 0 = 1')
                        self.set_gdac_bit(0, bit_value=1, send_command=True)
                        occ_array_sel_pixels = last_occ_array_sel_pixels.copy()
                        occ_array_desel_pixels = last_occ_array_desel_pixels.copy()
                        median_occupancy = last_median_occupancy
                    else:
                        logging.info('Keep bit 0 = 0')

        # select best GDAC value
        if abs(occupancy_best -
               self.n_injections_gdac / 2) < abs(median_occupancy -
                                                 self.n_injections_gdac / 2):
            logging.info(
                "Binary search converged to non-optimal value, apply best GDAC value, change GDAC from %d to %d",
                self.register_utils.get_gdac(), gdac_best)
            median_occupancy = occupancy_best
            self.register_utils.set_gdac(gdac_best, send_command=False)

        self.gdac_best = self.register_utils.get_gdac()

        if abs(median_occupancy -
               self.n_injections_gdac / 2) > self.max_delta_threshold:
            if np.all((((self.gdac_best & (1 << np.arange(
                    self.register.global_registers['Vthin_AltCoarse']['bitlength'] +
                    self.register.global_registers['Vthin_AltFine']['bitlength'])))) > 0
                       ).astype(int)[self.gdac_tune_bits] == 1):
                if self.fail_on_warning:
                    raise RuntimeWarning(
                        'Selected GDAC bits reached maximum value')
                else:
                    logging.warning('Selected GDAC bits reached maximum value')
            elif np.all((((self.gdac_best & (1 << np.arange(
                    self.register.global_registers['Vthin_AltCoarse']['bitlength'] +
                    self.register.global_registers['Vthin_AltFine']['bitlength'])))) > 0
                         ).astype(int)[self.gdac_tune_bits] == 0):
                if self.fail_on_warning:
                    raise RuntimeWarning(
                        'Selected GDAC bits reached minimum value')
                else:
                    logging.warning('Selected GDAC bits reached minimum value')
            else:
                if self.fail_on_warning:
                    raise RuntimeWarning(
                        'Global threshold tuning failed. Delta threshold = %.2f > %.2f. Vthin_AltCoarse / Vthin_AltFine = %d / %d'
                        % (abs(median_occupancy - self.n_injections_gdac / 2),
                           self.max_delta_threshold,
                           self.register.get_global_register_value(
                               "Vthin_AltCoarse"),
                           self.register.get_global_register_value(
                               "Vthin_AltFine")))
                else:
                    logging.warning(
                        'Global threshold tuning failed. Delta threshold = %.2f > %.2f. Vthin_AltCoarse / Vthin_AltFine = %d / %d',
                        abs(median_occupancy - self.n_injections_gdac / 2),
                        self.max_delta_threshold,
                        self.register.get_global_register_value(
                            "Vthin_AltCoarse"),
                        self.register.get_global_register_value(
                            "Vthin_AltFine"))
        else:
            logging.info(
                'Tuned GDAC to Vthin_AltCoarse / Vthin_AltFine = %d / %d',
                self.register.get_global_register_value("Vthin_AltCoarse"),
                self.register.get_global_register_value("Vthin_AltFine"))
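
The scan above is essentially a bit-wise binary search over the GDAC tuning bits: each bit is tentatively set, the occupancy is measured, and the bit is kept only while the median occupancy stays at or above half the number of injections. The helper below is a minimal, self-contained sketch of that pattern; tune_dac_binary_search, the measure callback and the example response function are illustrative assumptions, not part of the pyBAR API.

def tune_dac_binary_search(measure, n_bits, target):
    """Bit-wise binary search: return the largest DAC value whose response is
    still at or above target, assuming the response falls monotonically with the DAC."""
    dac = 0
    for bit in reversed(range(n_bits)):  # start with the most significant bit
        dac |= (1 << bit)                # tentatively set the bit
        if measure(dac) < target:        # response too low -> DAC too high
            dac &= ~(1 << bit)           # clear the bit again
    return dac

# Usage sketch with a made-up, monotonically falling response function:
print(tune_dac_binary_search(lambda dac: 255 - dac, n_bits=8, target=50))  # prints 205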
Code example #38
0
File: calibrate_threshold.py Project: makoc/pyBAR
def create_threshold_calibration(
    scan_base_file_name,
    create_plots=True
):  # Create calibration function, can be called standalone
    def analyze_raw_data_file(file_name):
        if os.path.isfile(file_name[:-3] +
                          '_interpreted.h5'):  # skip analysis if already done
            logging.warning('Analyzed data file ' + file_name +
                            ' already exists. Skip analysis for this file.')
        else:
            with AnalyzeRawData(raw_data_file=file_name,
                                create_pdf=False) as analyze_raw_data:
                analyze_raw_data.create_tot_hist = False
                analyze_raw_data.create_tot_pixel_hist = False
                analyze_raw_data.create_fitted_threshold_hists = True
                analyze_raw_data.create_threshold_mask = True
                analyze_raw_data.interpreter.set_warning_output(
                    False)  # RX errors would fill the console
                analyze_raw_data.interpret_word_table()

    def store_calibration_data_as_table(out_file_h5,
                                        mean_threshold_calibration,
                                        mean_threshold_rms_calibration,
                                        threshold_calibration,
                                        parameter_values):
        logging.info("Storing calibration data in a table...")
        filter_table = tb.Filters(complib='blosc',
                                  complevel=5,
                                  fletcher32=False)
        mean_threshold_calib_table = out_file_h5.createTable(
            out_file_h5.root,
            name='MeanThresholdCalibration',
            description=data_struct.MeanThresholdCalibrationTable,
            title='mean_threshold_calibration',
            filters=filter_table)
        threshold_calib_table = out_file_h5.createTable(
            out_file_h5.root,
            name='ThresholdCalibration',
            description=data_struct.ThresholdCalibrationTable,
            title='threshold_calibration',
            filters=filter_table)
        for column in range(80):
            for row in range(336):
                for parameter_value_index, parameter_value in enumerate(
                        parameter_values):
                    threshold_calib_table.row['column'] = column
                    threshold_calib_table.row['row'] = row
                    threshold_calib_table.row[
                        'parameter_value'] = parameter_value
                    threshold_calib_table.row[
                        'threshold'] = threshold_calibration[
                            column, row, parameter_value_index]
                    threshold_calib_table.row.append()
        for parameter_value_index, parameter_value in enumerate(
                parameter_values):
            mean_threshold_calib_table.row['parameter_value'] = parameter_value
            mean_threshold_calib_table.row[
                'mean_threshold'] = mean_threshold_calibration[
                    parameter_value_index]
            mean_threshold_calib_table.row[
                'threshold_rms'] = mean_threshold_rms_calibration[
                    parameter_value_index]
            mean_threshold_calib_table.row.append()
        threshold_calib_table.flush()
        mean_threshold_calib_table.flush()
        logging.info("done")

    def store_calibration_data_as_array(out_file_h5,
                                        mean_threshold_calibration,
                                        mean_threshold_rms_calibration,
                                        threshold_calibration, parameter_name,
                                        parameter_values):
        logging.info("Storing calibration data in an array...")
        filter_table = tb.Filters(complib='blosc',
                                  complevel=5,
                                  fletcher32=False)
        mean_threshold_calib_array = out_file_h5.createCArray(
            out_file_h5.root,
            name='HistThresholdMeanCalibration',
            atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype),
            shape=mean_threshold_calibration.shape,
            title='mean_threshold_calibration',
            filters=filter_table)
        mean_threshold_calib_rms_array = out_file_h5.createCArray(
            out_file_h5.root,
            name='HistThresholdRMSCalibration',
            atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype),
            shape=mean_threshold_calibration.shape,
            title='mean_threshold_rms_calibration',
            filters=filter_table)
        threshold_calib_array = out_file_h5.createCArray(
            out_file_h5.root,
            name='HistThresholdCalibration',
            atom=tb.Atom.from_dtype(threshold_calibration.dtype),
            shape=threshold_calibration.shape,
            title='threshold_calibration',
            filters=filter_table)
        mean_threshold_calib_array[:] = mean_threshold_calibration
        mean_threshold_calib_rms_array[:] = mean_threshold_rms_calibration
        threshold_calib_array[:] = threshold_calibration
        mean_threshold_calib_array.attrs.dimensions = [
            'column', 'row', parameter_name
        ]
        mean_threshold_calib_rms_array.attrs.dimensions = [
            'column', 'row', parameter_name
        ]
        threshold_calib_array.attrs.dimensions = [
            'column', 'row', parameter_name
        ]
        mean_threshold_calib_array.attrs.scan_parameter_values = parameter_values
        mean_threshold_calib_rms_array.attrs.scan_parameter_values = parameter_values
        threshold_calib_array.attrs.scan_parameter_values = parameter_values

        logging.info("done")

    def mask_columns(pixel_array, ignore_columns):
        idx = np.array(ignore_columns) - 1  # convert 1-based FE columns to 0-based array indices
        m = np.zeros_like(pixel_array)
        m[:, idx] = 1
        return np.ma.masked_array(pixel_array, m)

    raw_data_files = analysis_utils.get_data_file_names_from_scan_base(
        scan_base_file_name,
        filter_file_words=['interpreted', 'calibration_calibration'])
    first_scan_base_file_name = scan_base_file_name if isinstance(
        scan_base_file_name, basestring) else scan_base_file_name[
            0]  # multiple scan_base_file_names for multiple runs

    with tb.openFile(
            first_scan_base_file_name + '.h5', mode="r"
    ) as in_file_h5:  # deduce scan parameters from the first (and often only) scan base file name
        ignore_columns = in_file_h5.root.configuration.run_conf[:][np.where(
            in_file_h5.root.configuration.run_conf[:]['name'] ==
            'ignore_columns')]['value'][0]
        parameter_name = in_file_h5.root.configuration.run_conf[:][np.where(
            in_file_h5.root.configuration.run_conf[:]['name'] ==
            'scan_parameters')]['value'][0]
        ignore_columns = ast.literal_eval(ignore_columns)
        parameter_name = ast.literal_eval(parameter_name)[1][0]

    calibration_file = first_scan_base_file_name + '_calibration'

    for raw_data_file in raw_data_files:  # analyze each raw data file; no multithreading here since it is already used in the S-curve fit
        analyze_raw_data_file(raw_data_file)

    files_per_parameter = analysis_utils.get_parameter_value_from_file_names(
        [file_name[:-3] + '_interpreted.h5' for file_name in raw_data_files],
        parameter_name,
        unique=True,
        sort=True)

    logging.info("Create calibration from data")
    mean_threshold_calibration = np.empty(shape=(len(raw_data_files), ),
                                          dtype='<f8')
    mean_threshold_rms_calibration = np.empty(shape=(len(raw_data_files), ),
                                              dtype='<f8')
    threshold_calibration = np.empty(shape=(80, 336, len(raw_data_files)),
                                     dtype='<f8')

    if create_plots:
        logging.info('Saving calibration plots in: %s',
                     calibration_file + '.pdf')
        output_pdf = PdfPages(calibration_file + '.pdf')

    progress_bar = progressbar.ProgressBar(widgets=[
        '',
        progressbar.Percentage(), ' ',
        progressbar.Bar(marker='*', left='|', right='|'), ' ',
        progressbar.AdaptiveETA()
    ],
                                           maxval=len(
                                               files_per_parameter.items()),
                                           term_width=80)
    progress_bar.start()
    parameter_values = []
    for index, (analyzed_data_file,
                parameters) in enumerate(files_per_parameter.items()):
        parameter_values.append(parameters.values()[0][0])
        with tb.openFile(analyzed_data_file, mode="r") as in_file_h5:
            occupancy_masked = mask_columns(
                pixel_array=in_file_h5.root.HistOcc[:],
                ignore_columns=ignore_columns
            )  # mask the not scanned columns for analysis and plotting
            thresholds_masked = mask_columns(
                pixel_array=in_file_h5.root.HistThresholdFitted[:],
                ignore_columns=ignore_columns)
            if create_plots:
                plot_three_way(hist=thresholds_masked,
                               title='Threshold Fitted for ' +
                               parameters.keys()[0] + ' = ' +
                               str(parameters.values()[0][0]),
                               filename=output_pdf)
                plsr_dacs = analysis_utils.get_scan_parameter(
                    meta_data_array=in_file_h5.root.meta_data[:])['PlsrDAC']
                plot_scurves(occupancy_hist=occupancy_masked,
                             scan_parameters=plsr_dacs,
                             scan_parameter_name='PlsrDAC',
                             filename=output_pdf)
            # fill the calibration data arrays
            mean_threshold_calibration[index] = np.ma.mean(thresholds_masked)
            mean_threshold_rms_calibration[index] = np.ma.std(
                thresholds_masked)
            threshold_calibration[:, :, index] = thresholds_masked.T
        progress_bar.update(index)
    progress_bar.finish()

    with tb.openFile(calibration_file + '.h5', mode="w") as out_file_h5:
        store_calibration_data_as_array(
            out_file_h5=out_file_h5,
            mean_threshold_calibration=mean_threshold_calibration,
            mean_threshold_rms_calibration=mean_threshold_rms_calibration,
            threshold_calibration=threshold_calibration,
            parameter_name=parameter_name,
            parameter_values=parameter_values)
        store_calibration_data_as_table(
            out_file_h5=out_file_h5,
            mean_threshold_calibration=mean_threshold_calibration,
            mean_threshold_rms_calibration=mean_threshold_rms_calibration,
            threshold_calibration=threshold_calibration,
            parameter_values=parameter_values)

    if create_plots:
        plot_scatter(x=parameter_values,
                     y=mean_threshold_calibration,
                     title='Threshold calibration',
                     x_label=parameter_name,
                     y_label='Mean threshold',
                     log_x=False,
                     filename=output_pdf)
        plot_scatter(x=parameter_values,
                     y=mean_threshold_calibration,
                     title='Threshold calibration',
                     x_label=parameter_name,
                     y_label='Mean threshold',
                     log_x=True,
                     filename=output_pdf)
        output_pdf.close()
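
Once create_threshold_calibration has run, the calibration written to '<scan_base>_calibration.h5' can be read back and interpolated to relate the scan parameter to the mean threshold. The snippet below is a minimal read-back sketch, assuming the node name and attribute created by store_calibration_data_as_array above; the file path and the query value 42 are placeholders.

import numpy as np
import tables as tb

with tb.open_file('example_scan_calibration.h5', mode='r') as in_file_h5:  # placeholder path
    node = in_file_h5.root.HistThresholdMeanCalibration
    mean_threshold = node[:]  # mean threshold per scan parameter value
    parameter_values = np.array(node.attrs.scan_parameter_values)

# Interpolate the mean threshold at an intermediate scan parameter value;
# np.interp assumes parameter_values is sorted in ascending order.
print(np.interp(42, parameter_values, mean_threshold))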
Code example #39
0
File: scan_hit_delay.py Project: CARIBOuSystem/pyBAR
    def analyze(self):
        #         plsr_dac_slope = self.register.calibration_parameters['C_Inj_High'] * self.register.calibration_parameters['Vcal_Coeff_1']
        plsr_dac_slope = 55.

        # Interpret data and create hit table
        with AnalyzeRawData(raw_data_file=self.output_filename,
                            create_pdf=False) as analyze_raw_data:
            analyze_raw_data.create_occupancy_hist = False  # too many scan parameters to histogram in RAM
            analyze_raw_data.create_hit_table = True
            analyze_raw_data.interpreter.set_warning_output(
                False)  # a lot of data produces unknown words
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.interpreter.print_summary()

        # Create relative BCID and mean relative BCID histogram for each pixel / injection delay / PlsrDAC setting
        with tb.open_file(self.output_filename + '_analyzed.h5',
                          mode="w") as out_file_h5:
            hists_folder = out_file_h5.create_group(out_file_h5.root,
                                                    'PixelHistsMeanRelBcid')
            hists_folder_2 = out_file_h5.create_group(out_file_h5.root,
                                                      'PixelHistsRelBcid')
            hists_folder_3 = out_file_h5.create_group(out_file_h5.root,
                                                      'PixelHistsTot')
            hists_folder_4 = out_file_h5.create_group(out_file_h5.root,
                                                      'PixelHistsMeanTot')
            hists_folder_5 = out_file_h5.create_group(out_file_h5.root,
                                                      'HistsTot')

            def store_bcid_histograms(bcid_array, tot_array, tot_pixel_array):
                logging.debug('Store histograms for PlsrDAC ' +
                              str(old_plsr_dac))
                bcid_mean_array = np.average(
                    bcid_array, axis=3, weights=range(0, 16)
                ) * sum(range(0, 16)) / np.sum(bcid_array, axis=3).astype(
                    'f4'
                )  # calculate the mean BCID per pixel and scan parameter
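                # np.average(counts, weights=range(16)) gives sum(bcid * counts) / sum(range(16));
                # multiplying by sum(range(16)) and dividing by the per-pixel hit count
                # therefore yields the count-weighted mean relative BCID.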
                tot_pixel_mean_array = np.average(
                    tot_pixel_array, axis=3, weights=range(0, 16)
                ) * sum(range(0, 16)) / np.sum(tot_pixel_array, axis=3).astype(
                    'f4'
                )  # calculate the mean tot per pixel and scan parameter
                bcid_mean_result = np.swapaxes(bcid_mean_array, 0, 1)
                bcid_result = np.swapaxes(bcid_array, 0, 1)
                tot_pixel_result = np.swapaxes(tot_pixel_array, 0, 1)
                tot_mean_pixel_result = np.swapaxes(tot_pixel_mean_array, 0, 1)

                out = out_file_h5.createCArray(
                    hists_folder,
                    name='HistPixelMeanRelBcidPerDelayPlsrDac_%03d' %
                    old_plsr_dac,
                    title=
                    'Mean relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC '
                    + str(old_plsr_dac),
                    atom=tb.Atom.from_dtype(bcid_mean_result.dtype),
                    shape=bcid_mean_result.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                out.attrs.dimensions = 'column, row, injection delay'
                out.attrs.injection_delay_values = injection_delay
                out[:] = bcid_mean_result
                out_2 = out_file_h5.createCArray(
                    hists_folder_2,
                    name='HistPixelRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac,
                    title=
                    'Relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC '
                    + str(old_plsr_dac),
                    atom=tb.Atom.from_dtype(bcid_result.dtype),
                    shape=bcid_result.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                out_2.attrs.dimensions = 'column, row, injection delay, relative bcid'
                out_2.attrs.injection_delay_values = injection_delay
                out_2[:] = bcid_result
                out_3 = out_file_h5.createCArray(
                    hists_folder_3,
                    name='HistPixelTotPerDelayPlsrDac_%03d' % old_plsr_dac,
                    title=
                    'Tot hist per pixel and different PlsrDAC delays for PlsrDAC '
                    + str(old_plsr_dac),
                    atom=tb.Atom.from_dtype(tot_pixel_result.dtype),
                    shape=tot_pixel_result.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                out_3.attrs.dimensions = 'column, row, injection delay'
                out_3.attrs.injection_delay_values = injection_delay
                out_3[:] = tot_pixel_result
                out_4 = out_file_h5.createCArray(
                    hists_folder_4,
                    name='HistPixelMeanTotPerDelayPlsrDac_%03d' % old_plsr_dac,
                    title=
                    'Mean tot hist per pixel and different PlsrDAC delays for PlsrDAC '
                    + str(old_plsr_dac),
                    atom=tb.Atom.from_dtype(tot_mean_pixel_result.dtype),
                    shape=tot_mean_pixel_result.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                out_4.attrs.dimensions = 'column, row, injection delay'
                out_4.attrs.injection_delay_values = injection_delay
                out_4[:] = tot_mean_pixel_result
                out_5 = out_file_h5.createCArray(
                    hists_folder_5,
                    name='HistTotPlsrDac_%03d' % old_plsr_dac,
                    title='Tot histogram for PlsrDAC ' + str(old_plsr_dac),
                    atom=tb.Atom.from_dtype(tot_array.dtype),
                    shape=tot_array.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                out_5.attrs.injection_delay_values = injection_delay
                out_5[:] = tot_array

            old_plsr_dac = None

            # Get scan parameters from interpreted file
            with tb.open_file(self.output_filename + '_interpreted.h5',
                              'r') as in_file_h5:
                scan_parameters_dict = get_scan_parameter(
                    in_file_h5.root.meta_data[:])
                plsr_dac = scan_parameters_dict['PlsrDAC']
                hists_folder._v_attrs.plsr_dac_values = plsr_dac
                hists_folder_2._v_attrs.plsr_dac_values = plsr_dac
                hists_folder_3._v_attrs.plsr_dac_values = plsr_dac
                hists_folder_4._v_attrs.plsr_dac_values = plsr_dac
                injection_delay = scan_parameters_dict[scan_parameters_dict.keys()[1]]  # injection delay parameter name is unknown; it is expected to be the inner loop parameter
                scan_parameters = scan_parameters_dict.keys()

            bcid_array = np.zeros(
                (80, 336, len(injection_delay), 16),
                dtype=np.int16)  # bcid array of actual PlsrDAC
            tot_pixel_array = np.zeros(
                (80, 336, len(injection_delay), 16),
                dtype=np.int16)  # tot pixel array of actual PlsrDAC
            tot_array = np.zeros((16, ),
                                 dtype=np.int32)  # tot array of actual PlsrDAC

            logging.info('Store histograms for PlsrDAC values ' +
                         str(plsr_dac))
            progress_bar = progressbar.ProgressBar(widgets=[
                '',
                progressbar.Percentage(), ' ',
                progressbar.Bar(marker='*', left='|', right='|'), ' ',
                progressbar.AdaptiveETA()
            ],
                                                   maxval=max(plsr_dac) -
                                                   min(plsr_dac),
                                                   term_width=80)

            for index, (parameters, hits) in enumerate(
                    get_hits_of_scan_parameter(self.output_filename +
                                               '_interpreted.h5',
                                               scan_parameters,
                                               chunk_size=1.5e7)):
                if index == 0:
                    progress_bar.start(
                    )  # start after the event index is created to get reasonable ETA
                actual_plsr_dac, actual_injection_delay = parameters[
                    0], parameters[1]
                column, row, rel_bcid, tot = hits['column'] - 1, hits[
                    'row'] - 1, hits['relative_BCID'], hits['tot']
                bcid_array_fast = hist_3d_index(column,
                                                row,
                                                rel_bcid,
                                                shape=(80, 336, 16))
                tot_pixel_array_fast = hist_3d_index(column,
                                                     row,
                                                     tot,
                                                     shape=(80, 336, 16))
                tot_array_fast = hist_1d_index(tot, shape=(16, ))

                if old_plsr_dac != actual_plsr_dac:  # Store the data of the current PlsrDAC value
                    if old_plsr_dac:  # nothing to store yet for the very first PlsrDAC setting
                        store_bcid_histograms(bcid_array, tot_array,
                                              tot_pixel_array)
                        progress_bar.update(old_plsr_dac - min(plsr_dac))
                    # Reset the histograms for the next PlsrDAC setting
                    bcid_array = np.zeros((80, 336, len(injection_delay), 16),
                                          dtype=np.int16)
                    tot_pixel_array = np.zeros(
                        (80, 336, len(injection_delay), 16), dtype=np.int16)
                    tot_array = np.zeros((16, ), dtype=np.int32)
                    old_plsr_dac = actual_plsr_dac
                injection_delay_index = np.where(
                    np.array(injection_delay) == actual_injection_delay)[0][0]
                bcid_array[:, :, injection_delay_index, :] += bcid_array_fast
                tot_pixel_array[:, :,
                                injection_delay_index, :] += tot_pixel_array_fast
                tot_array += tot_array_fast
            store_bcid_histograms(
                bcid_array, tot_array,
                tot_pixel_array)  # save histograms of last PlsrDAC setting
            progress_bar.finish()

        # Take the mean relative BCID histogram of each PlsrDAC value and calculate the delay for each pixel
        with tb.open_file(self.output_filename + '_analyzed.h5',
                          mode="r") as in_file_h5:
            # Create temporary result data structures
            plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
            timewalk = np.zeros(shape=(80, 336, len(plsr_dac_values)),
                                dtype=np.int8)  # result array
            tot = np.zeros(shape=(len(plsr_dac_values), ),
                           dtype=np.float16)  # result array
            hit_delay = np.zeros(shape=(80, 336, len(plsr_dac_values)),
                                 dtype=np.int8)  # result array
            min_rel_bcid = np.zeros(
                shape=(80, 336), dtype=np.int8
            )  # temp array to make sure that the S-curve from the same BCID is used
            delay_calibration_data = []
            delay_calibration_data_error = []

            # Determine the minimum BCID; it is used as the reference for the hit delay calculation, which is not guaranteed to work.
            plsr_dac_min = min(plsr_dac_values)
            rel_bcid_min_injection = in_file_h5.get_node(
                in_file_h5.root.PixelHistsMeanRelBcid,
                'HistPixelMeanRelBcidPerDelayPlsrDac_%03d' % plsr_dac_min)
            injection_delays = np.array(
                rel_bcid_min_injection.attrs.injection_delay_values)
            injection_delay_min = np.where(
                injection_delays == np.amax(injection_delays))[0][0]
            bcid_min = int(
                round(
                    np.mean(
                        np.ma.masked_array(
                            rel_bcid_min_injection[:, :, injection_delay_min],
                            np.isnan(
                                rel_bcid_min_injection[:, :,
                                                       injection_delay_min]))))
            ) - 1
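            # bcid_min is the rounded mean relative BCID seen at the largest injection delay
            # for the lowest PlsrDAC, minus one; it serves below as the common reference BCID
            # for the absolute hit delay extraction.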

            # Info output with progressbar
            logging.info('Create timewalk info for PlsrDACs ' +
                         str(plsr_dac_values))
            progress_bar = progressbar.ProgressBar(widgets=[
                '',
                progressbar.Percentage(), ' ',
                progressbar.Bar(marker='*', left='|', right='|'), ' ',
                progressbar.AdaptiveETA()
            ],
                                                   maxval=len(plsr_dac_values),
                                                   term_width=80)
            progress_bar.start()

            for index, node in enumerate(
                    in_file_h5.root.PixelHistsMeanRelBcid
            ):  # loop over all mean relative BCID hists for all PlsrDAC values
                # Select the S-curves
                pixel_data = node[:, :, :]
                pixel_data_fixed = pixel_data.reshape(
                    pixel_data.shape[0] * pixel_data.shape[1] *
                    pixel_data.shape[2])  # Reshape for interpolation of NaNs
                nans, x = np.isnan(pixel_data_fixed), lambda z: z.nonzero()[0]
                pixel_data_fixed[nans] = np.interp(
                    x(nans), x(~nans),
                    pixel_data_fixed[~nans])  # interpolate NaNs
                pixel_data_fixed = pixel_data_fixed.reshape(
                    pixel_data.shape[0], pixel_data.shape[1],
                    pixel_data.shape[2])  # Reshape after interpolation of NaNs
                pixel_data_round = np.round(pixel_data_fixed)
                pixel_data_round_diff = np.diff(pixel_data_round, axis=2)
                index_sel = np.where(
                    np.logical_and(pixel_data_round_diff > 0.,
                                   np.isfinite(pixel_data_round_diff)))

                # Temporary result histograms to be filled
                first_scurve_mean = np.zeros(
                    shape=(80, 336), dtype=np.int8
                )  # the first S-curve in the data for the lowest injection (for time walk)
                second_scurve_mean = np.zeros(
                    shape=(80, 336), dtype=np.int8
                )  # the second S-curve in the data (to calibrate one inj. delay step)
                a_scurve_mean = np.zeros(
                    shape=(80, 336), dtype=np.int8
                )  # the mean of the S-curve at a given rel. BCID (for hit delay)

                # Loop over the S-curve means
                for (row_index, col_index, delay_index) in np.column_stack(
                    (index_sel)):
                    delay = injection_delays[delay_index]
                    if first_scurve_mean[col_index, row_index] == 0:
                        if delay_index == 0:  # ignore the first index, can be wrong due to nan filling
                            continue
                        if pixel_data_round[
                                row_index, col_index, delay] >= min_rel_bcid[
                                    col_index,
                                    row_index]:  # make sure to always use the data of the same BCID
                            first_scurve_mean[col_index, row_index] = delay
                            min_rel_bcid[col_index,
                                         row_index] = pixel_data_round[
                                             row_index, col_index, delay]
                    elif second_scurve_mean[col_index, row_index] == 0 and (
                            delay - first_scurve_mean[col_index, row_index]
                    ) > 20:  # minimum distance of 20, smaller differences can be data 'jitter'
                        second_scurve_mean[col_index, row_index] = delay
                    if pixel_data_round[row_index, col_index,
                                        delay] == bcid_min:
                        if a_scurve_mean[col_index, row_index] == 0:
                            a_scurve_mean[col_index, row_index] = delay

                plsr_dac = int(re.search(r'\d+', node.name).group())
                plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0]
                if (np.count_nonzero(first_scurve_mean) -
                        np.count_nonzero(a_scurve_mean)) > 1e3:
                    logging.warning(
                        "The common BCID to find the absolute hit delay was set wrong! Hit delay calculation will be wrong."
                    )
                selection = (second_scurve_mean -
                             first_scurve_mean)[np.logical_and(
                                 second_scurve_mean > 0,
                                 first_scurve_mean < second_scurve_mean)]
                delay_calibration_data.append(np.mean(selection))
                delay_calibration_data_error.append(np.std(selection))
                # Store the actual PlsrDAC data into result hist
                timewalk[:, :,
                         plsr_dac_index] = first_scurve_mean  # Save the plsr delay of first s-curve (for time walk calc.)
                hit_delay[:, :,
                          plsr_dac_index] = a_scurve_mean  # Save the plsr delay of s-curve of fixed rel. BCID (for hit delay calc.)
                progress_bar.update(index)

            for index, node in enumerate(
                    in_file_h5.root.HistsTot
            ):  # loop over tot hist for all PlsrDAC values
                plsr_dac = int(re.search(r'\d+', node.name).group())
                plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0]
                tot_data = node[:]
                tot[plsr_dac_index] = get_mean_from_histogram(
                    tot_data, range(16))

            # Calibrate the injection delay step size from the average distance between two S-curves over all pixels
            delay_calibration_mean = np.mean(
                np.array(delay_calibration_data[2:])[np.isfinite(
                    np.array(delay_calibration_data[2:]))])
            delay_calibration, delay_calibration_error = curve_fit(
                lambda x, par: (par),
                injection_delays,
                delay_calibration_data,
                p0=delay_calibration_mean,
                sigma=delay_calibration_data_error,
                absolute_sigma=True)
            delay_calibration, delay_calibration_error = delay_calibration[
                0], delay_calibration_error[0][0]
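            # Fitting the constant lambda x, par: par with per-point sigmas and absolute_sigma=True
            # amounts to an error-weighted mean of the S-curve spacings; curve_fit also returns
            # the variance of that mean via the covariance matrix.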

            progress_bar.finish()

        #  Save time walk / hit delay hists
        with tb.open_file(self.output_filename + '_analyzed.h5',
                          mode="r+") as out_file_h5:
            timewalk_result = np.swapaxes(timewalk, 0, 1)
            hit_delay_result = np.swapaxes(hit_delay, 0, 1)
            out = out_file_h5.createCArray(
                out_file_h5.root,
                name='HistPixelTimewalkPerPlsrDac',
                title='Time walk per pixel and PlsrDAC',
                atom=tb.Atom.from_dtype(timewalk_result.dtype),
                shape=timewalk_result.shape,
                filters=tb.Filters(complib='blosc',
                                   complevel=5,
                                   fletcher32=False))
            out_2 = out_file_h5.createCArray(
                out_file_h5.root,
                name='HistPixelHitDelayPerPlsrDac',
                title='Hit delay per pixel and PlsrDAC',
                atom=tb.Atom.from_dtype(hit_delay_result.dtype),
                shape=hit_delay_result.shape,
                filters=tb.Filters(complib='blosc',
                                   complevel=5,
                                   fletcher32=False))
            out_3 = out_file_h5.createCArray(
                out_file_h5.root,
                name='HistTotPerPlsrDac',
                title='Tot per PlsrDAC',
                atom=tb.Atom.from_dtype(tot.dtype),
                shape=tot.shape,
                filters=tb.Filters(complib='blosc',
                                   complevel=5,
                                   fletcher32=False))
            out.attrs.dimensions = 'column, row, PlsrDAC'
            out.attrs.delay_calibration = delay_calibration
            out.attrs.delay_calibration_error = delay_calibration_error
            out.attrs.plsr_dac_values = plsr_dac_values
            out_2.attrs.dimensions = 'column, row, PlsrDAC'
            out_2.attrs.delay_calibration = delay_calibration
            out_2.attrs.delay_calibration_error = delay_calibration_error
            out_2.attrs.plsr_dac_values = plsr_dac_values
            out_3.attrs.dimensions = 'PlsrDAC'
            out_3.attrs.plsr_dac_values = plsr_dac_values
            out[:] = timewalk_result
            out_2[:] = hit_delay_result
            out_3[:] = tot

        # Mask the pixels that have invalid data and create a plot of the relative time walk for all pixels
        with tb.open_file(self.output_filename + '_analyzed.h5',
                          mode="r") as in_file_h5:

            def plot_hit_delay(hist_3d,
                               charge_values,
                               title,
                               xlabel,
                               ylabel,
                               filename,
                               threshold=None,
                               tot_values=None):
                # Interpolate tot values for second tot axis
                interpolation = interp1d(tot_values,
                                         charge_values,
                                         kind='slinear',
                                         bounds_error=True)
                tot = np.arange(16)
                tot = tot[np.logical_and(tot >= np.amin(tot_values),
                                         tot <= np.amax(tot_values))]

                array = np.transpose(hist_3d, axes=(2, 1, 0)).reshape(
                    hist_3d.shape[2], hist_3d.shape[0] * hist_3d.shape[1])
                y = np.mean(array, axis=1)
                y_err = np.std(array, axis=1)

                fig = Figure()
                FigureCanvas(fig)
                ax = fig.add_subplot(111)
                fig.patch.set_facecolor('white')
                ax.grid(True)
                ax.set_xlabel(xlabel)
                ax.set_ylabel(ylabel)
                ax.set_xlim((0, np.amax(charge_values)))
                ax.set_ylim((np.amin(y - y_err), np.amax(y + y_err)))
                ax.plot(charge_values, y, '.-', color='black', label=title)
                if threshold is not None:
                    ax.plot([threshold, threshold],
                            [np.amin(y - y_err),
                             np.amax(y + y_err)],
                            linestyle='--',
                            color='black',
                            label='Threshold\n%d e' % (threshold))
                ax.fill_between(charge_values,
                                y - y_err,
                                y + y_err,
                                color='gray',
                                alpha=0.5,
                                facecolor='gray',
                                label='RMS')
                ax2 = ax.twiny()
                ax2.set_xlabel("ToT")

                ticklab = ax2.xaxis.get_ticklabels()[0]
                trans = ticklab.get_transform()
                ax2.xaxis.set_label_coords(np.amax(charge_values),
                                           1,
                                           transform=trans)
                ax2.set_xlim(ax.get_xlim())
                ax2.set_xticks(interpolation(tot))
                ax2.set_xticklabels([str(int(i)) for i in tot])
                ax.text(0.5,
                        1.07,
                        title,
                        horizontalalignment='center',
                        fontsize=18,
                        transform=ax2.transAxes)
                ax.legend()
                filename.savefig(fig)

            plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
            delay_calibration = in_file_h5.root.HistPixelHitDelayPerPlsrDac._v_attrs.delay_calibration
            charge_values = np.array(plsr_dac_values)[:] * plsr_dac_slope
            hist_timewalk = in_file_h5.root.HistPixelTimewalkPerPlsrDac[:, :, :]
            hist_hit_delay = in_file_h5.root.HistPixelHitDelayPerPlsrDac[:, :, :]
            tot = in_file_h5.root.HistTotPerPlsrDac[:]

            hist_rel_timewalk = np.amax(
                hist_timewalk, axis=2)[:, :, np.newaxis] - hist_timewalk
            hist_rel_hit_delay = np.mean(hist_hit_delay[:, :,
                                                        -1]) - hist_hit_delay
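            # hist_rel_timewalk references each pixel to its own extremum over the PlsrDAC axis
            # (np.amax), keeping only the charge-dependent part of the delay; hist_rel_hit_delay
            # is instead referenced to the mean hit delay at the last (highest) PlsrDAC value.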

            # Create mask and apply for bad pixels
            mask = np.ones(hist_rel_timewalk.shape, dtype=np.int8)
            for node in in_file_h5.root.PixelHistsMeanRelBcid:
                pixel_data = node[:, :, :]
                a = (np.sum(pixel_data, axis=2))
                mask[np.isfinite(a), :] = 0

            hist_rel_timewalk = np.ma.masked_array(hist_rel_timewalk, mask)
            hist_hit_delay = np.ma.masked_array(hist_hit_delay, mask)

            output_pdf = PdfPages(self.output_filename + '.pdf')
            plot_hit_delay(np.swapaxes(hist_rel_timewalk, 0, 1) * 25. /
                           delay_calibration,
                           charge_values=charge_values,
                           title='Time walk',
                           xlabel='Charge [e]',
                           ylabel='Time walk [ns]',
                           filename=output_pdf,
                           threshold=np.amin(charge_values),
                           tot_values=tot)
            plot_hit_delay(np.swapaxes(hist_rel_hit_delay, 0, 1) * 25. /
                           delay_calibration,
                           charge_values=charge_values,
                           title='Hit delay',
                           xlabel='Charge [e]',
                           ylabel='Hit delay [ns]',
                           filename=output_pdf,
                           threshold=np.amin(charge_values),
                           tot_values=tot)
            plot_scurves(np.swapaxes(hist_rel_timewalk, 0, 1),
                         scan_parameters=charge_values,
                         title='Timewalk of the FE-I4',
                         scan_parameter_name='Charge [e]',
                         ylabel='Timewalk [ns]',
                         min_x=0,
                         y_scale=25. / delay_calibration,
                         filename=output_pdf)
            plot_scurves(
                np.swapaxes(hist_hit_delay[:, :, :], 0, 1),
                scan_parameters=charge_values,
                title=
                'Hit delay (T0) with internal charge injection\nof the FE-I4',
                scan_parameter_name='Charge [e]',
                ylabel='Hit delay [ns]',
                min_x=0,
                y_scale=25. / delay_calibration,
                filename=output_pdf)

            for i in [
                    0, 1,
                    len(plsr_dac_values) / 4,
                    len(plsr_dac_values) / 2, -1
            ]:  # plot 2D hists at the lowest, second lowest, 1/4, 1/2 and highest PlsrDAC setting
                plot_three_way(
                    hist_rel_timewalk[:, :, i] * 25. / delay_calibration,
                    title='Time walk at %.0f e' % (charge_values[i]),
                    x_axis_title='Time walk [ns]',
                    filename=output_pdf)
                plot_three_way(
                    hist_hit_delay[:, :, i] * 25. / delay_calibration,
                    title=
                    'Hit delay (T0) with internal charge injection at %.0f e' %
                    (charge_values[i]),
                    x_axis_title='Hit delay [ns]',
                    minimum=np.amin(hist_hit_delay[:, :, i]),
                    maximum=np.amax(hist_hit_delay[:, :, i]),
                    filename=output_pdf)
            output_pdf.close()
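
For reference, the factor 25. / delay_calibration used in the plots above converts injection-delay steps into nanoseconds: one BCID corresponds to 25 ns, and delay_calibration is the fitted number of delay steps that span one BCID. A two-line check with a made-up calibration value:

delay_calibration = 57.0  # assumed example value: ~57 delay steps per 25 ns BCID
print(10 * 25. / delay_calibration)  # a 10-step time walk corresponds to roughly 4.4 ns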