def analyze(self): self.register.set_global_register_value("Vthin_AltFine", self.vthin_altfine_best) self.register.set_global_register_value("Vthin_AltCoarse", self.vthin_altcoarse_best) plotThreeWay(self.occ_array_sel_pixel.transpose(), title="Occupancy after GDAC tuning (GDAC " + str(self.scan_parameters.GDAC) + ")", x_axis_title='Occupancy', filename=self.plots_filename, maximum=self.n_injections_gdac) if self.close_plots: self.plots_filename.close()
def analyze(self): self.register.set_pixel_register_value("FDAC", self.fdac_mask_best) plotThreeWay(hist=self.tot_mean_best.transpose(), title="Mean ToT after FDAC tuning", x_axis_title="Mean ToT", filename=self.plots_filename, minimum=0, maximum=15) plotThreeWay(hist=self.fdac_mask_best.transpose(), title="FDAC distribution after tuning", x_axis_title="FDAC", filename=self.plots_filename, minimum=0, maximum=15) if self.close_plots: self.plots_filename.close()
def analyze(self): self.register.set_pixel_register_value("TDAC", self.tdac_mask_best) plotThreeWay(hist=self.occupancy_best.transpose(), title="Occupancy after TDAC tuning", x_axis_title="Occupancy", filename=self.plots_filename, maximum=self.n_injections_tdac) plotThreeWay(hist=self.tdac_mask_best.transpose(), title="TDAC distribution after tuning", x_axis_title="TDAC", filename=self.plots_filename, maximum=32) if self.close_plots: self.plots_filename.close()
def test_pixel_register(self, pix_regs=[ "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC" ], dcs=range(40)): '''Test Pixel Register ''' logging.info('Running Pixel Register Test for %s' % str(pix_regs)) self.register_utils.configure_pixel() commands = [] commands.extend(self.register.get_commands("ConfMode")) self.register_utils.send_commands(commands) self.fifo_readout.reset_sram_fifo() plots = PdfPages(self.output_filename + ".pdf") for i, result in enumerate( read_pixel_register(self, pix_regs=pix_regs, dcs=dcs)): result_array = np.ones_like(result) result_array.data[result == self.register.get_pixel_register_value( pix_regs[i])] = 0 logging.info("Pixel register %s: %d pixel error" % (pix_regs[i], np.count_nonzero(result_array == 1))) plotting.plotThreeWay( result_array.T, title=str(pix_regs[i]) + " register test with " + str(np.count_nonzero(result_array == 1)) + '/' + str(26880 - np.ma.count_masked(result_array)) + " pixel failing", x_axis_title="0:OK, 1:FAIL", maximum=1, filename=plots) plots.close()
def draw_hit_map_from_raw_data(raw_data_file, front_ends):
    with PdfPages(raw_data_file[:-3] + '.pdf') as output_pdf:
        with tb.open_file(raw_data_file, 'r') as in_file_h5:
            raw_data = in_file_h5.root.raw_data[:]
            for front_end in range(front_ends):
                print 'Create occupancy hist of front end %d' % front_end
                occupancy_array, _, _ = np.histogram2d(*readout_utils.convert_data_array(raw_data, filter_func=readout_utils.logical_and(readout_utils.is_data_record, readout_utils.is_data_from_channel(4 - front_end)), converter_func=readout_utils.get_col_row_array_from_data_record_array), bins=(80, 336), range=[[1, 80], [1, 336]])
                plotting.plotThreeWay(hist=occupancy_array.T, title="Occupancy of chip %d" % front_end, x_axis_title="Occupancy", filename=output_pdf)
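# draw_hit_map_from_raw_data turns decoded (column, row) pairs into an 80 x 336
# occupancy map with np.histogram2d; the same pattern recurs in the tuning scans
# further down. A small self-contained sketch of just that step, assuming 1-based
# FE-I4 column/row numbering as in the ranges used above:
import numpy as np

# some fake decoded hits (column 1..80, row 1..336)
columns = np.array([1, 1, 5, 80, 80, 80])
rows = np.array([1, 1, 100, 336, 336, 1])

occupancy, _, _ = np.histogram2d(columns, rows, bins=(80, 336), range=[[1, 80], [1, 336]])
print(occupancy.shape)     # (80, 336): column index first, row index second
print(occupancy[0, 0])     # 2.0 -> two hits in pixel (1, 1)
print(occupancy[79, 335])  # 2.0 -> two hits in pixel (80, 336)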
def analyze(self): self.register.set_global_register_value("Vthin_AltFine", self.last_good_threshold) self.register.set_pixel_register_value('TDAC', self.last_good_tdac) self.register.set_pixel_register_value('Enable', self.last_good_enable_mask) with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data: analyze_raw_data.create_source_scan_hist = True analyze_raw_data.interpreter.set_warning_output(False) analyze_raw_data.interpret_word_table() analyze_raw_data.interpreter.print_summary() analyze_raw_data.plot_histograms() plot_occupancy(self.last_occupancy_hist.T, title='Noisy Pixels at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), filename=analyze_raw_data.output_pdf) plot_fancy_occupancy(self.last_occupancy_hist.T, filename=analyze_raw_data.output_pdf) plot_occupancy(self.last_occupancy_mask.T, title='Occupancy Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), z_max=1, filename=analyze_raw_data.output_pdf) plot_fancy_occupancy(self.last_occupancy_mask.T, filename=analyze_raw_data.output_pdf) plotThreeWay(self.last_tdac_distribution.T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), x_axis_title="TDAC", filename=analyze_raw_data.output_pdf, maximum=31, bins=32) plot_occupancy(self.last_tdac_distribution.T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), z_max=31, filename=analyze_raw_data.output_pdf) plot_occupancy(self.register.get_pixel_register_value('Enable').T, title='Enable Mask', z_max=1, filename=analyze_raw_data.output_pdf) plot_fancy_occupancy( self.register.get_pixel_register_value('Enable').T, filename=analyze_raw_data.output_pdf)
def analyze(self): self.register.set_global_register_value("Vthin_AltFine", self.last_good_threshold + self.increase_threshold) self.register.set_pixel_register_value('TDAC', self.last_good_tdac) self.register.set_pixel_register_value('Enable', self.last_good_enable_mask) with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data: analyze_raw_data.create_source_scan_hist = True analyze_raw_data.interpreter.set_warning_output(False) analyze_raw_data.interpret_word_table() analyze_raw_data.interpreter.print_summary() analyze_raw_data.plot_histograms() plot_occupancy(self.last_occupancy_hist.T, title='Noisy Pixels at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), filename=analyze_raw_data.output_pdf) plot_fancy_occupancy(self.last_occupancy_hist.T, filename=analyze_raw_data.output_pdf) plot_occupancy(self.last_occupancy_mask.T, title='Occupancy Mask at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), z_max=1, filename=analyze_raw_data.output_pdf) plot_fancy_occupancy(self.last_occupancy_mask.T, filename=analyze_raw_data.output_pdf) plotThreeWay(self.last_tdac_distribution.T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), x_axis_title="TDAC", filename=analyze_raw_data.output_pdf, maximum=31, bins=32) plot_occupancy(self.last_tdac_distribution.T, title='TDAC at Vthin_AltFine %d Step %d' % (self.last_reg_val, self.last_step), z_max=31, filename=analyze_raw_data.output_pdf) plot_occupancy(self.register.get_pixel_register_value('Enable').T, title='Enable Mask', z_max=1, filename=analyze_raw_data.output_pdf) plot_fancy_occupancy(self.register.get_pixel_register_value('Enable').T, filename=analyze_raw_data.output_pdf)
def test_pixel_register(self, pix_regs=["EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"], dcs=range(40)): '''Test Pixel Register ''' logging.info('Running Pixel Register Test for %s', str(pix_regs)) self.register_utils.configure_pixel() commands = [] commands.extend(self.register.get_commands("ConfMode")) self.register_utils.send_commands(commands) self.fifo_readout.reset_sram_fifo() pixel_register_errors = 0 plots = PdfPages(self.output_filename + ".pdf") for i, result in enumerate(read_pixel_register(self, pix_regs=pix_regs, dcs=dcs)): result_array = np.ones_like(result) result_array.data[result == self.register.get_pixel_register_value(pix_regs[i])] = 0 pixel_register_errors += np.count_nonzero(result_array == 1) logging.info("Pixel register %s: %d pixel error", pix_regs[i], np.count_nonzero(result_array == 1)) plotting.plotThreeWay(result_array.T, title=str(pix_regs[i]) + " register test with " + str(np.count_nonzero(result_array == 1)) + '/' + str(26880 - np.ma.count_masked(result_array)) + " pixel failing", x_axis_title="0:OK, 1:FAIL", maximum=1, filename=plots) plots.close() return pixel_register_errors
def analyze(self):
    if self.global_iterations:
        GdacTuning.analyze(self)
        FeedbackTuning.analyze(self)
    if self.local_iterations:
        TdacTuning.analyze(self)
        FdacTuning.analyze(self)
    if self.make_plots:
        if self.local_iterations:
            plotThreeWay(hist=self.tot_mean_best.transpose(), title="Mean ToT after last FDAC tuning", x_axis_title='Mean ToT', filename=self.plots_filename)
            plotThreeWay(hist=self.register.get_pixel_register_value("FDAC").transpose(), title="FDAC distribution after last FDAC tuning", x_axis_title='FDAC', filename=self.plots_filename, maximum=16)
        if self.local_iterations >= 0:
            plotThreeWay(hist=self.occupancy_best.transpose(), title="Occupancy after tuning", x_axis_title='Occupancy', filename=self.plots_filename, maximum=100)
            plotThreeWay(hist=self.register.get_pixel_register_value("TDAC").transpose(), title="TDAC distribution after complete tuning", x_axis_title='TDAC', filename=self.plots_filename, maximum=32)
        self.plots_filename.close()
def analyze(self):
    def analyze_raw_data_file(file_name):
        with AnalyzeRawData(raw_data_file=file_name, create_pdf=False) as analyze_raw_data:
            analyze_raw_data.create_tot_hist = False
            analyze_raw_data.create_fitted_threshold_hists = True
            analyze_raw_data.create_threshold_mask = True
            analyze_raw_data.interpreter.set_warning_output(True)  # so far the data structure in a threshold scan was always bad, too many warnings given
            analyze_raw_data.interpret_word_table()

    def store_calibration_data_as_table(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_values):
        logging.info("Storing calibration data in a table...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_table = out_file_h5.createTable(out_file_h5.root, name='MeanThresholdCalibration', description=data_struct.MeanThresholdCalibrationTable, title='mean_threshold_calibration', filters=filter_table)
        threshold_calib_table = out_file_h5.createTable(out_file_h5.root, name='ThresholdCalibration', description=data_struct.ThresholdCalibrationTable, title='threshold_calibration', filters=filter_table)
        for column in range(80):
            for row in range(336):
                for parameter_value_index, parameter_value in enumerate(parameter_values):
                    threshold_calib_table.row['column'] = column
                    threshold_calib_table.row['row'] = row
                    threshold_calib_table.row['parameter_value'] = parameter_value
                    threshold_calib_table.row['threshold'] = threshold_calibration[column, row, parameter_value_index]
                    threshold_calib_table.row.append()
        for parameter_value_index, parameter_value in enumerate(parameter_values):
            mean_threshold_calib_table.row['parameter_value'] = parameter_value
            mean_threshold_calib_table.row['mean_threshold'] = mean_threshold_calibration[parameter_value_index]
            mean_threshold_calib_table.row['threshold_rms'] = mean_threshold_rms_calibration[parameter_value_index]
            mean_threshold_calib_table.row.append()
        threshold_calib_table.flush()
        mean_threshold_calib_table.flush()
        logging.info("done")

    def store_calibration_data_as_array(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration):
        logging.info("Storing calibration data in an array...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdMeanCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_calibration', filters=filter_table)
        mean_threshold_calib_rms_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdRMSCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_rms_calibration', filters=filter_table)
        threshold_calib_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdCalibration', atom=tb.Atom.from_dtype(threshold_calibration.dtype), shape=threshold_calibration.shape, title='threshold_calibration', filters=filter_table)
        mean_threshold_calib_array[:] = mean_threshold_calibration
        mean_threshold_calib_rms_array[:] = mean_threshold_rms_calibration
        threshold_calib_array[:] = threshold_calibration
        logging.info("done")

    def mask_columns(pixel_array, ignore_columns):
        idx = np.array(ignore_columns) - 1  # from FE to array columns
        m = np.zeros_like(pixel_array)
        m[:, idx] = 1
        return np.ma.masked_array(pixel_array, m)

    calibration_file = self.output_filename + '_calibration'
    raw_data_files = analysis_utils.get_data_file_names_from_scan_base(self.output_filename, filter_file_words=['interpreted', 'calibration_calibration'])
    parameter_name = self.scan_parameters._fields[1]

    for raw_data_file in raw_data_files:  # not using multithreading here, it is already used in the fit
        analyze_raw_data_file(raw_data_file)

    files_per_parameter = analysis_utils.get_parameter_value_from_file_names([file_name[:-3] + '_interpreted.h5' for file_name in raw_data_files], parameter_name)

    logging.info("Create calibration from data")
    with tb.openFile(self.output_filename + '.h5', mode="r") as in_file_h5:  # deduce settings from raw data file
        ignore_columns = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'ignore_columns')]['value'][0]
        ignore_columns = ast.literal_eval(ignore_columns)

    mean_threshold_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    mean_threshold_rms_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    threshold_calibration = np.empty(shape=(80, 336, len(raw_data_files)), dtype='<f8')

    if self.create_plots:
        logging.info('Saving calibration plots in: %s' % (calibration_file + '.pdf'))
        output_pdf = PdfPages(calibration_file + '.pdf')

    parameter_values = []
    for index, (analyzed_data_file, parameters) in enumerate(files_per_parameter.items()):
        parameter_values.append(parameters.values()[0][0])
        with tb.openFile(analyzed_data_file, mode="r") as in_file_h5:
            occupancy_masked = mask_columns(pixel_array=in_file_h5.root.HistOcc[:], ignore_columns=ignore_columns)  # mask the not scanned columns for analysis and plotting
            thresholds_masked = mask_columns(pixel_array=in_file_h5.root.HistThresholdFitted[:], ignore_columns=ignore_columns)
            if self.create_plots:
                plotThreeWay(hist=thresholds_masked, title='Threshold Fitted for ' + parameters.keys()[0] + ' = ' + str(parameters.values()[0][0]), filename=output_pdf)
                plsr_dacs = analysis_utils.get_scan_parameter(meta_data_array=in_file_h5.root.meta_data[:])['PlsrDAC']
                plot_scurves(occupancy_hist=occupancy_masked, scan_parameters=plsr_dacs, scan_parameter_name='PlsrDAC', filename=output_pdf)
            # fill the calibration data arrays
            mean_threshold_calibration[index] = np.ma.mean(thresholds_masked)
            mean_threshold_rms_calibration[index] = np.ma.std(thresholds_masked)
            threshold_calibration[:, :, index] = thresholds_masked.T

    with tb.openFile(calibration_file + '.h5', mode="w") as out_file_h5:
        store_calibration_data_as_array(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration)
        store_calibration_data_as_table(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_values=parameter_values)

    if self.create_plots:
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=False, filename=output_pdf)
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=True, filename=output_pdf)
        output_pdf.close()
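# mask_columns() above hides columns that were excluded from the scan so that they do
# not bias np.ma.mean / np.ma.std. A standalone sketch of the same masking, assuming a
# (336, 80) threshold map and 1-based FE column numbers (values are made up):
import numpy as np

threshold_map = np.full((336, 80), 50.0)
ignore_columns = (1, 78, 79, 80)                     # hypothetical ignored FE columns

mask = np.zeros_like(threshold_map)
mask[:, np.array(ignore_columns) - 1] = 1            # FE columns are 1-based, array columns 0-based
threshold_masked = np.ma.masked_array(threshold_map, mask)

print(np.ma.mean(threshold_masked))                  # mean over the 76 remaining columns only
print(np.ma.count_masked(threshold_masked))          # 4 * 336 masked entries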
def analyze(self):
    # plsr_dac_slope = self.register.calibration_parameters['C_Inj_High'] * self.register.calibration_parameters['Vcal_Coeff_1']
    plsr_dac_slope = 55.

    # Interpret data and create hit table
    with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=False) as analyze_raw_data:
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameters for in-RAM histogramming
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.interpreter.set_warning_output(False)  # a lot of data produces unknown words
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()

    # Create relative BCID and mean relative BCID histogram for each pixel / injection delay / PlsrDAC setting
    with tb.open_file(self.output_filename + '_analyzed.h5', mode="w") as out_file_h5:
        hists_folder = out_file_h5.create_group(out_file_h5.root, 'PixelHistsMeanRelBcid')
        hists_folder_2 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsRelBcid')
        hists_folder_3 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsTot')
        hists_folder_4 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsMeanTot')
        hists_folder_5 = out_file_h5.create_group(out_file_h5.root, 'HistsTot')

        def store_bcid_histograms(bcid_array, tot_array, tot_pixel_array):
            logging.debug('Store histograms for PlsrDAC ' + str(old_plsr_dac))
            bcid_mean_array = np.average(bcid_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(bcid_array, axis=3).astype('f4')  # calculate the mean BCID per pixel and scan parameter
            tot_pixel_mean_array = np.average(tot_pixel_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(tot_pixel_array, axis=3).astype('f4')  # calculate the mean ToT per pixel and scan parameter
            bcid_mean_result = np.swapaxes(bcid_mean_array, 0, 1)
            bcid_result = np.swapaxes(bcid_array, 0, 1)
            tot_pixel_result = np.swapaxes(tot_pixel_array, 0, 1)
            tot_mean_pixel_result = np.swapaxes(tot_pixel_mean_array, 0, 1)

            out = out_file_h5.createCArray(hists_folder, name='HistPixelMeanRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac, title='Mean relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_mean_result.dtype), shape=bcid_mean_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out.attrs.dimensions = 'column, row, injection delay'
            out.attrs.injection_delay_values = injection_delay
            out[:] = bcid_mean_result
            out_2 = out_file_h5.createCArray(hists_folder_2, name='HistPixelRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac, title='Relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_result.dtype), shape=bcid_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_2.attrs.dimensions = 'column, row, injection delay, relative bcid'
            out_2.attrs.injection_delay_values = injection_delay
            out_2[:] = bcid_result
            out_3 = out_file_h5.createCArray(hists_folder_3, name='HistPixelTotPerDelayPlsrDac_%03d' % old_plsr_dac, title='Tot hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_pixel_result.dtype), shape=tot_pixel_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_3.attrs.dimensions = 'column, row, injection delay'
            out_3.attrs.injection_delay_values = injection_delay
            out_3[:] = tot_pixel_result
            out_4 = out_file_h5.createCArray(hists_folder_4, name='HistPixelMeanTotPerDelayPlsrDac_%03d' % old_plsr_dac, title='Mean tot hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_mean_pixel_result.dtype), shape=tot_mean_pixel_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_4.attrs.dimensions = 'column, row, injection delay'
            out_4.attrs.injection_delay_values = injection_delay
            out_4[:] = tot_mean_pixel_result
            out_5 = out_file_h5.createCArray(hists_folder_5, name='HistTotPlsrDac_%03d' % old_plsr_dac, title='Tot histogram for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_array.dtype), shape=tot_array.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_5.attrs.injection_delay_values = injection_delay
            out_5[:] = tot_array

        old_plsr_dac = None

        # Get scan parameters from interpreted file
        with tb.open_file(self.output_filename + '_interpreted.h5', 'r') as in_file_h5:
            scan_parameters_dict = get_scan_parameter(in_file_h5.root.meta_data[:])
            plsr_dac = scan_parameters_dict['PlsrDAC']
            hists_folder._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_2._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_3._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_4._v_attrs.plsr_dac_values = plsr_dac
            injection_delay = scan_parameters_dict[scan_parameters_dict.keys()[1]]  # injection delay parameter name is unknown and should be in the inner loop
            scan_parameters = scan_parameters_dict.keys()

        bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int16)  # BCID array of actual PlsrDAC
        tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int16)  # ToT pixel array of actual PlsrDAC
        tot_array = np.zeros((16,), dtype=np.int32)  # ToT array of actual PlsrDAC

        logging.info('Store histograms for PlsrDAC values ' + str(plsr_dac))
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=max(plsr_dac) - min(plsr_dac), term_width=80)

        for index, (parameters, hits) in enumerate(get_hits_of_scan_parameter(self.output_filename + '_interpreted.h5', scan_parameters, chunk_size=1.5e7)):
            if index == 0:
                progress_bar.start()  # start after the event index is created to get a reasonable ETA
            actual_plsr_dac, actual_injection_delay = parameters[0], parameters[1]
            column, row, rel_bcid, tot = hits['column'] - 1, hits['row'] - 1, hits['relative_BCID'], hits['tot']
            bcid_array_fast = hist_3d_index(column, row, rel_bcid, shape=(80, 336, 16))
            tot_pixel_array_fast = hist_3d_index(column, row, tot, shape=(80, 336, 16))
            tot_array_fast = hist_1d_index(tot, shape=(16,))

            if old_plsr_dac != actual_plsr_dac:  # store the data of the actual PlsrDAC value
                if old_plsr_dac:  # special case for the first PlsrDAC setting
                    store_bcid_histograms(bcid_array, tot_array, tot_pixel_array)
                    progress_bar.update(old_plsr_dac - min(plsr_dac))
                # Reset the histograms for the next PlsrDAC setting
                bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int8)
                tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int8)
                tot_array = np.zeros((16,), dtype=np.int32)
                old_plsr_dac = actual_plsr_dac
            injection_delay_index = np.where(np.array(injection_delay) == actual_injection_delay)[0][0]
            bcid_array[:, :, injection_delay_index, :] += bcid_array_fast
            tot_pixel_array[:, :, injection_delay_index, :] += tot_pixel_array_fast
            tot_array += tot_array_fast
        else:  # save histograms of the last PlsrDAC setting
            store_bcid_histograms(bcid_array, tot_array, tot_pixel_array)
        progress_bar.finish()

    # Take the mean relative BCID histogram of each PlsrDAC value and calculate the delay for each pixel
    with tb.open_file(self.output_filename + '_analyzed.h5', mode="r") as in_file_h5:
        # Create temporary result data structures
        plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
        timewalk = np.zeros(shape=(80, 336, len(plsr_dac_values)), dtype=np.int8)  # result array
        tot = np.zeros(shape=(len(plsr_dac_values),), dtype=np.float16)  # result array
        hit_delay = np.zeros(shape=(80, 336, len(plsr_dac_values)), dtype=np.int8)  # result array
        min_rel_bcid = np.zeros(shape=(80, 336), dtype=np.int8)  # temporary array to make sure that the S-curve from the same BCID is used
        delay_calibration_data = []
        delay_calibration_data_error = []

        # Calculate the minimum BCID. That is chosen to calculate the hit delay. Calculation does not have to work.
        plsr_dac_min = min(plsr_dac_values)
        rel_bcid_min_injection = in_file_h5.get_node(in_file_h5.root.PixelHistsMeanRelBcid, 'HistPixelMeanRelBcidPerDelayPlsrDac_%03d' % plsr_dac_min)
        injection_delays = np.array(rel_bcid_min_injection.attrs.injection_delay_values)
        injection_delay_min = np.where(injection_delays == np.amax(injection_delays))[0][0]
        bcid_min = int(round(np.mean(np.ma.masked_array(rel_bcid_min_injection[:, :, injection_delay_min], np.isnan(rel_bcid_min_injection[:, :, injection_delay_min]))))) - 1

        # Info output with progressbar
        logging.info('Create timewalk info for PlsrDACs ' + str(plsr_dac_values))
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(plsr_dac_values), term_width=80)
        progress_bar.start()

        for index, node in enumerate(in_file_h5.root.PixelHistsMeanRelBcid):  # loop over all mean relative BCID hists for all PlsrDAC values
            # Select the S-curves
            pixel_data = node[:, :, :]
            pixel_data_fixed = pixel_data.reshape(pixel_data.shape[0] * pixel_data.shape[1] * pixel_data.shape[2])  # reshape for interpolation of NaNs
            nans, x = np.isnan(pixel_data_fixed), lambda z: z.nonzero()[0]
            pixel_data_fixed[nans] = np.interp(x(nans), x(~nans), pixel_data_fixed[~nans])  # interpolate NaNs
            pixel_data_fixed = pixel_data_fixed.reshape(pixel_data.shape[0], pixel_data.shape[1], pixel_data.shape[2])  # reshape after interpolation of NaNs
            pixel_data_round = np.round(pixel_data_fixed)
            pixel_data_round_diff = np.diff(pixel_data_round, axis=2)
            index_sel = np.where(np.logical_and(pixel_data_round_diff > 0., np.isfinite(pixel_data_round_diff)))

            # Temporary result histograms to be filled
            first_scurve_mean = np.zeros(shape=(80, 336), dtype=np.int8)  # the first S-curve in the data for the lowest injection (for time walk)
            second_scurve_mean = np.zeros(shape=(80, 336), dtype=np.int8)  # the second S-curve in the data (to calibrate one inj. delay step)
            a_scurve_mean = np.zeros(shape=(80, 336), dtype=np.int8)  # the mean of the S-curve at a given rel. BCID (for hit delay)

            # Loop over the S-curve means
            for (row_index, col_index, delay_index) in np.column_stack(index_sel):
                delay = injection_delays[delay_index]
                if first_scurve_mean[col_index, row_index] == 0:
                    if delay_index == 0:  # ignore the first index, can be wrong due to NaN filling
                        continue
                    if pixel_data_round[row_index, col_index, delay] >= min_rel_bcid[col_index, row_index]:  # make sure to always use the data of the same BCID
                        first_scurve_mean[col_index, row_index] = delay
                        min_rel_bcid[col_index, row_index] = pixel_data_round[row_index, col_index, delay]
                elif second_scurve_mean[col_index, row_index] == 0 and (delay - first_scurve_mean[col_index, row_index]) > 20:  # minimum distance 10, can otherwise be data 'jitter'
                    second_scurve_mean[col_index, row_index] = delay
                if pixel_data_round[row_index, col_index, delay] == bcid_min:
                    if a_scurve_mean[col_index, row_index] == 0:
                        a_scurve_mean[col_index, row_index] = delay

            plsr_dac = int(re.search(r'\d+', node.name).group())
            plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0]
            if (np.count_nonzero(first_scurve_mean) - np.count_nonzero(a_scurve_mean)) > 1e3:
                logging.warning("The common BCID to find the absolute hit delay was set wrong! Hit delay calculation will be wrong.")
            selection = (second_scurve_mean - first_scurve_mean)[np.logical_and(second_scurve_mean > 0, first_scurve_mean < second_scurve_mean)]
            delay_calibration_data.append(np.mean(selection))
            delay_calibration_data_error.append(np.std(selection))
            # Store the actual PlsrDAC data into result hist
            timewalk[:, :, plsr_dac_index] = first_scurve_mean  # save the plsr delay of the first S-curve (for time walk calc.)
            hit_delay[:, :, plsr_dac_index] = a_scurve_mean  # save the plsr delay of the S-curve of fixed rel. BCID (for hit delay calc.)
            progress_bar.update(index)

        for index, node in enumerate(in_file_h5.root.HistsTot):  # loop over ToT hist for all PlsrDAC values
            plsr_dac = int(re.search(r'\d+', node.name).group())
            plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0]
            tot_data = node[:]
            tot[plsr_dac_index] = get_mean_from_histogram(tot_data, range(16))

        # Calibrate the step size of the injection delay by the average difference of two S-curves of all pixels
        delay_calibration_mean = np.mean(np.array(delay_calibration_data[2:])[np.isfinite(np.array(delay_calibration_data[2:]))])
        delay_calibration, delay_calibration_error = curve_fit(lambda x, par: (par), injection_delays, delay_calibration_data, p0=delay_calibration_mean, sigma=delay_calibration_data_error, absolute_sigma=True)
        delay_calibration, delay_calibration_error = delay_calibration[0], delay_calibration_error[0][0]
        progress_bar.finish()

    # Save time walk / hit delay hists
    with tb.open_file(self.output_filename + '_analyzed.h5', mode="r+") as out_file_h5:
        timewalk_result = np.swapaxes(timewalk, 0, 1)
        hit_delay_result = np.swapaxes(hit_delay, 0, 1)
        out = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTimewalkPerPlsrDac', title='Time walk per pixel and PlsrDAC', atom=tb.Atom.from_dtype(timewalk_result.dtype), shape=timewalk_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        out_2 = out_file_h5.createCArray(out_file_h5.root, name='HistPixelHitDelayPerPlsrDac', title='Hit delay per pixel and PlsrDAC', atom=tb.Atom.from_dtype(hit_delay_result.dtype), shape=hit_delay_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        out_3 = out_file_h5.createCArray(out_file_h5.root, name='HistTotPerPlsrDac', title='Tot per PlsrDAC', atom=tb.Atom.from_dtype(tot.dtype), shape=tot.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        out.attrs.dimensions = 'column, row, PlsrDAC'
        out.attrs.delay_calibration = delay_calibration
        out.attrs.delay_calibration_error = delay_calibration_error
        out.attrs.plsr_dac_values = plsr_dac_values
        out_2.attrs.dimensions = 'column, row, PlsrDAC'
        out_2.attrs.delay_calibration = delay_calibration
        out_2.attrs.delay_calibration_error = delay_calibration_error
        out_2.attrs.plsr_dac_values = plsr_dac_values
        out_3.attrs.dimensions = 'PlsrDAC'
        out_3.attrs.plsr_dac_values = plsr_dac_values
        out[:] = timewalk_result
        out_2[:] = hit_delay_result
        out_3[:] = tot

    # Mask the pixels that have non-valid data and create a plot with the relative time walk for all pixels
    with tb.open_file(self.output_filename + '_analyzed.h5', mode="r") as in_file_h5:

        def plot_hit_delay(hist_3d, charge_values, title, xlabel, ylabel, filename, threshold=None, tot_values=None):
            # Interpolate ToT values for the second ToT axis
            interpolation = interp1d(tot_values, charge_values, kind='slinear', bounds_error=True)
            tot = np.arange(16)
            tot = tot[np.logical_and(tot >= np.amin(tot_values), tot <= np.amax(tot_values))]

            array = np.transpose(hist_3d, axes=(2, 1, 0)).reshape(hist_3d.shape[2], hist_3d.shape[0] * hist_3d.shape[1])
            y = np.mean(array, axis=1)
            y_err = np.std(array, axis=1)

            fig = Figure()
            canvas = FigureCanvas(fig)
            ax = fig.add_subplot(111)
            fig.patch.set_facecolor('white')
            ax.grid(True)
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)
            ax.set_xlim((0, np.amax(charge_values)))
            ax.set_ylim((np.amin(y - y_err), np.amax(y + y_err)))
            ax.plot(charge_values, y, '.-', color='black', label=title)
            if threshold is not None:
                ax.plot([threshold, threshold], [np.amin(y - y_err), np.amax(y + y_err)], linestyle='--', color='black', label='Threshold\n%d e' % (threshold))
            ax.fill_between(charge_values, y - y_err, y + y_err, color='gray', alpha=0.5, facecolor='gray', label='RMS')
            ax2 = ax.twiny()
            ax2.set_xlabel("ToT")
            ticklab = ax2.xaxis.get_ticklabels()[0]
            trans = ticklab.get_transform()
            ax2.xaxis.set_label_coords(np.amax(charge_values), 1, transform=trans)
            ax2.set_xlim(ax.get_xlim())
            ax2.set_xticks(interpolation(tot))
            ax2.set_xticklabels([str(int(i)) for i in tot])
            ax.text(0.5, 1.07, title, horizontalalignment='center', fontsize=18, transform=ax2.transAxes)
            ax.legend()
            filename.savefig(fig)

        plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
        delay_calibration = in_file_h5.root.HistPixelHitDelayPerPlsrDac._v_attrs.delay_calibration
        charge_values = np.array(plsr_dac_values)[:] * plsr_dac_slope
        hist_timewalk = in_file_h5.root.HistPixelTimewalkPerPlsrDac[:, :, :]
        hist_hit_delay = in_file_h5.root.HistPixelHitDelayPerPlsrDac[:, :, :]
        tot = in_file_h5.root.HistTotPerPlsrDac[:]

        hist_rel_timewalk = np.amax(hist_timewalk, axis=2)[:, :, np.newaxis] - hist_timewalk
        hist_rel_hit_delay = np.mean(hist_hit_delay[:, :, -1]) - hist_hit_delay

        # Create mask and apply it for bad pixels
        mask = np.ones((336, 80, 50), dtype=np.int8)
        for node in in_file_h5.root.PixelHistsMeanRelBcid:
            pixel_data = node[:, :, :]
            a = (np.sum(pixel_data, axis=2))
            mask[np.isfinite(a), :] = 0
        hist_rel_timewalk = np.ma.masked_array(hist_rel_timewalk, mask)
        hist_hit_delay = np.ma.masked_array(hist_hit_delay, mask)

        output_pdf = PdfPages(self.output_filename + '.pdf')
        plot_hit_delay(np.swapaxes(hist_rel_timewalk, 0, 1) * 25. / delay_calibration, charge_values=charge_values, title='Time walk', xlabel='Charge [e]', ylabel='Time walk [ns]', filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot)
        plot_hit_delay(np.swapaxes(hist_rel_hit_delay, 0, 1) * 25. / delay_calibration, charge_values=charge_values, title='Hit delay', xlabel='Charge [e]', ylabel='Hit delay [ns]', filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot)
        plot_scurves(np.swapaxes(hist_rel_timewalk, 0, 1), scan_parameters=charge_values, title='Timewalk of the FE-I4', scan_parameter_name='Charge [e]', ylabel='Timewalk [ns]', min_x=0, y_scale=25. / delay_calibration, filename=output_pdf)
        plot_scurves(np.swapaxes(hist_hit_delay[:, :, :], 0, 1), scan_parameters=charge_values, title='Hit delay (T0) with internal charge injection\nof the FE-I4', scan_parameter_name='Charge [e]', ylabel='Hit delay [ns]', min_x=0, y_scale=25. / delay_calibration, filename=output_pdf)

        for i in [0, 1, len(plsr_dac_values) / 4, len(plsr_dac_values) / 2, -1]:  # plot 2d hist at min, 1/4, 1/2, max PlsrDAC setting
            plotThreeWay(hist_rel_timewalk[:, :, i] * 25. / delay_calibration, title='Time walk at %.0f e' % (charge_values[i]), x_axis_title='Time walk [ns]', filename=output_pdf)
            plotThreeWay(hist_hit_delay[:, :, i] * 25. / delay_calibration, title='Hit delay (T0) with internal charge injection at %.0f e' % (charge_values[i]), x_axis_title='Hit delay [ns]', minimum=np.amin(hist_hit_delay[:, :, i]), maximum=np.amax(hist_hit_delay[:, :, i]), filename=output_pdf)
        output_pdf.close()
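# Inside the S-curve selection above, NaN entries of the mean relative BCID histogram
# are filled by linear interpolation over the flattened array before rounding and
# differentiating. The same idiom, shown on a small 1D example:
import numpy as np

data = np.array([1.0, np.nan, np.nan, 4.0, 5.0])
nans, x = np.isnan(data), lambda z: z.nonzero()[0]
data[nans] = np.interp(x(nans), x(~nans), data[~nans])
print(data)  # [1. 2. 3. 4. 5.]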
def scan(self):
    if not self.plots_filename:
        self.plots_filename = PdfPages(self.output_filename + '.pdf')
        self.close_plots = True
    else:
        self.close_plots = False
    mask_steps = 3
    enable_mask_steps = []
    cal_lvl1_command = self.register.get_commands("CAL")[0] + self.register.get_commands("zeros", length=40)[0] + self.register.get_commands("LV1")[0] + self.register.get_commands("zeros", mask_steps=mask_steps)[0]
    self.write_target_threshold()
    additional_scan = True
    lastBitResult = np.zeros(shape=self.register.get_pixel_register_value("TDAC").shape, dtype=self.register.get_pixel_register_value("TDAC").dtype)

    self.set_start_tdac()

    self.occupancy_best = np.empty(shape=(80, 336))  # array to store the best occupancy (closest to Ninjections/2) of the pixel
    self.occupancy_best.fill(self.n_injections_tdac)
    self.tdac_mask_best = self.register.get_pixel_register_value("TDAC")

    for scan_parameter_value, tdac_bit in enumerate(self.tdac_tune_bits):
        if additional_scan:
            self.set_tdac_bit(tdac_bit)
            logging.info('TDAC setting: bit %d = 1', tdac_bit)
        else:
            self.set_tdac_bit(tdac_bit, bit_value=0)
            logging.info('TDAC setting: bit %d = 0', tdac_bit)
        self.write_tdac_config()

        with self.readout(TDAC=scan_parameter_value, reset_sram_fifo=True, fill_buffer=True, clear_buffer=True, callback=self.handle_data):
            scan_loop(self, cal_lvl1_command, repeat_command=self.n_injections_tdac, mask_steps=mask_steps, enable_mask_steps=enable_mask_steps, enable_double_columns=None, same_mask_for_all_dc=True, eol_function=None, digital_injection=False, enable_shift_masks=self.enable_shift_masks, disable_shift_masks=self.disable_shift_masks, restore_shift_masks=True, mask=None, double_column_correction=self.pulser_dac_correction)

        occupancy_array, _, _ = np.histogram2d(*convert_data_array(data_array_from_data_iterable(self.fifo_readout.data), filter_func=is_data_record, converter_func=get_col_row_array_from_data_record_array), bins=(80, 336), range=[[1, 80], [1, 336]])
        select_better_pixel_mask = abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)
        pixel_with_too_high_occupancy_mask = occupancy_array > self.n_injections_tdac / 2
        self.occupancy_best[select_better_pixel_mask] = occupancy_array[select_better_pixel_mask]

        if self.plot_intermediate_steps:
            plotThreeWay(occupancy_array.transpose(), title="Occupancy (TDAC tuning bit " + str(tdac_bit) + ")", x_axis_title='Occupancy', filename=self.plots_filename, maximum=self.n_injections_tdac)

        tdac_mask = self.register.get_pixel_register_value("TDAC")
        self.tdac_mask_best[select_better_pixel_mask] = tdac_mask[select_better_pixel_mask]

        if tdac_bit > 0:
            tdac_mask[pixel_with_too_high_occupancy_mask] = tdac_mask[pixel_with_too_high_occupancy_mask] & ~(1 << tdac_bit)
            self.register.set_pixel_register_value("TDAC", tdac_mask)

        if tdac_bit == 0:
            if additional_scan:  # scan bit = 0 with the correct value again
                additional_scan = False
                lastBitResult = occupancy_array.copy()
                self.tdac_tune_bits.append(0)  # bit 0 has to be scanned twice
            else:
                tdac_mask[abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)] = tdac_mask[abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)] | (1 << tdac_bit)
                occupancy_array[abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)] = lastBitResult[abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)]
                self.occupancy_best[abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)] = occupancy_array[abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)]
                self.tdac_mask_best[abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)] = tdac_mask[abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)]

    self.register.set_pixel_register_value("TDAC", self.tdac_mask_best)  # set value for meta scan
    self.write_tdac_config()
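# The TDAC tuning above is a per-pixel successive approximation: each TDAC bit is first
# set, the occupancy is measured, and the bit is cleared again for pixels whose
# occupancy overshoots Ninjections/2; bit 0 is scanned twice to decide its final value.
# A scalar sketch of the same bit-wise search against a hypothetical monotonic response
# function (not pyBAR code, just an illustration of the search):
def tune_dac(target, response, tune_bits=(4, 3, 2, 1, 0)):
    """Find the DAC value whose response is closest to target, one bit at a time."""
    dac = 0
    for bit in tune_bits:
        dac |= (1 << bit)                  # try with this bit set
        if response(dac) > target:         # overshoot -> clear the bit again
            dac &= ~(1 << bit)
    # check bit 0 both ways, like the double scan of bit 0 above
    if abs(response(dac | 1) - target) < abs(response(dac) - target):
        dac |= 1
    return dac


if __name__ == '__main__':
    response = lambda dac: 2 * dac         # hypothetical response
    print(tune_dac(target=42, response=response))  # 21 -> response 42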
def histogram_tdc_hits(input_file_hits, hit_selection_conditions, event_status_select_mask, event_status_condition, calibation_file=None, max_tdc=analysis_configuration['max_tdc'], n_bins=analysis_configuration['n_bins']):
    for condition in hit_selection_conditions:
        logging.info('Histogram tdc hits with %s', condition)

    def get_charge(max_tdc, tdc_calibration_values, tdc_pixel_calibration):  # return the charge from calibration
        charge_calibration = np.zeros(shape=(80, 336, max_tdc))
        for column in range(80):
            for row in range(336):
                actual_pixel_calibration = tdc_pixel_calibration[column, row, :]
                if np.any(actual_pixel_calibration != 0) and np.all(np.isfinite(actual_pixel_calibration)):
                    interpolation = interp1d(x=actual_pixel_calibration, y=tdc_calibration_values, kind='slinear', bounds_error=False, fill_value=0)
                    charge_calibration[column, row, :] = interpolation(np.arange(max_tdc))
        return charge_calibration

    def plot_tdc_tot_correlation(data, condition, output_pdf):
        logging.info('Plot correlation histogram for %s', condition)
        plt.clf()
        data = np.ma.array(data, mask=(data <= 0))
        if np.ma.any(data > 0):
            cmap = cm.get_cmap('jet', 200)
            cmap.set_bad('w')
            plt.title('Correlation with %s' % condition)
            norm = colors.LogNorm()
            z_max = data.max(fill_value=0)
            plt.xlabel('TDC')
            plt.ylabel('TOT')
            im = plt.imshow(data, cmap=cmap, norm=norm, aspect='auto', interpolation='nearest')
            divider = make_axes_locatable(plt.gca())
            plt.gca().invert_yaxis()
            cax = divider.append_axes("right", size="5%", pad=0.1)
            plt.colorbar(im, cax=cax, ticks=np.linspace(start=0, stop=z_max, num=9, endpoint=True))
            output_pdf.savefig()
        else:
            logging.warning('No data for correlation plotting for %s', condition)

    def plot_hits_per_condition(output_pdf):
        logging.info('Plot hits selection efficiency histogram for %d conditions', len(hit_selection_conditions) + 2)
        labels = ['All Hits', 'Hits of\ngood events']
        for condition in hit_selection_conditions:
            condition = re.sub('[&]', '\n', condition)
            condition = re.sub('[()]', '', condition)
            labels.append(condition)
        plt.bar(range(len(n_hits_per_condition)), n_hits_per_condition, align='center')
        plt.xticks(range(len(n_hits_per_condition)), labels, size=8)
        plt.title('Number of hits for different cuts')
        plt.yscale('log')
        plt.ylabel('#')
        plt.grid()
        for x, y in zip(np.arange(len(n_hits_per_condition)), n_hits_per_condition):
            plt.annotate('%d' % (float(y) / float(n_hits_per_condition[0]) * 100.) + r'%', xy=(x, y / 2.), xycoords='data', color='grey', size=15)
        output_pdf.savefig()

    def plot_corrected_tdc_hist(x, y, title, output_pdf, point_style='-'):
        logging.info('Plot TDC hist with TDC calibration')
        plt.clf()
        y /= np.amax(y) if y.shape[0] > 0 else y
        plt.plot(x, y, point_style)
        plt.title(title, size=10)
        plt.xlabel('Charge [PlsrDAC]')
        plt.ylabel('Count [a.u.]')
        plt.grid()
        output_pdf.savefig()

    # Create data
    with tb.openFile(input_file_hits, mode="r") as in_hit_file_h5:
        cluster_hit_table = in_hit_file_h5.root.ClusterHits

        # Result hists, initialized per condition
        pixel_tdc_hists_per_condition = [np.zeros(shape=(80, 336, max_tdc), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        pixel_tdc_timestamp_hists_per_condition = [np.zeros(shape=(80, 336, 256), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        mean_pixel_tdc_hists_per_condition = [np.zeros(shape=(80, 336), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        mean_pixel_tdc_timestamp_hists_per_condition = [np.zeros(shape=(80, 336), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        tdc_hists_per_condition = [np.zeros(shape=(max_tdc), dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else []
        tdc_corr_hists_per_condition = [np.zeros(shape=(max_tdc, 16), dtype=np.uint32) for _ in hit_selection_conditions] if hit_selection_conditions else []

        n_hits_per_condition = [0 for _ in range(len(hit_selection_conditions) + 2)]  # conditions 1, 2 are all hits and hits of good events
        logging.info('Select hits and create TDC histograms for %d cut conditions', len(hit_selection_conditions))
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_hit_table.shape[0], term_width=80)
        progress_bar.start()
        for cluster_hits, _ in analysis_utils.data_aligned_at_events(cluster_hit_table, chunk_size=1e8):
            n_hits_per_condition[0] += cluster_hits.shape[0]
            selected_events_cluster_hits = cluster_hits[np.logical_and(cluster_hits['TDC'] < max_tdc, (cluster_hits['event_status'] & event_status_select_mask) == event_status_condition)]
            n_hits_per_condition[1] += selected_events_cluster_hits.shape[0]
            for index, condition in enumerate(hit_selection_conditions):
                selected_cluster_hits = analysis_utils.select_hits(selected_events_cluster_hits, condition)
                n_hits_per_condition[2 + index] += selected_cluster_hits.shape[0]
                column, row, tdc = selected_cluster_hits['column'] - 1, selected_cluster_hits['row'] - 1, selected_cluster_hits['TDC']
                pixel_tdc_hists_per_condition[index] += analysis_utils.hist_3d_index(column, row, tdc, shape=(80, 336, max_tdc))
                mean_pixel_tdc_hists_per_condition[index] = np.average(pixel_tdc_hists_per_condition[index], axis=2, weights=range(0, max_tdc)) * np.sum(np.arange(0, max_tdc)) / pixel_tdc_hists_per_condition[index].sum(axis=2)
                tdc_timestamp = selected_cluster_hits['TDC_time_stamp']
                pixel_tdc_timestamp_hists_per_condition[index] += analysis_utils.hist_3d_index(column, row, tdc_timestamp, shape=(80, 336, 256))
                mean_pixel_tdc_timestamp_hists_per_condition[index] = np.average(pixel_tdc_timestamp_hists_per_condition[index], axis=2, weights=range(0, 256)) * np.sum(np.arange(0, 256)) / pixel_tdc_timestamp_hists_per_condition[index].sum(axis=2)
                tdc_hists_per_condition[index] = pixel_tdc_hists_per_condition[index].sum(axis=(0, 1))
                tdc_corr_hists_per_condition[index] += analysis_utils.hist_2d_index(tdc, selected_cluster_hits['tot'], shape=(max_tdc, 16))
            progress_bar.update(n_hits_per_condition[0])
        progress_bar.finish()

    # Take the TDC calibration if available and calculate the charge for each TDC value and pixel
    if calibation_file is not None:
        with tb.openFile(calibation_file, mode="r") as in_file_calibration_h5:
            tdc_calibration = in_file_calibration_h5.root.HitOrCalibration[:, :, :, 1]
            tdc_calibration_values = in_file_calibration_h5.root.HitOrCalibration.attrs.scan_parameter_values[:]
            charge_calibration = get_charge(max_tdc, tdc_calibration_values, tdc_calibration)
    else:
        charge_calibration = None

    # Store data of result histograms
    with tb.open_file(input_file_hits[:-3] + '_tdc_hists.h5', mode="w") as out_file_h5:
        for index, condition in enumerate(hit_selection_conditions):
            pixel_tdc_hist_result = np.swapaxes(pixel_tdc_hists_per_condition[index], 0, 1)
            pixel_tdc_timestamp_hist_result = np.swapaxes(pixel_tdc_timestamp_hists_per_condition[index], 0, 1)
            mean_pixel_tdc_hist_result = np.swapaxes(mean_pixel_tdc_hists_per_condition[index], 0, 1)
            mean_pixel_tdc_timestamp_hist_result = np.swapaxes(mean_pixel_tdc_timestamp_hists_per_condition[index], 0, 1)
            tdc_hists_per_condition_result = tdc_hists_per_condition[index]
            tdc_corr_hist_result = np.swapaxes(tdc_corr_hists_per_condition[index], 0, 1)

            # Create result hists
            out_1 = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTdcCondition_%d' % index, title='Hist Pixel Tdc with %s' % condition, atom=tb.Atom.from_dtype(pixel_tdc_hist_result.dtype), shape=pixel_tdc_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_2 = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTdcTimestampCondition_%d' % index, title='Hist Pixel Tdc Timestamp with %s' % condition, atom=tb.Atom.from_dtype(pixel_tdc_timestamp_hist_result.dtype), shape=pixel_tdc_timestamp_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_3 = out_file_h5.createCArray(out_file_h5.root, name='HistMeanPixelTdcCondition_%d' % index, title='Hist Mean Pixel Tdc with %s' % condition, atom=tb.Atom.from_dtype(mean_pixel_tdc_hist_result.dtype), shape=mean_pixel_tdc_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_4 = out_file_h5.createCArray(out_file_h5.root, name='HistMeanPixelTdcTimestampCondition_%d' % index, title='Hist Mean Pixel Tdc Timestamp with %s' % condition, atom=tb.Atom.from_dtype(mean_pixel_tdc_timestamp_hist_result.dtype), shape=mean_pixel_tdc_timestamp_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_5 = out_file_h5.createCArray(out_file_h5.root, name='HistTdcCondition_%d' % index, title='Hist Tdc with %s' % condition, atom=tb.Atom.from_dtype(tdc_hists_per_condition_result.dtype), shape=tdc_hists_per_condition_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_6 = out_file_h5.createCArray(out_file_h5.root, name='HistTdcCorrCondition_%d' % index, title='Hist Correlation Tdc/Tot with %s' % condition, atom=tb.Atom.from_dtype(tdc_corr_hist_result.dtype), shape=tdc_corr_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))

            # Add result hists information
            out_1.attrs.dimensions, out_1.attrs.condition, out_1.attrs.tdc_values = 'column, row, TDC value', condition, range(max_tdc)
            out_2.attrs.dimensions, out_2.attrs.condition, out_2.attrs.tdc_values = 'column, row, TDC time stamp value', condition, range(256)
            out_3.attrs.dimensions, out_3.attrs.condition = 'column, row, mean TDC value', condition
            out_4.attrs.dimensions, out_4.attrs.condition = 'column, row, mean TDC time stamp value', condition
            out_5.attrs.dimensions, out_5.attrs.condition = 'PlsrDAC', condition
            out_6.attrs.dimensions, out_6.attrs.condition = 'TDC, TOT', condition
            out_1[:], out_2[:], out_3[:], out_4[:], out_5[:], out_6[:] = pixel_tdc_hist_result, pixel_tdc_timestamp_hist_result, mean_pixel_tdc_hist_result, mean_pixel_tdc_timestamp_hist_result, tdc_hists_per_condition_result, tdc_corr_hist_result

            if charge_calibration is not None:
                # Select only valid pixels for histogramming: they have data and a calibration (that is any charge(TDC) calibration != 0)
                valid_pixel = np.where(np.logical_and(charge_calibration[:, :, :max_tdc].sum(axis=2) > 0, pixel_tdc_hist_result[:, :, :max_tdc].swapaxes(0, 1).sum(axis=2) > 0))
                mean_charge_calibration = charge_calibration[valid_pixel][:, :max_tdc].mean(axis=0)
                mean_tdc_hist = pixel_tdc_hist_result.swapaxes(0, 1)[valid_pixel][:, :max_tdc].mean(axis=0)
                result_array = np.rec.array(np.column_stack((mean_charge_calibration, mean_tdc_hist)), dtype=[('charge', float), ('count', float)])
                out_6 = out_file_h5.create_table(out_file_h5.root, name='HistMeanTdcCalibratedCondition_%d' % index, description=result_array.dtype, title='Hist Tdc with mean charge calibration and %s' % condition, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_6.attrs.condition = condition
                out_6.attrs.n_pixel = valid_pixel[0].shape[0]
                out_6.append(result_array)
                # Create charge histogram with per-pixel TDC(charge) calibration
                x, y = charge_calibration[valid_pixel][:, :max_tdc].ravel(), np.ravel(pixel_tdc_hist_result.swapaxes(0, 1)[valid_pixel][:, :max_tdc].ravel())
                y, x = y[x > 0], x[x > 0]  # remove the hit TDCs without a proper PlsrDAC(TDC) calibration
                x, y, yerr = analysis_utils.get_profile_histogram(x, y, n_bins=n_bins)
                result_array = np.rec.array(np.column_stack((x, y, yerr)), dtype=[('charge', float), ('count', float), ('count_error', float)])
                out_7 = out_file_h5.create_table(out_file_h5.root, name='HistTdcCalibratedCondition_%d' % index, description=result_array.dtype, title='Hist Tdc with per pixel charge calibration and %s' % condition, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_7.attrs.condition = condition
                out_7.attrs.n_pixel = valid_pixel[0].shape[0]
                out_7.append(result_array)

    # Plot data
    with PdfPages(input_file_hits[:-3] + '_calibrated_tdc_hists.pdf') as output_pdf:
        plot_hits_per_condition(output_pdf)
        with tb.open_file(input_file_hits[:-3] + '_tdc_hists.h5', mode="r") as in_file_h5:
            for node in in_file_h5.root:  # go through the data and plot them
                if 'MeanPixel' in node.name:
                    try:
                        plotThreeWay(np.ma.masked_invalid(node[:]) * 1.5625, title='Mean TDC delay, hits with\n%s' % node._v_attrs.condition if 'Timestamp' in node.name else 'Mean TDC, hits with\n%s' % node._v_attrs.condition, filename=output_pdf)
                    except ValueError:
                        logging.warning('Cannot plot TDC delay')
                elif 'HistTdcCondition' in node.name:
                    hist_1d = node[:]
                    entry_index = np.where(hist_1d != 0)
                    if entry_index[0].shape[0] != 0:
                        max_index = np.amax(entry_index)
                    else:
                        max_index = max_tdc
                    plot_1d_hist(hist_1d[:max_index + 10], title='TDC histogram, hits with\n%s' % node._v_attrs.condition if 'Timestamp' not in node.name else 'TDC time stamp histogram, hits with\n%s' % node._v_attrs.condition, x_axis_title='TDC' if 'Timestamp' not in node.name else 'TDC time stamp', filename=output_pdf)
                elif 'HistPixelTdc' in node.name:
                    hist_3d = node[:]
                    entry_index = np.where(hist_3d.sum(axis=(0, 1)) != 0)
                    if entry_index[0].shape[0] != 0:
                        max_index = np.amax(entry_index)
                    else:
                        max_index = max_tdc
                    best_pixel_index = np.where(hist_3d.sum(axis=2) == np.amax(node[:].sum(axis=2)))
                    if best_pixel_index[0].shape[0] == 1:  # there could be more than one pixel with most hits
                        plot_1d_hist(hist_3d[best_pixel_index][0, :max_index], title='TDC histogram of pixel %d, %d\n%s' % (best_pixel_index[1] + 1, best_pixel_index[0] + 1, node._v_attrs.condition) if 'Timestamp' not in node.name else 'TDC time stamp histogram, hits of pixel %d, %d' % (best_pixel_index[1] + 1, best_pixel_index[0] + 1), x_axis_title='TDC' if 'Timestamp' not in node.name else 'TDC time stamp', filename=output_pdf)
                elif 'HistTdcCalibratedCondition' in node.name:
                    plot_corrected_tdc_hist(node[:]['charge'], node[:]['count'], title='TDC histogram, %d pixel, per pixel TDC calib.\n%s' % (node._v_attrs.n_pixel, node._v_attrs.condition), output_pdf=output_pdf)
                elif 'HistMeanTdcCalibratedCondition' in node.name:
                    plot_corrected_tdc_hist(node[:]['charge'], node[:]['count'], title='TDC histogram, %d pixel, mean TDC calib.\n%s' % (node._v_attrs.n_pixel, node._v_attrs.condition), output_pdf=output_pdf)
                elif 'HistTdcCorr' in node.name:
                    plot_tdc_tot_correlation(node[:], node._v_attrs.condition, output_pdf)
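# get_charge() above inverts a per-pixel charge-to-TDC calibration by interpolating
# PlsrDAC (charge) as a function of the measured TDC value. A minimal standalone sketch
# of that inversion for a single pixel, with made-up calibration points:
import numpy as np
from scipy.interpolate import interp1d

plsr_dac_values = np.array([50, 100, 200, 400, 800])        # injected charge settings
tdc_per_plsr_dac = np.array([40., 90., 180., 350., 620.])   # measured mean TDC per setting

charge_of_tdc = interp1d(x=tdc_per_plsr_dac, y=plsr_dac_values, kind='slinear', bounds_error=False, fill_value=0)
max_tdc = 700
charge_lookup = charge_of_tdc(np.arange(max_tdc))           # charge for every TDC value 0..699

print(charge_lookup[90])   # ~100, right on a calibration point
print(charge_lookup[0])    # 0.0, below the calibrated range -> fill_value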
def scan(self): if not self.plots_filename: self.plots_filename = PdfPages(self.output_filename + '.pdf') self.close_plots = True else: self.close_plots = False cal_lvl1_command = self.register.get_commands("CAL")[0] + self.register.get_commands("zeros", length=40)[0] + self.register.get_commands("LV1")[0] + self.register.get_commands("zeros", mask_steps=self.mask_steps_gdac)[0] self.write_target_threshold() for gdac_bit in self.gdac_tune_bits: # reset all GDAC bits self.set_gdac_bit(gdac_bit, bit_value=0) additional_scan = True last_bit_result = self.n_injections_gdac decreased_threshold = False # needed to determine if the FE is noisy all_bits_zero = True def bits_set(int_type): int_type = int(int_type) count = 0 position = 0 bits_set = [] while(int_type): if(int_type & 1): bits_set.append(position) position += 1 int_type = int_type >> 1 count += 1 return bits_set # calculate selected pixels from the mask and the disabled columns select_mask_array = np.zeros(shape=(80, 336), dtype=np.uint8) if not self.enable_mask_steps_gdac: self.enable_mask_steps_gdac = range(self.mask_steps_gdac) for mask_step in self.enable_mask_steps_gdac: select_mask_array += make_pixel_mask(steps=self.mask_steps_gdac, shift=mask_step) for column in bits_set(self.register.get_global_register_value("DisableColumnCnfg")): logging.info('Deselect double column %d' % column) select_mask_array[column, :] = 0 occupancy_best = 0 vthin_af_best = self.register.get_global_register_value("Vthin_AltFine") vthin_ac_best = self.register.get_global_register_value("Vthin_AltCoarse") for gdac_bit in self.gdac_tune_bits: if additional_scan: self.set_gdac_bit(gdac_bit) scan_parameter_value = (self.register.get_global_register_value("Vthin_AltCoarse") << 8) + self.register.get_global_register_value("Vthin_AltFine") logging.info('GDAC setting: %d, bit %d = 1' % (scan_parameter_value, gdac_bit)) else: self.set_gdac_bit(gdac_bit, bit_value=0) scan_parameter_value = (self.register.get_global_register_value("Vthin_AltCoarse") << 8) + self.register.get_global_register_value("Vthin_AltFine") logging.info('GDAC setting: %d, bit %d = 0' % (scan_parameter_value, gdac_bit)) with self.readout(GDAC=scan_parameter_value): scan_loop(self, cal_lvl1_command, repeat_command=self.n_injections_gdac, mask_steps=self.mask_steps_gdac, enable_mask_steps=self.enable_mask_steps_gdac, enable_double_columns=None, same_mask_for_all_dc=True, eol_function=None, digital_injection=False, enable_shift_masks=self.enable_shift_masks, disable_shift_masks=self.disable_shift_masks, restore_shift_masks=True, mask=None, double_column_correction=self.pulser_dac_correction) self.raw_data_file.append(self.fifo_readout.data, scan_parameters=self.scan_parameters._asdict()) occupancy_array, _, _ = np.histogram2d(*convert_data_array(data_array_from_data_iterable(self.fifo_readout.data), filter_func=is_data_record, converter_func=get_col_row_array_from_data_record_array), bins=(80, 336), range=[[1, 80], [1, 336]]) self.occ_array_sel_pixel = np.ma.array(occupancy_array, mask=np.logical_not(np.ma.make_mask(select_mask_array))) # take only selected pixel into account by creating a mask median_occupancy = np.ma.median(self.occ_array_sel_pixel) if abs(median_occupancy - self.n_injections_gdac / 2) < abs(occupancy_best - self.n_injections_gdac / 2): occupancy_best = median_occupancy vthin_af_best = self.register.get_global_register_value("Vthin_AltFine") vthin_ac_best = self.register.get_global_register_value("Vthin_AltCoarse") if self.plot_intermediate_steps: 
plotThreeWay(self.occ_array_sel_pixel.transpose(), title="Occupancy (GDAC " + str(scan_parameter_value) + " with tuning bit " + str(gdac_bit) + ")", x_axis_title='Occupancy', filename=self.plots_filename, maximum=self.n_injections_gdac) if abs(median_occupancy - self.n_injections_gdac / 2) < self.max_delta_threshold and gdac_bit > 0: # abort if good value already found to save time logging.info('Median = %f, good result already achieved (median - Ninj/2 < %f), skipping not varied bits' % (median_occupancy, self.max_delta_threshold)) break if median_occupancy == 0 and decreased_threshold and all_bits_zero: logging.info('FE noisy?') if gdac_bit > 0: if (median_occupancy < self.n_injections_gdac / 2): # set GDAC bit to 0 if the occupancy is too lowm, thus decrease threshold logging.info('Median = %f < %f, set bit %d = 0' % (median_occupancy, self.n_injections_gdac / 2, gdac_bit)) self.set_gdac_bit(gdac_bit, bit_value=0) decreased_threshold = True else: # set GDAC bit to 1 if the occupancy is too high, thus increase threshold logging.info('Median = %f > %f, leave bit %d = 1' % (median_occupancy, self.n_injections_gdac / 2, gdac_bit)) decreased_threshold = False all_bits_zero = False if gdac_bit == 0: if additional_scan: # scan bit = 0 with the correct value again additional_scan = False last_bit_result = self.occ_array_sel_pixel.copy() self.gdac_tune_bits.append(0) # bit 0 has to be scanned twice else: last_bit_result_median = np.median(last_bit_result[select_mask_array > 0]) logging.info('Scanned bit 0 = 0 with %f instead of %f' % (median_occupancy, last_bit_result_median)) if abs(median_occupancy - self.n_injections_gdac / 2) > abs(last_bit_result_median - self.n_injections_gdac / 2): # if bit 0 = 0 is worse than bit 0 = 1, so go back self.set_gdac_bit(gdac_bit, bit_value=1) logging.info('Set bit 0 = 1') self.occ_array_sel_pixel = last_bit_result median_occupancy = np.ma.median(self.occ_array_sel_pixel) else: logging.info('Set bit 0 = 0') if abs(occupancy_best - self.n_injections_gdac / 2) < abs(median_occupancy - self.n_injections_gdac / 2): logging.info("Binary search converged to non optimal value, take best measured value instead") median_occupancy = occupancy_best self.register.set_global_register_value("Vthin_AltFine", vthin_af_best) self.register.set_global_register_value("Vthin_AltCoarse", vthin_ac_best) if (self.register.get_global_register_value("Vthin_AltFine") == 0 and self.register.get_global_register_value("Vthin_AltCoarse") == 0) or self.register.get_global_register_value("Vthin_AltFine") == 254: logging.warning('GDAC reached minimum/maximum value') if abs(median_occupancy - self.n_injections_gdac / 2) > 2 * self.max_delta_threshold: logging.warning('Global threshold tuning failed. Delta threshold = %f > %f. Vthin_AltCoarse / Vthin_AltFine = %d / %d' % (abs(median_occupancy - self.n_injections_gdac / 2), self.max_delta_threshold, self.register.get_global_register_value("Vthin_AltCoarse"), self.register.get_global_register_value("Vthin_AltFine"))) else: logging.info('Tuned GDAC to Vthin_AltCoarse / Vthin_AltFine = %d / %d' % (self.register.get_global_register_value("Vthin_AltCoarse"), self.register.get_global_register_value("Vthin_AltFine"))) self.vthin_altfine_best = self.register.get_global_register_value("Vthin_AltFine") self.vthin_altcoarse_best = self.register.get_global_register_value("Vthin_AltCoarse")
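# Stand-alone sketch of the bit-wise binary search that the GDAC tuning above performs:
# starting from the most significant tuning bit, each bit is set, the median occupancy is
# measured, and the bit is cleared again if the occupancy already dropped below the target.
# measure_median_occupancy() and the 8-bit DAC width are assumptions for illustration, not
# methods of the scan class above; the real scan additionally re-checks bit 0 and keeps the
# best setting seen.
def tune_dac_binary_search(measure_median_occupancy, target_occupancy, n_bits=8):
    dac = 0
    for bit in reversed(range(n_bits)):  # MSB first
        dac |= (1 << bit)  # try the bit set
        if measure_median_occupancy(dac) < target_occupancy:  # threshold too high -> clear the bit again
            dac &= ~(1 << bit)
    return dac

# usage with a toy, monotonically falling occupancy response (assumption):
# tune_dac_binary_search(lambda dac: max(0, 200 - dac), target_occupancy=100)  # -> 100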
def scan(self): if not self.plots_filename: self.plots_filename = PdfPages(self.output_filename + '.pdf') self.close_plots = True else: self.close_plots = False mask_steps = 3 enable_mask_steps = [] cal_lvl1_command = self.register.get_commands( "CAL")[0] + self.register.get_commands( "zeros", length=40)[0] + self.register.get_commands( "LV1")[0] + self.register.get_commands( "zeros", mask_steps=mask_steps)[0] self.write_target_charge() additional_scan = True lastBitResult = np.zeros( shape=self.register.get_pixel_register_value("FDAC").shape, dtype=self.register.get_pixel_register_value("FDAC").dtype) self.set_start_fdac() self.tot_mean_best = np.empty( shape=(80, 336) ) # array to store the best occupancy (closest to Ninjections/2) of the pixel self.tot_mean_best.fill(0) self.fdac_mask_best = self.register.get_pixel_register_value("FDAC") for scan_parameter_value, fdac_bit in enumerate(self.fdac_tune_bits): if additional_scan: self.set_fdac_bit(fdac_bit) logging.info('FDAC setting: bit %d = 1' % fdac_bit) else: self.set_fdac_bit(fdac_bit, bit_value=0) logging.info('FDAC setting: bit %d = 0' % fdac_bit) self.write_fdac_config() with self.readout(FDAC=scan_parameter_value): scan_loop(self, cal_lvl1_command, repeat_command=self.n_injections_fdac, mask_steps=mask_steps, enable_mask_steps=enable_mask_steps, enable_double_columns=None, same_mask_for_all_dc=True, eol_function=None, digital_injection=False, enable_shift_masks=self.enable_shift_masks, disable_shift_masks=self.disable_shift_masks, restore_shift_masks=True, mask=None, double_column_correction=self.pulser_dac_correction) self.raw_data_file.append( self.fifo_readout.data, scan_parameters=self.scan_parameters._asdict()) col_row_tot = np.column_stack( convert_data_array( data_array_from_data_iterable(self.fifo_readout.data), filter_func=is_data_record, converter_func=get_col_row_tot_array_from_data_record_array )) tot_array = np.histogramdd(col_row_tot, bins=(80, 336, 16), range=[[1, 80], [1, 336], [0, 15]])[0] tot_mean_array = np.average( tot_array, axis=2, weights=range(0, 16)) * sum(range( 0, 16)) / self.n_injections_fdac select_better_pixel_mask = abs( tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.target_tot) pixel_with_too_small_mean_tot_mask = tot_mean_array < self.target_tot self.tot_mean_best[select_better_pixel_mask] = tot_mean_array[ select_better_pixel_mask] if self.plot_intermediate_steps: plotThreeWay(hist=tot_mean_array.transpose().transpose(), title="Mean ToT (FDAC tuning bit " + str(fdac_bit) + ")", x_axis_title='mean ToT', filename=self.plots_filename, minimum=0, maximum=15) fdac_mask = self.register.get_pixel_register_value("FDAC") self.fdac_mask_best[select_better_pixel_mask] = fdac_mask[ select_better_pixel_mask] if fdac_bit > 0: fdac_mask[pixel_with_too_small_mean_tot_mask] = fdac_mask[ pixel_with_too_small_mean_tot_mask] & ~(1 << fdac_bit) self.register.set_pixel_register_value("FDAC", fdac_mask) if fdac_bit == 0: if additional_scan: # scan bit = 0 with the correct value again additional_scan = False lastBitResult = tot_mean_array.copy() self.fdac_tune_bits.append( 0) # bit 0 has to be scanned twice else: fdac_mask[abs(tot_mean_array - self.target_tot) > abs( lastBitResult - self.target_tot )] = fdac_mask[abs(tot_mean_array - self.target_tot) > abs( lastBitResult - self.target_tot)] | (1 << fdac_bit) tot_mean_array[abs(tot_mean_array - self.target_tot) > abs( lastBitResult - self.target_tot)] = lastBitResult[ abs(tot_mean_array - self.target_tot) > abs(lastBitResult - self.target_tot)] 
better_tot_mask = abs(tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.target_tot) # keep the FDAC / mean ToT of the better of the two bit-0 results per pixel, same criterion as select_better_pixel_mask self.tot_mean_best[better_tot_mask] = tot_mean_array[better_tot_mask] self.fdac_mask_best[better_tot_mask] = fdac_mask[better_tot_mask] self.register.set_pixel_register_value( "FDAC", self.fdac_mask_best) # set the best FDAC values for the following scans self.write_fdac_config()
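# Sketch with toy data of how the mean ToT map is derived above from the (column, row, ToT)
# histogram: np.average(..., weights=range(16)) * sum(range(16)) is the ToT-weighted hit sum,
# which divided by the number of injections gives the mean ToT per injection. The histogram
# content and the injection count are assumptions.
import numpy as np

n_injections = 30
tot_hist = np.random.randint(0, 5, size=(80, 336, 16))  # hypothetical per-pixel ToT histogram
tot_mean = np.average(tot_hist, axis=2, weights=range(16)) * sum(range(16)) / float(n_injections)

# equivalent, arguably more direct formulation of the same weighted sum
tot_mean_check = np.tensordot(tot_hist, np.arange(16), axes=([2], [0])) / float(n_injections)
assert np.allclose(tot_mean, tot_mean_check)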
def analyze(self): # plsr_dac_slope = self.register.calibration_parameters['C_Inj_High'] * self.register.calibration_parameters['Vcal_Coeff_1'] plsr_dac_slope = 55. # Interpret data and create hit table with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=False) as analyze_raw_data: analyze_raw_data.create_occupancy_hist = False # too many scan parameters to do in ram histograming analyze_raw_data.create_hit_table = True analyze_raw_data.interpreter.set_warning_output(False) # a lot of data produces unknown words analyze_raw_data.interpret_word_table() analyze_raw_data.interpreter.print_summary() # Create relative BCID and mean relative BCID histogram for each pixel / injection delay / PlsrDAC setting with tb.open_file(self.output_filename + '_analyzed.h5', mode="w") as out_file_h5: hists_folder = out_file_h5.create_group(out_file_h5.root, 'PixelHistsMeanRelBcid') hists_folder_2 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsRelBcid') hists_folder_3 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsTot') hists_folder_4 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsMeanTot') hists_folder_5 = out_file_h5.create_group(out_file_h5.root, 'HistsTot') def store_bcid_histograms(bcid_array, tot_array, tot_pixel_array): logging.debug('Store histograms for PlsrDAC ' + str(old_plsr_dac)) bcid_mean_array = np.average(bcid_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(bcid_array, axis=3).astype('f4') # calculate the mean BCID per pixel and scan parameter tot_pixel_mean_array = np.average(tot_pixel_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(tot_pixel_array, axis=3).astype('f4') # calculate the mean tot per pixel and scan parameter bcid_mean_result = np.swapaxes(bcid_mean_array, 0, 1) bcid_result = np.swapaxes(bcid_array, 0, 1) tot_pixel_result = np.swapaxes(tot_pixel_array, 0, 1) tot_mean_pixel_result = np.swapaxes(tot_pixel_mean_array, 0, 1) out = out_file_h5.createCArray(hists_folder, name='HistPixelMeanRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac, title='Mean relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_mean_result.dtype), shape=bcid_mean_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out.attrs.dimensions = 'column, row, injection delay' out.attrs.injection_delay_values = injection_delay out[:] = bcid_mean_result out_2 = out_file_h5.createCArray(hists_folder_2, name='HistPixelRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac, title='Relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_result.dtype), shape=bcid_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_2.attrs.dimensions = 'column, row, injection delay, relative bcid' out_2.attrs.injection_delay_values = injection_delay out_2[:] = bcid_result out_3 = out_file_h5.createCArray(hists_folder_3, name='HistPixelTotPerDelayPlsrDac_%03d' % old_plsr_dac, title='Tot hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_pixel_result.dtype), shape=tot_pixel_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_3.attrs.dimensions = 'column, row, injection delay' out_3.attrs.injection_delay_values = injection_delay out_3[:] = tot_pixel_result out_4 = out_file_h5.createCArray(hists_folder_4, name='HistPixelMeanTotPerDelayPlsrDac_%03d' % old_plsr_dac, title='Mean tot hist per pixel and 
different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_mean_pixel_result.dtype), shape=tot_mean_pixel_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_4.attrs.dimensions = 'column, row, injection delay' out_4.attrs.injection_delay_values = injection_delay out_4[:] = tot_mean_pixel_result out_5 = out_file_h5.createCArray(hists_folder_5, name='HistTotPlsrDac_%03d' % old_plsr_dac, title='Tot histogram for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_array.dtype), shape=tot_array.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_5.attrs.injection_delay_values = injection_delay out_5[:] = tot_array old_plsr_dac = None # Get scan parameters from interpreted file with tb.open_file(self.output_filename + '_interpreted.h5', 'r') as in_file_h5: scan_parameters_dict = get_scan_parameter(in_file_h5.root.meta_data[:]) plsr_dac = scan_parameters_dict['PlsrDAC'] hists_folder._v_attrs.plsr_dac_values = plsr_dac hists_folder_2._v_attrs.plsr_dac_values = plsr_dac hists_folder_3._v_attrs.plsr_dac_values = plsr_dac hists_folder_4._v_attrs.plsr_dac_values = plsr_dac injection_delay = scan_parameters_dict[scan_parameters_dict.keys()[1]] # injection delay par name is unknown and should be in the inner loop scan_parameters = scan_parameters_dict.keys() bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int16) # bcid array of actual PlsrDAC tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int16) # tot pixel array of actual PlsrDAC tot_array = np.zeros((16,), dtype=np.int32) # tot array of actual PlsrDAC logging.info('Store histograms for PlsrDAC values ' + str(plsr_dac)) progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=max(plsr_dac) - min(plsr_dac), term_width=80) for index, (parameters, hits) in enumerate(get_hits_of_scan_parameter(self.output_filename + '_interpreted.h5', scan_parameters, chunk_size=1.5e7)): if index == 0: progress_bar.start() # start after the event index is created to get reasonable ETA actual_plsr_dac, actual_injection_delay = parameters[0], parameters[1] column, row, rel_bcid, tot = hits['column'] - 1, hits['row'] - 1, hits['relative_BCID'], hits['tot'] bcid_array_fast = hist_3d_index(column, row, rel_bcid, shape=(80, 336, 16)) tot_pixel_array_fast = hist_3d_index(column, row, tot, shape=(80, 336, 16)) tot_array_fast = hist_1d_index(tot, shape=(16,)) if old_plsr_dac != actual_plsr_dac: # Store the data of the actual PlsrDAC value if old_plsr_dac: # Special case for the first PlsrDAC setting store_bcid_histograms(bcid_array, tot_array, tot_pixel_array) progress_bar.update(old_plsr_dac - min(plsr_dac)) # Reset the histrograms for the next PlsrDAC setting bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int8) tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int8) tot_array = np.zeros((16,), dtype=np.int32) old_plsr_dac = actual_plsr_dac injection_delay_index = np.where(np.array(injection_delay) == actual_injection_delay)[0][0] bcid_array[:, :, injection_delay_index, :] += bcid_array_fast tot_pixel_array[:, :, injection_delay_index, :] += tot_pixel_array_fast tot_array += tot_array_fast store_bcid_histograms(bcid_array, tot_array, tot_pixel_array) # save histograms of last PlsrDAC setting progress_bar.finish() # Take the mean relative BCID histogram of each PlsrDAC value and 
calculate the delay for each pixel with tb.open_file(self.output_filename + '_analyzed.h5', mode="r") as in_file_h5: # Create temporary result data structures plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values timewalk = np.zeros(shape=(80, 336, len(plsr_dac_values)), dtype=np.int8) # result array tot = np.zeros(shape=(len(plsr_dac_values),), dtype=np.float16) # result array hit_delay = np.zeros(shape=(80, 336, len(plsr_dac_values)), dtype=np.int8) # result array min_rel_bcid = np.zeros(shape=(80, 336), dtype=np.int8) # Temp array to make sure that the Scurve from the same BCID is used delay_calibration_data = [] delay_calibration_data_error = [] # Calculate the minimum BCID. That is chosen to calculate the hit delay. Calculation does not have to work. plsr_dac_min = min(plsr_dac_values) rel_bcid_min_injection = in_file_h5.get_node(in_file_h5.root.PixelHistsMeanRelBcid, 'HistPixelMeanRelBcidPerDelayPlsrDac_%03d' % plsr_dac_min) injection_delays = np.array(rel_bcid_min_injection.attrs.injection_delay_values) injection_delay_min = np.where(injection_delays == np.amax(injection_delays))[0][0] bcid_min = int(round(np.mean(np.ma.masked_array(rel_bcid_min_injection[:, :, injection_delay_min], np.isnan(rel_bcid_min_injection[:, :, injection_delay_min]))))) - 1 # Info output with progressbar logging.info('Create timewalk info for PlsrDACs ' + str(plsr_dac_values)) progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(plsr_dac_values), term_width=80) progress_bar.start() for index, node in enumerate(in_file_h5.root.PixelHistsMeanRelBcid): # loop over all mean relative BCID hists for all PlsrDAC values # Select the S-curves pixel_data = node[:, :, :] pixel_data_fixed = pixel_data.reshape(pixel_data.shape[0] * pixel_data.shape[1] * pixel_data.shape[2]) # Reshape for interpolation of Nans nans, x = np.isnan(pixel_data_fixed), lambda z: z.nonzero()[0] pixel_data_fixed[nans] = np.interp(x(nans), x(~nans), pixel_data_fixed[~nans]) # interpolate Nans pixel_data_fixed = pixel_data_fixed.reshape(pixel_data.shape[0], pixel_data.shape[1], pixel_data.shape[2]) # Reshape after interpolation of Nans pixel_data_round = np.round(pixel_data_fixed) pixel_data_round_diff = np.diff(pixel_data_round, axis=2) index_sel = np.where(np.logical_and(pixel_data_round_diff > 0., np.isfinite(pixel_data_round_diff))) # Temporary result histograms to be filled first_scurve_mean = np.zeros(shape=(80, 336), dtype=np.int8) # the first S-curve in the data for the lowest injection (for time walk) second_scurve_mean = np.zeros(shape=(80, 336), dtype=np.int8) # the second S-curve in the data (to calibrate one inj. delay step) a_scurve_mean = np.zeros(shape=(80, 336), dtype=np.int8) # the mean of the S-curve at a given rel. 
BCID (for hit delay) # Loop over the S-curve means for (row_index, col_index, delay_index) in np.column_stack((index_sel)): delay = injection_delays[delay_index] if first_scurve_mean[col_index, row_index] == 0: if delay_index == 0: # ignore the first index, can be wrong due to nan filling continue if pixel_data_round[row_index, col_index, delay] >= min_rel_bcid[col_index, row_index]: # make sure to always use the data of the same BCID first_scurve_mean[col_index, row_index] = delay min_rel_bcid[col_index, row_index] = pixel_data_round[row_index, col_index, delay] elif second_scurve_mean[col_index, row_index] == 0 and (delay - first_scurve_mean[col_index, row_index]) > 20: # minimum distance 10, can otherwise be data 'jitter' second_scurve_mean[col_index, row_index] = delay if pixel_data_round[row_index, col_index, delay] == bcid_min: if a_scurve_mean[col_index, row_index] == 0: a_scurve_mean[col_index, row_index] = delay plsr_dac = int(re.search(r'\d+', node.name).group()) plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0] if (np.count_nonzero(first_scurve_mean) - np.count_nonzero(a_scurve_mean)) > 1e3: logging.warning("The common BCID to find the absolute hit delay was set wrong! Hit delay calculation will be wrong.") selection = (second_scurve_mean - first_scurve_mean)[np.logical_and(second_scurve_mean > 0, first_scurve_mean < second_scurve_mean)] delay_calibration_data.append(np.mean(selection)) delay_calibration_data_error.append(np.std(selection)) # Store the actual PlsrDAC data into result hist timewalk[:, :, plsr_dac_index] = first_scurve_mean # Save the plsr delay of first s-curve (for time walk calc.) hit_delay[:, :, plsr_dac_index] = a_scurve_mean # Save the plsr delay of s-curve of fixed rel. BCID (for hit delay calc.) progress_bar.update(index) for index, node in enumerate(in_file_h5.root.HistsTot): # loop over tot hist for all PlsrDAC values plsr_dac = int(re.search(r'\d+', node.name).group()) plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0] tot_data = node[:] tot[plsr_dac_index] = get_mean_from_histogram(tot_data, range(16)) # Calibrate the step size of the injection delay by the average difference of two Scurves of all pixels delay_calibration_mean = np.mean(np.array(delay_calibration_data[2:])[np.isfinite(np.array(delay_calibration_data[2:]))]) delay_calibration, delay_calibration_error = curve_fit(lambda x, par: (par), injection_delays, delay_calibration_data, p0=delay_calibration_mean, sigma=delay_calibration_data_error, absolute_sigma=True) delay_calibration, delay_calibration_error = delay_calibration[0], delay_calibration_error[0][0] progress_bar.finish() # Save time walk / hit delay hists with tb.open_file(self.output_filename + '_analyzed.h5', mode="r+") as out_file_h5: timewalk_result = np.swapaxes(timewalk, 0, 1) hit_delay_result = np.swapaxes(hit_delay, 0, 1) out = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTimewalkPerPlsrDac', title='Time walk per pixel and PlsrDAC', atom=tb.Atom.from_dtype(timewalk_result.dtype), shape=timewalk_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_2 = out_file_h5.createCArray(out_file_h5.root, name='HistPixelHitDelayPerPlsrDac', title='Hit delay per pixel and PlsrDAC', atom=tb.Atom.from_dtype(hit_delay_result.dtype), shape=hit_delay_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_3 = out_file_h5.createCArray(out_file_h5.root, name='HistTotPerPlsrDac', title='Tot per PlsrDAC', atom=tb.Atom.from_dtype(tot.dtype), 
shape=tot.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out.attrs.dimensions = 'column, row, PlsrDAC' out.attrs.delay_calibration = delay_calibration out.attrs.delay_calibration_error = delay_calibration_error out.attrs.plsr_dac_values = plsr_dac_values out_2.attrs.dimensions = 'column, row, PlsrDAC' out_2.attrs.delay_calibration = delay_calibration out_2.attrs.delay_calibration_error = delay_calibration_error out_2.attrs.plsr_dac_values = plsr_dac_values out_3.attrs.dimensions = 'PlsrDAC' out_3.attrs.plsr_dac_values = plsr_dac_values out[:] = timewalk_result out_2[:] = hit_delay_result out_3[:] = tot # Mask the pixels that have non valid data an create plot with the relative time walk for all pixels with tb.open_file(self.output_filename + '_analyzed.h5', mode="r") as in_file_h5: def plot_hit_delay(hist_3d, charge_values, title, xlabel, ylabel, filename, threshold=None, tot_values=None): # Interpolate tot values for second tot axis interpolation = interp1d(tot_values, charge_values, kind='slinear', bounds_error=True) tot = np.arange(16) tot = tot[np.logical_and(tot >= np.amin(tot_values), tot <= np.amax(tot_values))] array = np.transpose(hist_3d, axes=(2, 1, 0)).reshape(hist_3d.shape[2], hist_3d.shape[0] * hist_3d.shape[1]) y = np.mean(array, axis=1) y_err = np.std(array, axis=1) fig = Figure() FigureCanvas(fig) ax = fig.add_subplot(111) fig.patch.set_facecolor('white') ax.grid(True) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xlim((0, np.amax(charge_values))) ax.set_ylim((np.amin(y - y_err), np.amax(y + y_err))) ax.plot(charge_values, y, '.-', color='black', label=title) if threshold is not None: ax.plot([threshold, threshold], [np.amin(y - y_err), np.amax(y + y_err)], linestyle='--', color='black', label='Threshold\n%d e' % (threshold)) ax.fill_between(charge_values, y - y_err, y + y_err, color='gray', alpha=0.5, facecolor='gray', label='RMS') ax2 = ax.twiny() ax2.set_xlabel("ToT") ticklab = ax2.xaxis.get_ticklabels()[0] trans = ticklab.get_transform() ax2.xaxis.set_label_coords(np.amax(charge_values), 1, transform=trans) ax2.set_xlim(ax.get_xlim()) ax2.set_xticks(interpolation(tot)) ax2.set_xticklabels([str(int(i)) for i in tot]) ax.text(0.5, 1.07, title, horizontalalignment='center', fontsize=18, transform=ax2.transAxes) ax.legend() filename.savefig(fig) plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values delay_calibration = in_file_h5.root.HistPixelHitDelayPerPlsrDac._v_attrs.delay_calibration charge_values = np.array(plsr_dac_values)[:] * plsr_dac_slope hist_timewalk = in_file_h5.root.HistPixelTimewalkPerPlsrDac[:, :, :] hist_hit_delay = in_file_h5.root.HistPixelHitDelayPerPlsrDac[:, :, :] tot = in_file_h5.root.HistTotPerPlsrDac[:] hist_rel_timewalk = np.amax(hist_timewalk, axis=2)[:, :, np.newaxis] - hist_timewalk hist_rel_hit_delay = np.mean(hist_hit_delay[:, :, -1]) - hist_hit_delay # Create mask and apply for bad pixels mask = np.ones(hist_rel_timewalk.shape, dtype=np.int8) for node in in_file_h5.root.PixelHistsMeanRelBcid: pixel_data = node[:, :, :] a = (np.sum(pixel_data, axis=2)) mask[np.isfinite(a), :] = 0 hist_rel_timewalk = np.ma.masked_array(hist_rel_timewalk, mask) hist_hit_delay = np.ma.masked_array(hist_hit_delay, mask) output_pdf = PdfPages(self.output_filename + '.pdf') plot_hit_delay(np.swapaxes(hist_rel_timewalk, 0, 1) * 25. 
/ delay_calibration, charge_values=charge_values, title='Time walk', xlabel='Charge [e]', ylabel='Time walk [ns]', filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot) plot_hit_delay(np.swapaxes(hist_rel_hit_delay, 0, 1) * 25. / delay_calibration, charge_values=charge_values, title='Hit delay', xlabel='Charge [e]', ylabel='Hit delay [ns]', filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot) plot_scurves(np.swapaxes(hist_rel_timewalk, 0, 1), scan_parameters=charge_values, title='Timewalk of the FE-I4', scan_parameter_name='Charge [e]', ylabel='Timewalk [ns]', min_x=0, y_scale=25. / delay_calibration, filename=output_pdf) plot_scurves(np.swapaxes(hist_hit_delay[:, :, :], 0, 1), scan_parameters=charge_values, title='Hit delay (T0) with internal charge injection\nof the FE-I4', scan_parameter_name='Charge [e]', ylabel='Hit delay [ns]', min_x=0, y_scale=25. / delay_calibration, filename=output_pdf) for i in [0, 1, len(plsr_dac_values) / 4, len(plsr_dac_values) / 2, -1]: # plot 2d hist at min, 1/4, 1/2, max PlsrDAC setting plotThreeWay(hist_rel_timewalk[:, :, i] * 25. / delay_calibration, title='Time walk at %.0f e' % (charge_values[i]), x_axis_title='Time walk [ns]', filename=output_pdf) plotThreeWay(hist_hit_delay[:, :, i] * 25. / delay_calibration, title='Hit delay (T0) with internal charge injection at %.0f e' % (charge_values[i]), x_axis_title='Hit delay [ns]', minimum=np.amin(hist_hit_delay[:, :, i]), maximum=np.amax(hist_hit_delay[:, :, i]), filename=output_pdf) output_pdf.close()
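# Stand-alone sketch of the NaN interpolation trick used in the analysis above to repair the
# per-pixel mean-BCID curves before differentiating them; the 1D test array is an assumption
# (the scan applies the same lines to the flattened 3D histogram).
import numpy as np

data = np.array([0., 1., np.nan, 3., np.nan, np.nan, 6.])
nans = np.isnan(data)
x = lambda mask: mask.nonzero()[0]  # indices where the mask is True
data[nans] = np.interp(x(nans), x(~nans), data[~nans])  # linear interpolation over the gaps
print(data)  # -> [0. 1. 2. 3. 4. 5. 6.]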
def scan(self): if not self.plots_filename: self.plots_filename = PdfPages(self.output_filename + '.pdf') self.close_plots = True else: self.close_plots = False mask_steps = 3 enable_mask_steps = [] cal_lvl1_command = self.register.get_commands("CAL")[0] + self.register.get_commands("zeros", length=40)[0] + self.register.get_commands("LV1")[0] + self.register.get_commands("zeros", mask_steps=mask_steps)[0] self.write_target_charge() additional_scan = True lastBitResult = np.zeros(shape=self.register.get_pixel_register_value("FDAC").shape, dtype=self.register.get_pixel_register_value("FDAC").dtype) self.set_start_fdac() self.tot_mean_best = np.empty(shape=(80, 336)) # array to store the best occupancy (closest to Ninjections/2) of the pixel self.tot_mean_best.fill(0) self.fdac_mask_best = self.register.get_pixel_register_value("FDAC") for scan_parameter_value, fdac_bit in enumerate(self.fdac_tune_bits): if additional_scan: self.set_fdac_bit(fdac_bit) logging.info('FDAC setting: bit %d = 1', fdac_bit) else: self.set_fdac_bit(fdac_bit, bit_value=0) logging.info('FDAC setting: bit %d = 0', fdac_bit) self.write_fdac_config() with self.readout(FDAC=scan_parameter_value, reset_sram_fifo=True, fill_buffer=True, clear_buffer=True, callback=self.handle_data): scan_loop(self, cal_lvl1_command, repeat_command=self.n_injections_fdac, mask_steps=mask_steps, enable_mask_steps=enable_mask_steps, enable_double_columns=None, same_mask_for_all_dc=True, eol_function=None, digital_injection=False, enable_shift_masks=self.enable_shift_masks, disable_shift_masks=self.disable_shift_masks, restore_shift_masks=True, mask=None, double_column_correction=self.pulser_dac_correction) col_row_tot = np.column_stack(convert_data_array(data_array_from_data_iterable(self.fifo_readout.data), filter_func=is_data_record, converter_func=get_col_row_tot_array_from_data_record_array)) tot_array = np.histogramdd(col_row_tot, bins=(80, 336, 16), range=[[1, 80], [1, 336], [0, 15]])[0] tot_mean_array = np.average(tot_array, axis=2, weights=range(0, 16)) * sum(range(0, 16)) / self.n_injections_fdac select_better_pixel_mask = abs(tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.target_tot) pixel_with_too_small_mean_tot_mask = tot_mean_array < self.target_tot self.tot_mean_best[select_better_pixel_mask] = tot_mean_array[select_better_pixel_mask] if self.plot_intermediate_steps: plotThreeWay(hist=tot_mean_array.transpose().transpose(), title="Mean ToT (FDAC tuning bit " + str(fdac_bit) + ")", x_axis_title='mean ToT', filename=self.plots_filename, minimum=0, maximum=15) fdac_mask = self.register.get_pixel_register_value("FDAC") self.fdac_mask_best[select_better_pixel_mask] = fdac_mask[select_better_pixel_mask] if fdac_bit > 0: fdac_mask[pixel_with_too_small_mean_tot_mask] = fdac_mask[pixel_with_too_small_mean_tot_mask] & ~(1 << fdac_bit) self.register.set_pixel_register_value("FDAC", fdac_mask) if fdac_bit == 0: if additional_scan: # scan bit = 0 with the correct value again additional_scan = False lastBitResult = tot_mean_array.copy() self.fdac_tune_bits.append(0) # bit 0 has to be scanned twice else: fdac_mask[abs(tot_mean_array - self.target_tot) > abs(lastBitResult - self.target_tot)] = fdac_mask[abs(tot_mean_array - self.target_tot) > abs(lastBitResult - self.target_tot)] | (1 << fdac_bit) tot_mean_array[abs(tot_mean_array - self.target_tot) > abs(lastBitResult - self.target_tot)] = lastBitResult[abs(tot_mean_array - self.target_tot) > abs(lastBitResult - self.target_tot)] self.tot_mean_best[abs(tot_mean_array - 
self.target_tot) <= abs(self.tot_mean_best - self.target_tot)] = tot_mean_array[abs(tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.target_tot)] self.fdac_mask_best[abs(tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.target_tot)] = fdac_mask[abs(tot_mean_array - self.target_tot) <= abs(self.tot_mean_best - self.target_tot)] # keep the FDAC / mean ToT of the better of the two bit-0 results per pixel, same criterion as select_better_pixel_mask self.register.set_pixel_register_value("FDAC", self.fdac_mask_best) # set the best FDAC values for the following scans self.write_fdac_config()
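# Toy-data sketch of the per-pixel selection pattern used by the FDAC tuning above: for every
# pixel keep the DAC value whose measured mean ToT is closest to the target. The shapes, the
# target and the random data are assumptions.
import numpy as np

target_tot = 5.0
tot_mean_best = np.zeros((80, 336))                        # best mean ToT seen so far
fdac_mask_best = np.zeros((80, 336), dtype=np.uint8)       # FDAC value belonging to it

tot_mean_new = np.random.uniform(0., 15., size=(80, 336))  # hypothetical new measurement
fdac_mask_new = np.random.randint(0, 16, size=(80, 336), dtype=np.uint8)

better = np.abs(tot_mean_new - target_tot) <= np.abs(tot_mean_best - target_tot)
tot_mean_best[better] = tot_mean_new[better]
fdac_mask_best[better] = fdac_mask_new[better]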
def create_threshold_calibration(scan_base_file_name, create_plots=True): # Create calibration function, can be called stand alone def analyze_raw_data_file(file_name): if os.path.isfile(file_name[:-3] + '_interpreted.h5'): # skip analysis if already done logging.warning('Analyzed data file ' + file_name + ' already exists. Skip analysis for this file.') else: with AnalyzeRawData(raw_data_file=file_name, create_pdf=False) as analyze_raw_data: analyze_raw_data.create_tot_hist = False analyze_raw_data.create_tot_pixel_hist = False analyze_raw_data.create_fitted_threshold_hists = True analyze_raw_data.create_threshold_mask = True analyze_raw_data.interpreter.set_warning_output(False) # RX errors would fill the console analyze_raw_data.interpret_word_table() def store_calibration_data_as_table(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_values): logging.info("Storing calibration data in a table...") filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) mean_threshold_calib_table = out_file_h5.createTable(out_file_h5.root, name='MeanThresholdCalibration', description=data_struct.MeanThresholdCalibrationTable, title='mean_threshold_calibration', filters=filter_table) threshold_calib_table = out_file_h5.createTable(out_file_h5.root, name='ThresholdCalibration', description=data_struct.ThresholdCalibrationTable, title='threshold_calibration', filters=filter_table) for column in range(80): for row in range(336): for parameter_value_index, parameter_value in enumerate(parameter_values): threshold_calib_table.row['column'] = column threshold_calib_table.row['row'] = row threshold_calib_table.row['parameter_value'] = parameter_value threshold_calib_table.row['threshold'] = threshold_calibration[column, row, parameter_value_index] threshold_calib_table.row.append() for parameter_value_index, parameter_value in enumerate(parameter_values): mean_threshold_calib_table.row['parameter_value'] = parameter_value mean_threshold_calib_table.row['mean_threshold'] = mean_threshold_calibration[parameter_value_index] mean_threshold_calib_table.row['threshold_rms'] = mean_threshold_rms_calibration[parameter_value_index] mean_threshold_calib_table.row.append() threshold_calib_table.flush() mean_threshold_calib_table.flush() logging.info("done") def store_calibration_data_as_array(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_name, parameter_values): logging.info("Storing calibration data in an array...") filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) mean_threshold_calib_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdMeanCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_calibration', filters=filter_table) mean_threshold_calib_rms_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdRMSCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_rms_calibration', filters=filter_table) threshold_calib_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdCalibration', atom=tb.Atom.from_dtype(threshold_calibration.dtype), shape=threshold_calibration.shape, title='threshold_calibration', filters=filter_table) mean_threshold_calib_array[:] = mean_threshold_calibration mean_threshold_calib_rms_array[:] = mean_threshold_rms_calibration 
threshold_calib_array[:] = threshold_calibration mean_threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name] mean_threshold_calib_rms_array.attrs.dimensions = ['column', 'row', parameter_name] threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name] mean_threshold_calib_array.attrs.scan_parameter_values = parameter_values mean_threshold_calib_rms_array.attrs.scan_parameter_values = parameter_values threshold_calib_array.attrs.scan_parameter_values = parameter_values logging.info("done") def mask_columns(pixel_array, ignore_columns): idx = np.array(ignore_columns) - 1 # from FE to Array columns m = np.zeros_like(pixel_array) m[:, idx] = 1 return np.ma.masked_array(pixel_array, m) raw_data_files = analysis_utils.get_data_file_names_from_scan_base(scan_base_file_name, filter_file_words=['interpreted', 'calibration_calibration']) first_scan_base_file_name = scan_base_file_name if isinstance(scan_base_file_name, basestring) else scan_base_file_name[0] # multilpe scan_base_file_names for multiple runs with tb.openFile(first_scan_base_file_name + '.h5', mode="r") as in_file_h5: # deduce scan parameters from the first (and often only) scan base file name ignore_columns = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'ignore_columns')]['value'][0] parameter_name = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'scan_parameters')]['value'][0] ignore_columns = ast.literal_eval(ignore_columns) parameter_name = ast.literal_eval(parameter_name)[1][0] calibration_file = first_scan_base_file_name + '_calibration' for raw_data_file in raw_data_files: # analyze each raw data file, not using multithreading here, it is already used in s-curve fit analyze_raw_data_file(raw_data_file) files_per_parameter = analysis_utils.get_parameter_value_from_file_names([file_name[:-3] + '_interpreted.h5' for file_name in raw_data_files], parameter_name, unique=True, sort=True) logging.info("Create calibration from data") mean_threshold_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8') mean_threshold_rms_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8') threshold_calibration = np.empty(shape=(80, 336, len(raw_data_files)), dtype='<f8') if create_plots: logging.info('Saving calibration plots in: %s', calibration_file + '.pdf') output_pdf = PdfPages(calibration_file + '.pdf') progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(files_per_parameter.items()), term_width=80) progress_bar.start() parameter_values = [] for index, (analyzed_data_file, parameters) in enumerate(files_per_parameter.items()): parameter_values.append(parameters.values()[0][0]) with tb.openFile(analyzed_data_file, mode="r") as in_file_h5: occupancy_masked = mask_columns(pixel_array=in_file_h5.root.HistOcc[:], ignore_columns=ignore_columns) # mask the not scanned columns for analysis and plotting thresholds_masked = mask_columns(pixel_array=in_file_h5.root.HistThresholdFitted[:], ignore_columns=ignore_columns) if create_plots: plotThreeWay(hist=thresholds_masked, title='Threshold Fitted for ' + parameters.keys()[0] + ' = ' + str(parameters.values()[0][0]), filename=output_pdf) plsr_dacs = analysis_utils.get_scan_parameter(meta_data_array=in_file_h5.root.meta_data[:])['PlsrDAC'] plot_scurves(occupancy_hist=occupancy_masked, scan_parameters=plsr_dacs, 
scan_parameter_name='PlsrDAC', filename=output_pdf) # fill the calibration data arrays mean_threshold_calibration[index] = np.ma.mean(thresholds_masked) mean_threshold_rms_calibration[index] = np.ma.std(thresholds_masked) threshold_calibration[:, :, index] = thresholds_masked.T progress_bar.update(index) progress_bar.finish() with tb.openFile(calibration_file + '.h5', mode="w") as out_file_h5: store_calibration_data_as_array(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_name=parameter_name, parameter_values=parameter_values) store_calibration_data_as_table(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_values=parameter_values) if create_plots: plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=False, filename=output_pdf) plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=True, filename=output_pdf) output_pdf.close()
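# Minimal sketch of the column masking used in the calibration above: FE columns are 1-based,
# numpy columns 0-based, and masked-array statistics then ignore the excluded columns. The
# threshold map values and the ignore list are assumptions.
import numpy as np

threshold_map = np.random.normal(50., 2., size=(336, 80))  # hypothetical (row, column) threshold map
ignore_columns = (1, 78, 79, 80)

mask = np.zeros_like(threshold_map)
mask[:, np.array(ignore_columns) - 1] = 1                  # FE column number -> array index
thresholds_masked = np.ma.masked_array(threshold_map, mask)
print('mean threshold %.1f, RMS %.1f' % (np.ma.mean(thresholds_masked), np.ma.std(thresholds_masked)))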
def histogram_tdc_hits(input_file_hits, hit_selection_conditions, event_status_select_mask, event_status_condition, calibation_file=None, max_tdc=2000): for condition in hit_selection_conditions: logging.info('Histogram tdc hits with %s' % condition) def get_charge(max_tdc, tdc_calibration_values, tdc_pixel_calibration): # return the charge from calibration charge_calibration = np.zeros(shape=(80, 336, max_tdc)) for column in range(80): for row in range(336): actual_pixel_calibration = tdc_pixel_calibration[column, row, :] if np.any(actual_pixel_calibration != 0): interpolation = interp1d(x=actual_pixel_calibration, y=tdc_calibration_values, kind='slinear', bounds_error=False, fill_value=0) charge_calibration[column, row, :] = interpolation(np.arange(max_tdc)) return charge_calibration with tb.openFile(input_file_hits, mode="r") as in_hit_file_h5: cluster_hit_table = in_hit_file_h5.root.ClusterHits shape_tdc_hist, shape_mean_tdc_hist = (80, 336, max_tdc), (80, 336) shape_tdc_timestamp_hist, shape_mean_tdc_timestamp_hist = (80, 336, 256), (80, 336) tdc_hists_per_condition = [np.zeros(shape=shape_tdc_hist, dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else [] tdc_timestamp_hists_per_condition = [np.zeros(shape=shape_tdc_timestamp_hist, dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else [] mean_tdc_hists_per_condition = [np.zeros(shape=shape_mean_tdc_hist, dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else [] mean_tdc_timestamp_hists_per_condition = [np.zeros(shape=shape_mean_tdc_timestamp_hist, dtype=np.uint16) for _ in hit_selection_conditions] if hit_selection_conditions else [] n_hits_per_condition = [0 for _ in range(len(hit_selection_conditions) + 2)] # 1/2 condition are all hits / hits of goode events for cluster_hits, _ in analysis_utils.data_aligned_at_events(cluster_hit_table, chunk_size=2e7): n_hits_per_condition[0] += cluster_hits.shape[0] selected_events_cluster_hits = cluster_hits[(cluster_hits['event_status'] & event_status_select_mask) == event_status_condition] n_hits_per_condition[1] += selected_events_cluster_hits.shape[0] for index, condition in enumerate(hit_selection_conditions): selected_cluster_hits = analysis_utils.select_hits(selected_events_cluster_hits, condition) n_hits_per_condition[2 + index] += selected_cluster_hits.shape[0] column, row, tdc = selected_cluster_hits['column'] - 1, selected_cluster_hits['row'] - 1, selected_cluster_hits['TDC'] tdc_hists_per_condition[index] += analysis_utils.hist_3d_index(column, row, tdc, shape=shape_tdc_hist) mean_tdc_hists_per_condition[index] = np.average(tdc_hists_per_condition[index], axis=2, weights=range(0, max_tdc)) * np.sum(np.arange(0, max_tdc)) / tdc_hists_per_condition[index].sum(axis=2) tdc_timestamp = selected_cluster_hits['TDC_time_stamp'] tdc_timestamp_hists_per_condition[index] += analysis_utils.hist_3d_index(column, row, tdc_timestamp, shape=shape_tdc_timestamp_hist) mean_tdc_timestamp_hists_per_condition[index] = np.average(tdc_timestamp_hists_per_condition[index], axis=2, weights=range(0, shape_tdc_timestamp_hist[2])) * np.sum(np.arange(0, shape_tdc_timestamp_hist[2])) / tdc_timestamp_hists_per_condition[index].sum(axis=2) plotThreeWay(mean_tdc_hists_per_condition[0].T * 1.5625, title='Mean TDC, condition 1', filename='test_tdc.pdf') # , minimum=50, maximum=250) plotThreeWay(mean_tdc_timestamp_hists_per_condition[0].T * 1.5625, title='Mean TDC delay, condition 1', filename='test_tdc_ts.pdf', minimum=20, 
maximum=60) with tb.open_file(input_file_hits[:-3] + '_tdc_hists.h5', mode="w") as out_file_h5: for index, condition in enumerate(hit_selection_conditions): tdc_hist_result = np.swapaxes(tdc_hists_per_condition[index], 0, 1) tdc_timestamp_hist_result = np.swapaxes(tdc_timestamp_hists_per_condition[index], 0, 1) out = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTdcCondition_%d' % index, title='Hist PixelTdc with %s' % condition, atom=tb.Atom.from_dtype(tdc_hist_result.dtype), shape=tdc_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_2 = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTdcTimestampCondition_%d' % index, title='Hist PixelTdcTimestamp with %s' % condition, atom=tb.Atom.from_dtype(tdc_timestamp_hist_result.dtype), shape=tdc_timestamp_hist_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out.attrs.dimensions = 'column, row, TDC value' out.attrs.condition = condition out.attrs.tdc_values = range(max_tdc) out_2.attrs.dimensions = 'column, row, TDC time stamp value' out_2.attrs.condition = condition out_2.attrs.tdc_values = range(shape_tdc_timestamp_hist[2]) out[:] = tdc_hist_result out_2[:] = tdc_timestamp_hist_result with PdfPages(input_file_hits[:-3] + '_calibrated_tdc_hists.pdf') as output_pdf: logging.info('Create hits selection efficiency histogram for %d conditions' % (len(hit_selection_conditions) + 2)) labels = ['All Hits', 'Hits of\ngood events'] for condition in hit_selection_conditions: condition = re.sub('[&]', '\n', condition) condition = re.sub('[()]', '', condition) labels.append(condition) plt.bar(range(len(n_hits_per_condition)), n_hits_per_condition, align='center') plt.xticks(range(len(n_hits_per_condition)), labels, size=8) plt.title('Number of hits for different cuts') plt.ylabel('#') plt.grid() for x, y in zip(np.arange(len(n_hits_per_condition)), n_hits_per_condition): plt.annotate('%d' % (float(y) / float(n_hits_per_condition[0]) * 100.) + r'%', xy=(x, y / 2.), xycoords='data', color='grey', size=15) output_pdf.savefig() if calibation_file is not None: with tb.openFile(calibation_file, mode="r") as in_file_h5: tdc_calibration = in_file_h5.root.HitOrCalibration[:, :, 1:, 1] tdc_calibration_values = in_file_h5.root.HitOrCalibration.attrs.scan_parameter_values[1:] charge = get_charge(max_tdc, tdc_calibration_values, tdc_calibration) plt.clf() with tb.openFile(input_file_hits[:-3] + '_calibrated_tdc_hists.h5', mode="w") as out_file_h5: logging.info('Create corrected TDC histogram for %d conditions' % len(hit_selection_conditions)) for index, condition in enumerate(hit_selection_conditions): c_str = re.sub('[&]', '\n', condition) x, y = [], [] for column in range(0, 80, 1): for row in range(0, 336, 1): if tdc_hists_per_condition[0][column, row, :].sum() < analysis_configuration['min_pixel_hits']: continue x.extend(charge[column, row, :].ravel()) y.extend(tdc_hists_per_condition[index][column, row, :].ravel()) x, y, _ = analysis_utils.get_profile_histogram(np.array(x) * 55., np.array(y), n_bins=120) result = np.zeros(shape=(x.shape[0], ), dtype=[("x", np.float), ("y", np.float)]) result['x'], result['y'] = x, y actual_tdc_hist_table = out_file_h5.create_table(out_file_h5.root, name='TdcHistTableCondition%d' % index, description=result.dtype, title='TDC histogram', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) actual_tdc_hist_table.append(result) actual_tdc_hist_table.attrs.condition = condition if index == 0: normalization = 100. 
/ np.amax(y) plt.plot(x, y * normalization, '.', label=c_str) # plot the hists of all conditions into one figure plt.plot([27.82 * 55., 27.82 * 55.], [0, 100], label='Threshold %d e' % (27.82 * 55.), linewidth=2) plt.ylim((0, 100)) plt.legend(loc=0, prop={'size': 12}) plt.xlabel('Charge [e]') plt.ylabel('#') plt.grid() output_pdf.savefig()
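# Stand-alone sketch of the per-pixel TDC-to-charge lookup that get_charge() above builds with
# scipy's interp1d, reduced to a single pixel; the calibration points and max_tdc are made-up
# values for illustration.
import numpy as np
from scipy.interpolate import interp1d

tdc_calibration_values = np.array([1000., 2000., 4000., 8000.])  # injected charges, assumption
pixel_tdc_at_calibration = np.array([80., 150., 290., 560.])     # measured TDC at those charges

max_tdc = 700
interpolation = interp1d(x=pixel_tdc_at_calibration, y=tdc_calibration_values,
                         kind='slinear', bounds_error=False, fill_value=0)
charge_of_tdc = interpolation(np.arange(max_tdc))  # charge for every possible TDC value
print(charge_of_tdc[150])  # -> 2000.0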
def scan(self): if not self.plots_filename: self.plots_filename = PdfPages(self.output_filename + '.pdf') self.close_plots = True else: self.close_plots = False mask_steps = 3 enable_mask_steps = [] cal_lvl1_command = self.register.get_commands( "CAL")[0] + self.register.get_commands( "zeros", length=40)[0] + self.register.get_commands( "LV1")[0] + self.register.get_commands( "zeros", mask_steps=mask_steps)[0] self.write_target_threshold() additional_scan = True lastBitResult = np.zeros( shape=self.register.get_pixel_register_value("TDAC").shape, dtype=self.register.get_pixel_register_value("TDAC").dtype) self.set_start_tdac() self.occupancy_best = np.empty( shape=(80, 336) ) # array to store the best occupancy (closest to Ninjections/2) of the pixel self.occupancy_best.fill(self.n_injections_tdac) self.tdac_mask_best = self.register.get_pixel_register_value("TDAC") for scan_parameter_value, tdac_bit in enumerate(self.tdac_tune_bits): if additional_scan: self.set_tdac_bit(tdac_bit) logging.info('TDAC setting: bit %d = 1' % tdac_bit) else: self.set_tdac_bit(tdac_bit, bit_value=0) logging.info('TDAC setting: bit %d = 0' % tdac_bit) self.write_tdac_config() with self.readout(TDAC=scan_parameter_value): scan_loop(self, cal_lvl1_command, repeat_command=self.n_injections_tdac, mask_steps=mask_steps, enable_mask_steps=enable_mask_steps, enable_double_columns=None, same_mask_for_all_dc=True, eol_function=None, digital_injection=False, enable_shift_masks=self.enable_shift_masks, disable_shift_masks=self.disable_shift_masks, restore_shift_masks=True, mask=None, double_column_correction=self.pulser_dac_correction) self.raw_data_file.append( self.fifo_readout.data, scan_parameters=self.scan_parameters._asdict()) occupancy_array, _, _ = np.histogram2d(*convert_data_array( data_array_from_data_iterable(self.fifo_readout.data), filter_func=is_data_record, converter_func=get_col_row_array_from_data_record_array), bins=(80, 336), range=[[1, 80], [1, 336]]) select_better_pixel_mask = abs(occupancy_array - self.n_injections_tdac / 2) <= abs( self.occupancy_best - self.n_injections_tdac / 2) pixel_with_too_high_occupancy_mask = occupancy_array > self.n_injections_tdac / 2 self.occupancy_best[select_better_pixel_mask] = occupancy_array[ select_better_pixel_mask] if self.plot_intermediate_steps: plotThreeWay(occupancy_array.transpose(), title="Occupancy (TDAC tuning bit " + str(tdac_bit) + ")", x_axis_title='Occupancy', filename=self.plots_filename, maximum=self.n_injections_tdac) tdac_mask = self.register.get_pixel_register_value("TDAC") self.tdac_mask_best[select_better_pixel_mask] = tdac_mask[ select_better_pixel_mask] if tdac_bit > 0: tdac_mask[pixel_with_too_high_occupancy_mask] = tdac_mask[ pixel_with_too_high_occupancy_mask] & ~(1 << tdac_bit) self.register.set_pixel_register_value("TDAC", tdac_mask) if tdac_bit == 0: if additional_scan: # scan bit = 0 with the correct value again additional_scan = False lastBitResult = occupancy_array.copy() self.tdac_tune_bits.append( 0) # bit 0 has to be scanned twice else: tdac_mask[ abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)] = tdac_mask[ abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)] | ( 1 << tdac_bit) occupancy_array[ abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)] = lastBitResult[ abs(occupancy_array - self.n_injections_tdac / 2) > abs(lastBitResult - self.n_injections_tdac / 2)] self.occupancy_best[ 
abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)] = occupancy_array[ abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)] self.tdac_mask_best[ abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)] = tdac_mask[ abs(occupancy_array - self.n_injections_tdac / 2) <= abs(self.occupancy_best - self.n_injections_tdac / 2)] self.register.set_pixel_register_value( "TDAC", self.tdac_mask_best) # set value for meta scan self.write_tdac_config()
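# Toy-data sketch of the per-pixel bit handling in the TDAC tuning above: after measuring with
# the current tuning bit set, the bit is cleared again only for pixels whose occupancy exceeds
# half the injections, so every pixel converges to its own DAC value. The shapes, the injection
# count and the random data are assumptions.
import numpy as np

n_injections = 200
tdac_bit = 3
tdac_mask = np.random.randint(0, 32, size=(80, 336), dtype=np.uint8)  # hypothetical TDAC map
occupancy = np.random.randint(0, n_injections + 1, size=(80, 336))

too_high = occupancy > n_injections // 2
clear_bit = np.uint8(~(1 << tdac_bit) & 0xFF)
tdac_mask[too_high] &= clear_bit  # clear the current tuning bit only for too-noisy pixels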