def analyze(self):
    # plsr_dac_slope = self.register.calibration_parameters['C_Inj_High'] * self.register.calibration_parameters['Vcal_Coeff_1']
    plsr_dac_slope = 55.0

    # Interpret data and create hit table
    with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=False) as analyze_raw_data:
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameters to do in-RAM histogramming
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.interpreter.set_warning_output(False)  # a lot of data produces unknown words
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()

    # Create relative BCID and mean relative BCID histograms for each pixel / injection delay / PlsrDAC setting
    with tb.open_file(self.output_filename + "_analyzed.h5", mode="w") as out_file_h5:
        hists_folder = out_file_h5.create_group(out_file_h5.root, "PixelHistsMeanRelBcid")
        hists_folder_2 = out_file_h5.create_group(out_file_h5.root, "PixelHistsRelBcid")
        hists_folder_3 = out_file_h5.create_group(out_file_h5.root, "PixelHistsTot")
        hists_folder_4 = out_file_h5.create_group(out_file_h5.root, "PixelHistsMeanTot")
        hists_folder_5 = out_file_h5.create_group(out_file_h5.root, "HistsTot")

        def store_bcid_histograms(bcid_array, tot_array, tot_pixel_array):
            logging.debug("Store histograms for PlsrDAC " + str(old_plsr_dac))
            bcid_mean_array = np.average(bcid_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(bcid_array, axis=3).astype("f4")  # calculate the mean BCID per pixel and scan parameter
            tot_pixel_mean_array = np.average(tot_pixel_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(tot_pixel_array, axis=3).astype("f4")  # calculate the mean ToT per pixel and scan parameter
            bcid_mean_result = np.swapaxes(bcid_mean_array, 0, 1)
            bcid_result = np.swapaxes(bcid_array, 0, 1)
            tot_pixel_result = np.swapaxes(tot_pixel_array, 0, 1)
            tot_mean_pixel_result = np.swapaxes(tot_pixel_mean_array, 0, 1)

            out = out_file_h5.createCArray(hists_folder, name="HistPixelMeanRelBcidPerDelayPlsrDac_%03d" % old_plsr_dac, title="Mean relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC " + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_mean_result.dtype), shape=bcid_mean_result.shape, filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False))
            out.attrs.dimensions = "column, row, injection delay"
            out.attrs.injection_delay_values = injection_delay
            out[:] = bcid_mean_result

            out_2 = out_file_h5.createCArray(hists_folder_2, name="HistPixelRelBcidPerDelayPlsrDac_%03d" % old_plsr_dac, title="Relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC " + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_result.dtype), shape=bcid_result.shape, filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False))
            out_2.attrs.dimensions = "column, row, injection delay, relative bcid"
            out_2.attrs.injection_delay_values = injection_delay
            out_2[:] = bcid_result

            out_3 = out_file_h5.createCArray(hists_folder_3, name="HistPixelTotPerDelayPlsrDac_%03d" % old_plsr_dac, title="Tot hist per pixel and different PlsrDAC delays for PlsrDAC " + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_pixel_result.dtype), shape=tot_pixel_result.shape, filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False))
            out_3.attrs.dimensions = "column, row, injection delay"
            out_3.attrs.injection_delay_values = injection_delay
            out_3[:] = tot_pixel_result

            out_4 = out_file_h5.createCArray(hists_folder_4, name="HistPixelMeanTotPerDelayPlsrDac_%03d" % old_plsr_dac, title="Mean tot hist per pixel and different PlsrDAC delays for PlsrDAC " + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_mean_pixel_result.dtype), shape=tot_mean_pixel_result.shape, filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False))
            out_4.attrs.dimensions = "column, row, injection delay"
            out_4.attrs.injection_delay_values = injection_delay
            out_4[:] = tot_mean_pixel_result

            out_5 = out_file_h5.createCArray(hists_folder_5, name="HistTotPlsrDac_%03d" % old_plsr_dac, title="Tot histogram for PlsrDAC " + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_array.dtype), shape=tot_array.shape, filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False))
            out_5.attrs.injection_delay_values = injection_delay
            out_5[:] = tot_array

        old_plsr_dac = None

        # Get scan parameters from the interpreted file
        with tb.open_file(self.output_filename + "_interpreted.h5", "r") as in_file_h5:
            scan_parameters_dict = get_scan_parameter(in_file_h5.root.meta_data[:])
            plsr_dac = scan_parameters_dict["PlsrDAC"]
            hists_folder._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_2._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_3._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_4._v_attrs.plsr_dac_values = plsr_dac
            injection_delay = scan_parameters_dict[scan_parameters_dict.keys()[1]]  # injection delay parameter name is unknown and should be in the inner loop
            scan_parameters = scan_parameters_dict.keys()

        bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int16)  # BCID array of the actual PlsrDAC
        tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int16)  # ToT pixel array of the actual PlsrDAC
        tot_array = np.zeros((16,), dtype=np.int32)  # ToT array of the actual PlsrDAC

        logging.info("Store histograms for PlsrDAC values " + str(plsr_dac))
        progress_bar = progressbar.ProgressBar(widgets=["", progressbar.Percentage(), " ", progressbar.Bar(marker="*", left="|", right="|"), " ", progressbar.AdaptiveETA()], maxval=max(plsr_dac) - min(plsr_dac), term_width=80)

        for index, (parameters, hits) in enumerate(get_hits_of_scan_parameter(self.output_filename + "_interpreted.h5", scan_parameters, chunk_size=1.5e7)):
            if index == 0:
                progress_bar.start()  # start after the event index is created to get a reasonable ETA
            actual_plsr_dac, actual_injection_delay = parameters[0], parameters[1]
            column, row, rel_bcid, tot = hits["column"] - 1, hits["row"] - 1, hits["relative_BCID"], hits["tot"]
            bcid_array_fast = hist_3d_index(column, row, rel_bcid, shape=(80, 336, 16))
            tot_pixel_array_fast = hist_3d_index(column, row, tot, shape=(80, 336, 16))
            tot_array_fast = hist_1d_index(tot, shape=(16,))

            if old_plsr_dac != actual_plsr_dac:  # a new PlsrDAC value starts, store the data of the previous one
                if old_plsr_dac:  # do not store before the first PlsrDAC value has been filled
                    store_bcid_histograms(bcid_array, tot_array, tot_pixel_array)
                    progress_bar.update(old_plsr_dac - min(plsr_dac))
                # Reset the histograms for the next PlsrDAC setting
                bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int8)
                tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.int8)
                tot_array = np.zeros((16,), dtype=np.int32)
                old_plsr_dac = actual_plsr_dac

            injection_delay_index = np.where(np.array(injection_delay) == actual_injection_delay)[0][0]
            bcid_array[:, :, injection_delay_index, :] += bcid_array_fast
            tot_pixel_array[:, :, injection_delay_index, :] += tot_pixel_array_fast
            tot_array += tot_array_fast

        store_bcid_histograms(bcid_array, tot_array, tot_pixel_array)  # save the histograms of the last PlsrDAC setting
        progress_bar.finish()

    # Take the mean relative BCID histogram of each PlsrDAC value and calculate the delay for each pixel
    with tb.open_file(self.output_filename + "_analyzed.h5", mode="r") as in_file_h5:
        # Create temporary result data structures
        plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
        timewalk = np.zeros(shape=(80, 336, len(plsr_dac_values)), dtype=np.int8)  # result array
        tot = np.zeros(shape=(len(plsr_dac_values),), dtype=np.float16)  # result array
        hit_delay = np.zeros(shape=(80, 336, len(plsr_dac_values)), dtype=np.int8)  # result array
        min_rel_bcid = np.zeros(shape=(80, 336), dtype=np.int8)  # temp array to make sure that the S-curve from the same BCID is used
        delay_calibration_data = []
        delay_calibration_data_error = []

        # Calculate the minimum BCID that is chosen to calculate the hit delay; this calculation is not guaranteed to work for all pixels
        plsr_dac_min = min(plsr_dac_values)
        rel_bcid_min_injection = in_file_h5.get_node(in_file_h5.root.PixelHistsMeanRelBcid, "HistPixelMeanRelBcidPerDelayPlsrDac_%03d" % plsr_dac_min)
        injection_delays = np.array(rel_bcid_min_injection.attrs.injection_delay_values)
        injection_delay_min = np.where(injection_delays == np.amax(injection_delays))[0][0]
        bcid_min = int(round(np.mean(np.ma.masked_array(rel_bcid_min_injection[:, :, injection_delay_min], np.isnan(rel_bcid_min_injection[:, :, injection_delay_min]))))) - 1

        # Info output with progressbar
        logging.info("Create timewalk info for PlsrDACs " + str(plsr_dac_values))
        progress_bar = progressbar.ProgressBar(widgets=["", progressbar.Percentage(), " ", progressbar.Bar(marker="*", left="|", right="|"), " ", progressbar.AdaptiveETA()], maxval=len(plsr_dac_values), term_width=80)
        progress_bar.start()

        for index, node in enumerate(in_file_h5.root.PixelHistsMeanRelBcid):  # loop over all mean relative BCID hists for all PlsrDAC values
            # Select the S-curves
            pixel_data = node[:, :, :]
            pixel_data_fixed = pixel_data.reshape(pixel_data.shape[0] * pixel_data.shape[1] * pixel_data.shape[2])  # reshape for interpolation of NaNs
            nans, x = np.isnan(pixel_data_fixed), lambda z: z.nonzero()[0]
            pixel_data_fixed[nans] = np.interp(x(nans), x(~nans), pixel_data_fixed[~nans])  # interpolate NaNs
            pixel_data_fixed = pixel_data_fixed.reshape(pixel_data.shape[0], pixel_data.shape[1], pixel_data.shape[2])  # reshape after interpolation of NaNs
            pixel_data_round = np.round(pixel_data_fixed)
            pixel_data_round_diff = np.diff(pixel_data_round, axis=2)
            index_sel = np.where(np.logical_and(pixel_data_round_diff > 0.0, np.isfinite(pixel_data_round_diff)))

            # Temporary result histograms to be filled
            first_scurve_mean = np.zeros(shape=(80, 336), dtype=np.int8)  # the first S-curve in the data for the lowest injection (for time walk)
            second_scurve_mean = np.zeros(shape=(80, 336), dtype=np.int8)  # the second S-curve in the data (to calibrate one injection delay step)
            a_scurve_mean = np.zeros(shape=(80, 336), dtype=np.int8)  # the mean of the S-curve at a given relative BCID (for hit delay)

            # Loop over the S-curve means
            for (row_index, col_index, delay_index) in np.column_stack((index_sel)):
                delay = injection_delays[delay_index]
                if first_scurve_mean[col_index, row_index] == 0:
                    if delay_index == 0:  # ignore the first index, it can be wrong due to NaN filling
                        continue
                    if pixel_data_round[row_index, col_index, delay] >= min_rel_bcid[col_index, row_index]:  # make sure to always use the data of the same BCID
                        first_scurve_mean[col_index, row_index] = delay
                        min_rel_bcid[col_index, row_index] = pixel_data_round[row_index, col_index, delay]
                elif second_scurve_mean[col_index, row_index] == 0 and (delay - first_scurve_mean[col_index, row_index]) > 20:  # minimum distance of 20 delay steps, otherwise it can be data 'jitter'
                    second_scurve_mean[col_index, row_index] = delay
                if pixel_data_round[row_index, col_index, delay] == bcid_min:
                    if a_scurve_mean[col_index, row_index] == 0:
                        a_scurve_mean[col_index, row_index] = delay

            plsr_dac = int(re.search(r"\d+", node.name).group())
            plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0]
            if (np.count_nonzero(first_scurve_mean) - np.count_nonzero(a_scurve_mean)) > 1e3:
                logging.warning("The common BCID to find the absolute hit delay was set wrong! Hit delay calculation will be wrong.")
            selection = (second_scurve_mean - first_scurve_mean)[np.logical_and(second_scurve_mean > 0, first_scurve_mean < second_scurve_mean)]
            delay_calibration_data.append(np.mean(selection))
            delay_calibration_data_error.append(np.std(selection))
            # Store the actual PlsrDAC data into the result hists
            timewalk[:, :, plsr_dac_index] = first_scurve_mean  # save the PlsrDAC delay of the first S-curve (for the time walk calculation)
            hit_delay[:, :, plsr_dac_index] = a_scurve_mean  # save the PlsrDAC delay of the S-curve at fixed relative BCID (for the hit delay calculation)
            progress_bar.update(index)

        for index, node in enumerate(in_file_h5.root.HistsTot):  # loop over the ToT hists for all PlsrDAC values
            plsr_dac = int(re.search(r"\d+", node.name).group())
            plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0]
            tot_data = node[:]
            tot[plsr_dac_index] = get_mean_from_histogram(tot_data, range(16))

        # Calibrate the step size of the injection delay by the average difference of two S-curves of all pixels
        delay_calibration_mean = np.mean(np.array(delay_calibration_data[2:])[np.isfinite(np.array(delay_calibration_data[2:]))])
        delay_calibration, delay_calibration_error = curve_fit(lambda x, par: (par), injection_delays, delay_calibration_data, p0=delay_calibration_mean, sigma=delay_calibration_data_error, absolute_sigma=True)
        delay_calibration, delay_calibration_error = delay_calibration[0], delay_calibration_error[0][0]

        progress_bar.finish()

    # Save time walk / hit delay hists
    with tb.open_file(self.output_filename + "_analyzed.h5", mode="r+") as out_file_h5:
        timewalk_result = np.swapaxes(timewalk, 0, 1)
        hit_delay_result = np.swapaxes(hit_delay, 0, 1)
        out = out_file_h5.createCArray(out_file_h5.root, name="HistPixelTimewalkPerPlsrDac", title="Time walk per pixel and PlsrDAC", atom=tb.Atom.from_dtype(timewalk_result.dtype), shape=timewalk_result.shape, filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False))
        out_2 = out_file_h5.createCArray(out_file_h5.root, name="HistPixelHitDelayPerPlsrDac", title="Hit delay per pixel and PlsrDAC", atom=tb.Atom.from_dtype(hit_delay_result.dtype), shape=hit_delay_result.shape, filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False))
        out_3 = out_file_h5.createCArray(out_file_h5.root, name="HistTotPerPlsrDac", title="Tot per PlsrDAC", atom=tb.Atom.from_dtype(tot.dtype), shape=tot.shape, filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False))
        out.attrs.dimensions = "column, row, PlsrDAC"
        out.attrs.delay_calibration = delay_calibration
        out.attrs.delay_calibration_error = delay_calibration_error
        out.attrs.plsr_dac_values = plsr_dac_values
        out_2.attrs.dimensions = "column, row, PlsrDAC"
        out_2.attrs.delay_calibration = delay_calibration
        out_2.attrs.delay_calibration_error = delay_calibration_error
        out_2.attrs.plsr_dac_values = plsr_dac_values
        out_3.attrs.dimensions = "PlsrDAC"
        out_3.attrs.plsr_dac_values = plsr_dac_values
        out[:] = timewalk_result
        out_2[:] = hit_delay_result
        out_3[:] = tot

    # Mask the pixels that have invalid data and create plots with the relative time walk for all pixels
    with tb.open_file(self.output_filename + "_analyzed.h5", mode="r") as in_file_h5:

        def plot_hit_delay(hist_3d, charge_values, title, xlabel, ylabel, filename, threshold=None, tot_values=None):
            # Interpolate ToT values for the second ToT axis
            interpolation = interp1d(tot_values, charge_values, kind="slinear", bounds_error=True)
            tot = np.arange(16)
            tot = tot[np.logical_and(tot >= np.amin(tot_values), tot <= np.amax(tot_values))]

            array = np.transpose(hist_3d, axes=(2, 1, 0)).reshape(hist_3d.shape[2], hist_3d.shape[0] * hist_3d.shape[1])
            y = np.mean(array, axis=1)
            y_err = np.std(array, axis=1)

            fig = Figure()
            FigureCanvas(fig)
            ax = fig.add_subplot(111)
            fig.patch.set_facecolor("white")
            ax.grid(True)
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)
            ax.set_xlim((0, np.amax(charge_values)))
            ax.set_ylim((np.amin(y - y_err), np.amax(y + y_err)))
            ax.plot(charge_values, y, ".-", color="black", label=title)
            if threshold is not None:
                ax.plot([threshold, threshold], [np.amin(y - y_err), np.amax(y + y_err)], linestyle="--", color="black", label="Threshold\n%d e" % (threshold))
            ax.fill_between(charge_values, y - y_err, y + y_err, color="gray", alpha=0.5, facecolor="gray", label="RMS")
            ax2 = ax.twiny()
            ax2.set_xlabel("ToT")

            ticklab = ax2.xaxis.get_ticklabels()[0]
            trans = ticklab.get_transform()
            ax2.xaxis.set_label_coords(np.amax(charge_values), 1, transform=trans)
            ax2.set_xlim(ax.get_xlim())
            ax2.set_xticks(interpolation(tot))
            ax2.set_xticklabels([str(int(i)) for i in tot])
            ax.text(0.5, 1.07, title, horizontalalignment="center", fontsize=18, transform=ax2.transAxes)
            ax.legend()
            filename.savefig(fig)

        plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
        delay_calibration = in_file_h5.root.HistPixelHitDelayPerPlsrDac._v_attrs.delay_calibration
        charge_values = np.array(plsr_dac_values)[:] * plsr_dac_slope
        hist_timewalk = in_file_h5.root.HistPixelTimewalkPerPlsrDac[:, :, :]
        hist_hit_delay = in_file_h5.root.HistPixelHitDelayPerPlsrDac[:, :, :]
        tot = in_file_h5.root.HistTotPerPlsrDac[:]

        hist_rel_timewalk = np.amax(hist_timewalk, axis=2)[:, :, np.newaxis] - hist_timewalk
        hist_rel_hit_delay = np.mean(hist_hit_delay[:, :, -1]) - hist_hit_delay

        # Create a mask for bad pixels and apply it
        mask = np.ones(hist_rel_timewalk.shape, dtype=np.int8)
        for node in in_file_h5.root.PixelHistsMeanRelBcid:
            pixel_data = node[:, :, :]
            a = np.sum(pixel_data, axis=2)
            mask[np.isfinite(a), :] = 0
        hist_rel_timewalk = np.ma.masked_array(hist_rel_timewalk, mask)
        hist_hit_delay = np.ma.masked_array(hist_hit_delay, mask)

        output_pdf = PdfPages(self.output_filename + ".pdf")
        plot_hit_delay(np.swapaxes(hist_rel_timewalk, 0, 1) * 25.0 / delay_calibration, charge_values=charge_values, title="Time walk", xlabel="Charge [e]", ylabel="Time walk [ns]", filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot)
        plot_hit_delay(np.swapaxes(hist_rel_hit_delay, 0, 1) * 25.0 / delay_calibration, charge_values=charge_values, title="Hit delay", xlabel="Charge [e]", ylabel="Hit delay [ns]", filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot)
        plot_scurves(np.swapaxes(hist_rel_timewalk, 0, 1), scan_parameters=charge_values, title="Timewalk of the FE-I4", scan_parameter_name="Charge [e]", ylabel="Timewalk [ns]", min_x=0, y_scale=25.0 / delay_calibration, filename=output_pdf)
        plot_scurves(np.swapaxes(hist_hit_delay[:, :, :], 0, 1), scan_parameters=charge_values, title="Hit delay (T0) with internal charge injection\nof the FE-I4", scan_parameter_name="Charge [e]", ylabel="Hit delay [ns]", min_x=0, y_scale=25.0 / delay_calibration, filename=output_pdf)

        for i in [0, 1, len(plsr_dac_values) / 4, len(plsr_dac_values) / 2, -1]:  # plot 2d hists at the min, 1/4, 1/2, max PlsrDAC setting
            plot_three_way(hist_rel_timewalk[:, :, i] * 25.0 / delay_calibration, title="Time walk at %.0f e" % (charge_values[i]), x_axis_title="Time walk [ns]", filename=output_pdf)
            plot_three_way(hist_hit_delay[:, :, i] * 25.0 / delay_calibration, title="Hit delay (T0) with internal charge injection at %.0f e" % (charge_values[i]), x_axis_title="Hit delay [ns]", minimum=np.amin(hist_hit_delay[:, :, i]), maximum=np.amax(hist_hit_delay[:, :, i]), filename=output_pdf)

        output_pdf.close()
def analyze_injected_charge(data_analyzed_file):
    logging.info('Analyze the injected charge')
    with tb.openFile(data_analyzed_file, mode="r") as in_file_h5:
        occupancy = in_file_h5.root.HistOcc[:].T
        gdacs = analysis_utils.get_scan_parameter(in_file_h5.root.meta_data[:])['GDAC']
        with PdfPages(data_analyzed_file[:-3] + '.pdf') as plot_file:
            plotting.plot_scatter(gdacs, occupancy.sum(axis=(0, 1)), title='Single pixel hit rate at different thresholds', x_label='Threshold setting [GDAC]', y_label='Single pixel hit rate', log_x=True, filename=plot_file)
            if analysis_configuration['input_file_calibration']:
                with tb.openFile(analysis_configuration['input_file_calibration'], mode="r") as in_file_calibration_h5:  # read the calibration file from the calibrate_threshold_gdac scan
                    mean_threshold_calibration = in_file_calibration_h5.root.MeanThresholdCalibration[:]
                    threshold_calibration_array = in_file_calibration_h5.root.HistThresholdCalibration[:]

                    gdac_range_calibration = np.array(in_file_calibration_h5.root.HistThresholdCalibration._v_attrs.scan_parameter_values)
                    gdac_range_source_scan = gdacs

                    # Select data that is within the given GDAC range, (min_gdac, max_gdac)
                    sel = np.where(np.logical_and(gdac_range_source_scan >= analysis_configuration['min_gdac'], gdac_range_source_scan <= analysis_configuration['max_gdac']))[0]
                    gdac_range_source_scan = gdac_range_source_scan[sel]
                    occupancy = occupancy[:, :, sel]
                    sel = np.where(np.logical_and(gdac_range_calibration >= analysis_configuration['min_gdac'], gdac_range_calibration <= analysis_configuration['max_gdac']))[0]
                    gdac_range_calibration = gdac_range_calibration[sel]
                    threshold_calibration_array = threshold_calibration_array[:, :, sel]

                    logging.info('Analyzing source scan data with %d GDAC settings from %d to %d with minimum step sizes from %d to %d', len(gdac_range_source_scan), np.min(gdac_range_source_scan), np.max(gdac_range_source_scan), np.min(np.gradient(gdac_range_source_scan)), np.max(np.gradient(gdac_range_source_scan)))
                    logging.info('Use calibration data with %d GDAC settings from %d to %d with minimum step sizes from %d to %d', len(gdac_range_calibration), np.min(gdac_range_calibration), np.max(gdac_range_calibration), np.min(np.gradient(gdac_range_calibration)), np.max(np.gradient(gdac_range_calibration)))

                    # Rate normalization of the total hit number for each GDAC setting
                    rate_normalization = 1.
                    if analysis_configuration['normalize_rate']:
                        rate_normalization = analysis_utils.get_rate_normalization(hit_file=hit_file, cluster_file=hit_file, parameter='GDAC', reference=analysis_configuration['normalization_reference'], plot=analysis_configuration['plot_normalization'])

                    # Correct the hit numbers for the different cluster sizes
                    correction_factors = 1.
                    if analysis_configuration['use_cluster_rate_correction']:
                        correction_h5 = tb.openFile(cluster_sizes_file, mode="r")
                        cluster_size_histogram = correction_h5.root.AllHistClusterSize[:]
                        correction_factors = analysis_utils.get_hit_rate_correction(gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_source_scan, cluster_size_histogram=cluster_size_histogram)
                        if analysis_configuration['plot_cluster_sizes']:
                            plot_cluster_sizes(correction_h5, in_file_calibration_h5, gdac_range=gdac_range_source_scan)

                    pixel_thresholds = analysis_utils.get_pixel_thresholds_from_calibration_array(gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_calibration, threshold_calibration_array=threshold_calibration_array)  # interpolates the threshold at the source scan GDAC settings from the calibration
                    pixel_hits = occupancy  # create hit array with shape (col, row, ...)
                    pixel_hits = pixel_hits * correction_factors * rate_normalization

                    # Choose a region with pixels that have a sufficient occupancy but are not too hot
                    good_pixel = analysis_utils.select_good_pixel_region(pixel_hits, col_span=analysis_configuration['col_span'], row_span=analysis_configuration['row_span'], min_cut_threshold=analysis_configuration['min_cut_threshold'], max_cut_threshold=analysis_configuration['max_cut_threshold'])
                    pixel_mask = ~np.ma.getmaskarray(good_pixel)
                    selected_pixel_hits = pixel_hits[pixel_mask, :]  # reduce the data to pixels that are in the good pixel region
                    selected_pixel_thresholds = pixel_thresholds[pixel_mask, :]  # reduce the data to pixels that are in the good pixel region
                    plotting.plot_occupancy(good_pixel.T, title='Selected pixel for analysis (' + str(len(selected_pixel_hits)) + ')', filename=plot_file)

                    # Reshape to one dimension
                    x = selected_pixel_thresholds.flatten()
                    y = selected_pixel_hits.flatten()

                    # Nothing should be NaN/Inf, NaN/Inf is not supported yet
                    if not np.all(np.isfinite(x)) or not np.all(np.isfinite(y)):
                        logging.warning('There are pixels with NaN or INF threshold or hit values, analysis will fail')

                    # Calculate the profile histogram
                    x_p, y_p, y_p_e = analysis_utils.get_profile_histogram(x, y, n_bins=analysis_configuration['n_bins'])  # profile histogram data

                    # Select only the data points where the calibration worked
                    selected_data = np.logical_and(x_p > analysis_configuration['min_thr'] / analysis_configuration['vcal_calibration'], x_p < analysis_configuration['max_thr'] / analysis_configuration['vcal_calibration'])
                    x_p = x_p[selected_data]
                    y_p = y_p[selected_data]
                    y_p_e = y_p_e[selected_data]

                    if len(y_p_e[y_p_e == 0]) != 0:
                        logging.warning('There are bins without any data, guessing the error bars')
                        y_p_e[y_p_e == 0] = np.amin(y_p_e[y_p_e != 0])

                    smoothed_data = analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=0)
                    smoothed_data_diff = analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=1)

                    with tb.openFile(data_analyzed_file[:-3] + '_result.h5', mode="w") as out_file_h5:
                        result_1 = np.rec.array(np.column_stack((x_p, y_p, y_p_e)), dtype=[('charge', float), ('count', float), ('count_error', float)])
                        result_2 = np.rec.array(np.column_stack((x_p, smoothed_data)), dtype=[('charge', float), ('count', float)])
                        result_3 = np.rec.array(np.column_stack((x_p, -smoothed_data_diff)), dtype=[('charge', float), ('count', float)])
                        out_1 = out_file_h5.create_table(out_file_h5.root, name='ProfileHistogram', description=result_1.dtype, title='Single pixel count rate combined with a profile histogram', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        out_2 = out_file_h5.create_table(out_file_h5.root, name='ProfileHistogramSpline', description=result_2.dtype, title='Single pixel count rate combined with a profile histogram and spline smoothed', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        out_3 = out_file_h5.create_table(out_file_h5.root, name='ChargeHistogram', description=result_3.dtype, title='Charge histogram with threshold method and per pixel calibration', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        for key, value in analysis_configuration.iteritems():
                            out_1.attrs[key] = value
                            out_2.attrs[key] = value
                            out_3.attrs[key] = value
                        out_1.append(result_1)
                        out_2.append(result_2)
                        out_3.append(result_3)

                    plot_result(x_p, y_p, y_p_e, smoothed_data, smoothed_data_diff)

                    # Calculate and plot mean results
                    x_mean = analysis_utils.get_mean_threshold_from_calibration(gdac_range_source_scan, mean_threshold_calibration)
                    y_mean = selected_pixel_hits.mean(axis=(0))

                    plotting.plot_scatter(np.array(gdac_range_source_scan), y_mean, log_x=True, plot_range=None, title='Mean single pixel cluster rate at different thresholds', x_label='threshold setting [GDAC]', y_label='mean single pixel cluster rate', filename=plot_file)
                    plotting.plot_scatter(x_mean * analysis_configuration['vcal_calibration'], y_mean, plot_range=(analysis_configuration['min_thr'], analysis_configuration['max_thr']), title='Mean single pixel cluster rate at different thresholds', x_label='mean threshold [e]', y_label='mean single pixel cluster rate', filename=plot_file)

    if analysis_configuration['use_cluster_rate_correction']:
        correction_h5.close()
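
# Sketch only, not pyBAR code: analyze_injected_charge() condenses the per-pixel
# (threshold, hit count) points into a profile histogram via analysis_utils.get_profile_histogram
# before smoothing and differentiating. The helper below shows the usual profile-histogram
# recipe (bin in x, take the mean of y and the error of the mean per bin); it is an assumption
# about what that pyBAR helper returns and is kept here only to make the analysis flow easier to follow.
def _profile_histogram_sketch(x, y, n_bins=100):
    import numpy as np
    x, y = np.asarray(x, dtype=np.float64), np.asarray(y, dtype=np.float64)
    counts, bin_edges = np.histogram(x, bins=n_bins)
    sums, _ = np.histogram(x, bins=bin_edges, weights=y)
    sums_sq, _ = np.histogram(x, bins=bin_edges, weights=y * y)
    with np.errstate(invalid='ignore', divide='ignore'):  # empty bins give NaN
        mean = sums / counts
        std = np.sqrt(sums_sq / counts - mean ** 2)
        mean_error = std / np.sqrt(counts)  # error of the mean per bin
    bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.
    return bin_centers, mean, mean_error
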
def create_hitor_calibration(output_filename):
    logging.info('Analyze and plot results of %s', output_filename)

    def plot_calibration(col_row_combinations, scan_parameter, calibration_data, filename):  # result calibration plot function
        for index, (column, row) in enumerate(col_row_combinations):
            logging.info("Plot calibration for pixel " + str(column) + '/' + str(row))
            fig = Figure()
            FigureCanvas(fig)
            ax = fig.add_subplot(111)
            fig.patch.set_facecolor('white')
            ax.grid(True)
            ax.errorbar(scan_parameter, calibration_data[column - 1, row - 1, :, 0] * 25. + 25., yerr=[calibration_data[column - 1, row - 1, :, 2] * 25, calibration_data[column - 1, row - 1, :, 2] * 25], fmt='o', label='FE-I4 ToT [ns]')
            ax.errorbar(scan_parameter, calibration_data[column - 1, row - 1, :, 1] * 1.5625, yerr=[calibration_data[column - 1, row - 1, :, 3] * 1.5625, calibration_data[column - 1, row - 1, :, 3] * 1.5625], fmt='o', label='TDC ToT [ns]')
            ax.set_title('Calibration for pixel ' + str(column) + '/' + str(row))
            ax.set_xlabel('Charge [PlsrDAC]')
            ax.set_ylabel('TOT')
            ax.legend(loc=0)
            filename.savefig(fig)
            if index > 100:  # stop for too many plots
                logging.info('Do not create pixel plots for more than 100 pixels to save time')
                break

    with AnalyzeRawData(raw_data_file=output_filename, create_pdf=True) as analyze_raw_data:  # interpret the raw data file
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameters to do in-RAM histogramming
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.create_tdc_hist = True
        analyze_raw_data.align_at_tdc = True  # align events at TDC words, the first word of an event has to be a TDC word
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()
        analyze_raw_data.plot_histograms()
        n_injections = analyze_raw_data.n_injections  # store the number of injections for a later cross check

    with tb.open_file(output_filename + '_interpreted.h5', 'r') as in_file_h5:  # get the scan parameters from the interpreted file
        meta_data = in_file_h5.root.meta_data[:]
        hits = in_file_h5.root.Hits[:]
        scan_parameters_dict = get_scan_parameter(meta_data)
        inner_loop_parameter_values = scan_parameters_dict[next(reversed(scan_parameters_dict))]  # inner loop parameter name is unknown
        scan_parameter_names = scan_parameters_dict.keys()
        col_row_combinations = get_unique_scan_parameter_combinations(in_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True)

        meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameter_names)
        parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameter_names)
        event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number'])
        event_ranges_per_parameter = np.column_stack((parameter_values, event_number_ranges))
        event_numbers = hits['event_number'].copy()  # create a contiguous array, otherwise np.searchsorted is too slow, http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays

        with tb.openFile(output_filename + "_calibration.h5", mode="w") as calibration_data_file:
            logging.info('Create calibration')
            output_pdf = PdfPages(output_filename + "_calibration.pdf")
            calibration_data = np.zeros(shape=(80, 336, len(inner_loop_parameter_values), 4), dtype='f4')  # the result of the calibration is a histogram with the axes col_index, row_index, PlsrDAC value and the four entries mean FE ToT, mean TDC ToT, RMS FE ToT, RMS TDC ToT

            progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(event_ranges_per_parameter), term_width=80)
            progress_bar.start()

            for index, (parameter_values, event_start, event_stop) in enumerate(event_ranges_per_parameter):
                if event_stop is None:  # happens for the last chunk
                    event_stop = hits[-1]['event_number']
                array_index = np.searchsorted(event_numbers, np.array([event_start, event_stop]))
                actual_hits = hits[array_index[0]:array_index[1]]
                actual_col, actual_row, parameter_value = parameter_values

                if len(actual_hits[np.logical_and(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row)]):
                    logging.warning('There are %d hits from not selected pixels in the data', len(actual_hits[np.logical_and(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row)]))

                actual_hits = actual_hits[np.logical_and(actual_hits['column'] == actual_col, actual_hits['row'] == actual_row)]
                actual_tdc_hits = actual_hits[(actual_hits['event_status'] & 0b0000111110011100) == 0b0000000100000000]  # only take hits from good events (one TDC word only, no error)
                actual_tot_hits = actual_hits[(actual_hits['event_status'] & 0b0000100010011100) == 0b0000000000000000]  # only take hits from good events for ToT
                tot, tdc = actual_tot_hits['tot'], actual_tdc_hits['TDC']

                if tdc.shape[0] != n_injections and index == event_ranges_per_parameter.shape[0] - 1:
                    logging.warning('There are %d != %d TDC hits for %s = %s', tdc.shape[0], n_injections, str(scan_parameter_names), str(parameter_values))

                inner_loop_scan_parameter_index = np.where(parameter_value == inner_loop_parameter_values)[0][0]  # translate the scan parameter value to an index for the result histogram
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 0] = np.mean(tot)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 1] = np.mean(tdc)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 2] = np.std(tot)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 3] = np.std(tdc)

                progress_bar.update(index)

            calibration_data_out = calibration_data_file.createCArray(calibration_data_file.root, name='HitOrCalibration', title='Hit OR calibration data', atom=tb.Atom.from_dtype(calibration_data.dtype), shape=calibration_data.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            calibration_data_out[:] = calibration_data
            calibration_data_out.attrs.dimensions = scan_parameter_names
            calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values
            plot_calibration(col_row_combinations, scan_parameter=inner_loop_parameter_values, calibration_data=calibration_data, filename=output_pdf)
            output_pdf.close()
            progress_bar.finish()
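
# Sketch only, not pyBAR code: create_hitor_calibration() selects hits through the
# interpreter's per-event status bit field, e.g. (event_status & 0b0000111110011100) ==
# 0b0000000100000000 for the TDC selection. Which bit encodes which condition (single TDC
# word, trigger/event errors, ...) is defined by the pyBAR interpreter and is not restated
# here; the helper below only demonstrates the masked-comparison pattern itself on a
# structured hit array with an 'event_status' field.
def _select_hits_by_event_status(hits, mask, required):
    # keep only hits whose checked status bits (those set in mask) exactly match the required pattern
    return hits[(hits['event_status'] & mask) == required]
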
def analyze_cluster_size_per_scan_parameter(input_file_hits, output_file_cluster_size, parameter='GDAC', max_chunk_size=10000000, overwrite_output_files=False, output_pdf=None): ''' This method takes multiple hit files and determines the cluster size for different scan parameter values of Parameters ---------- input_files_hits: string output_file_cluster_size: string The data file with the results parameter: string The name of the parameter to separate the data into (e.g.: PlsrDAC) max_chunk_size: int the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer overwrite_output_files: bool Set to true to overwrite the output file if it already exists output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen, if False nothing is printed ''' logging.info('Analyze the cluster sizes for different ' + parameter + ' settings for ' + input_file_hits) if os.path.isfile( output_file_cluster_size ) and not overwrite_output_files: # skip analysis if already done logging.info('Analyzed cluster size file ' + output_file_cluster_size + ' already exists. Skip cluster size analysis.') else: with tb.open_file( output_file_cluster_size, mode="w") as out_file_h5: # file to write the data into filter_table = tb.Filters( complib='blosc', complevel=5, fletcher32=False) # compression of the written data parameter_goup = out_file_h5.create_group( out_file_h5.root, parameter, title=parameter) # note to store the data cluster_size_total = None # final array for the cluster size per GDAC with tb.open_file( input_file_hits, mode="r+") as in_hit_file_h5: # open the actual hit file meta_data_array = in_hit_file_h5.root.meta_data[:] scan_parameter = analysis_utils.get_scan_parameter( meta_data_array) # get the scan parameters if scan_parameter: # if a GDAC scan parameter was used analyze the cluster size per GDAC setting scan_parameter_values = scan_parameter[ parameter] # scan parameter settings used if len( scan_parameter_values ) == 1: # only analyze per scan step if there are more than one scan step logging.warning('The file ' + str(input_file_hits) + ' has no different ' + str(parameter) + ' parameter values. 
Omit analysis.') else: logging.info('Analyze ' + input_file_hits + ' per scan parameter ' + parameter + ' for ' + str(len(scan_parameter_values)) + ' values from ' + str(np.amin(scan_parameter_values)) + ' to ' + str(np.amax(scan_parameter_values))) event_numbers = analysis_utils.get_meta_data_at_scan_parameter( meta_data_array, parameter )['event_number'] # get the event numbers in meta_data where the scan parameter changes parameter_ranges = np.column_stack( (scan_parameter_values, analysis_utils.get_ranges_from_array( event_numbers))) hit_table = in_hit_file_h5.root.Hits analysis_utils.index_event_number(hit_table) total_hits, total_hits_2, index = 0, 0, 0 chunk_size = max_chunk_size # initialize the analysis and set settings analyze_data = AnalyzeRawData() analyze_data.create_cluster_size_hist = True analyze_data.create_cluster_tot_hist = True analyze_data.histogram.set_no_scan_parameter( ) # one has to tell histogram the # of scan parameters for correct occupancy hist allocation progress_bar = progressbar.ProgressBar( widgets=[ '', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA() ], maxval=hit_table.shape[0], term_width=80) progress_bar.start() for parameter_index, parameter_range in enumerate( parameter_ranges ): # loop over the selected events analyze_data.reset( ) # resets the data of the last analysis logging.debug( 'Analyze GDAC = ' + str(parameter_range[0]) + ' ' + str( int( float( float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%') start_event_number = parameter_range[1] stop_event_number = parameter_range[2] logging.debug('Data from events = [' + str(start_event_number) + ',' + str(stop_event_number) + '[') actual_parameter_group = out_file_h5.create_group( parameter_goup, name=parameter + '_' + str(parameter_range[0]), title=parameter + '_' + str(parameter_range[0])) # loop over the hits in the actual selected events with optimizations: variable chunk size, start word index given readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up for hits, index in analysis_utils.data_aligned_at_events( hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, chunk_size=chunk_size): total_hits += hits.shape[0] analyze_data.analyze_hits( hits ) # analyze the selected hits in chunks readout_hit_len += hits.shape[0] progress_bar.update(index) chunk_size = int(1.05 * readout_hit_len) if int( 1.05 * readout_hit_len ) < max_chunk_size else max_chunk_size # to increase the readout speed, estimated the number of hits for one read instruction if chunk_size < 50: # limit the lower chunk size, there can always be a crazy event with more than 20 hits chunk_size = 50 # get occupancy hist occupancy = analyze_data.histogram.get_occupancy( ) # just check here if histogram is consistent # store and plot cluster size hist cluster_size_hist = analyze_data.clusterizer.get_cluster_size_hist( ) cluster_size_hist_table = out_file_h5.create_carray( actual_parameter_group, name='HistClusterSize', title='Cluster Size Histogram', atom=tb.Atom.from_dtype( cluster_size_hist.dtype), shape=cluster_size_hist.shape, filters=filter_table) cluster_size_hist_table[:] = cluster_size_hist if output_pdf is not False: plotting.plot_cluster_size( hist=cluster_size_hist, title='Cluster size (' + str(np.sum(cluster_size_hist)) + ' entries) for ' + parameter + ' = ' + str(scan_parameter_values[parameter_index] ), filename=output_pdf) if 
cluster_size_total is None: # true if no data was appended to the array yet cluster_size_total = cluster_size_hist else: cluster_size_total = np.vstack( [cluster_size_total, cluster_size_hist]) total_hits_2 += np.sum(occupancy) progress_bar.finish() if total_hits != total_hits_2: logging.warning( 'Analysis shows inconsistent number of hits. Check needed!' ) logging.info('Analyzed %d hits!', total_hits) cluster_size_total_out = out_file_h5.create_carray( out_file_h5.root, name='AllHistClusterSize', title='All Cluster Size Histograms', atom=tb.Atom.from_dtype(cluster_size_total.dtype), shape=cluster_size_total.shape, filters=filter_table) cluster_size_total_out[:] = cluster_size_total
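# data_aligned_at_events above is read with an adaptive chunk size: start at max_chunk_size,
# then estimate the next chunk as 1.05 x the hits actually read, clamped to [50, max_chunk_size].
# A self-contained sketch of just that sizing logic; read_hit_chunks is a hypothetical reader,
# not the pyBAR generator.
import numpy as np

def read_hit_chunks(hit_table, max_chunk_size=10000000):
    chunk_size = max_chunk_size
    index = 0
    while index < len(hit_table):
        hits = hit_table[index:index + chunk_size]
        yield hits
        index += len(hits)
        readout_hit_len = len(hits)
        # estimate the next read from the last one to keep reads small but not tiny
        chunk_size = int(1.05 * readout_hit_len) if int(1.05 * readout_hit_len) < max_chunk_size else max_chunk_size
        if chunk_size < 50:  # one event can always contain many hits
            chunk_size = 50

for hits in read_hit_chunks(np.arange(100000), max_chunk_size=30000):
    pass  # analyze_data.analyze_hits(hits) in the real analysis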
def analyze_injected_charge(data_analyzed_file): logging.info('Analyze the injected charge') with tb.openFile(data_analyzed_file, mode="r") as in_file_h5: occupancy = in_file_h5.root.HistOcc[:] gdacs = analysis_utils.get_scan_parameter( in_file_h5.root.meta_data[:])['GDAC'] with tb.openFile( analysis_configuration['input_file_calibration'], mode="r" ) as in_file_calibration_h5: # read calibration file from calibrate_threshold_gdac scan mean_threshold_calibration = in_file_calibration_h5.root.MeanThresholdCalibration[:] threshold_calibration_array = in_file_calibration_h5.root.HistThresholdCalibration[:] gdac_range_calibration = mean_threshold_calibration['gdac'] gdac_range_source_scan = gdacs logging.info( 'Analyzing source scan data with %d GDAC settings from %d to %d with minimum step sizes from %d to %d' % (len(gdac_range_source_scan), np.min(gdac_range_source_scan), np.max(gdac_range_source_scan), np.min(np.gradient(gdac_range_source_scan)), np.max(np.gradient(gdac_range_source_scan)))) logging.info( 'Use calibration data with %d GDAC settings from %d to %d with minimum step sizes from %d to %d' % (len(gdac_range_calibration), np.min(gdac_range_calibration), np.max(gdac_range_calibration), np.min(np.gradient(gdac_range_calibration)), np.max(np.gradient(gdac_range_calibration)))) # rate_normalization of the total hit number for each GDAC setting rate_normalization = 1. if analysis_configuration['normalize_rate']: rate_normalization = analysis_utils.get_rate_normalization( hit_file=hit_file, cluster_file=hit_file, parameter='GDAC', reference=analysis_configuration[ 'normalization_reference'], plot=analysis_configuration['plot_normalization']) # correcting the hit numbers for the different cluster sizes correction_factors = 1. if analysis_configuration['use_cluster_rate_correction']: correction_h5 = tb.openFile(cluster_sizes_file, mode="r") cluster_size_histogram = correction_h5.root.AllHistClusterSize[:] correction_factors = analysis_utils.get_hit_rate_correction( gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_source_scan, cluster_size_histogram=cluster_size_histogram) if analysis_configuration['plot_cluster_sizes']: plot_cluster_sizes(correction_h5, in_file_calibration_h5, gdac_range=gdac_range_source_scan) print correction_factors pixel_thresholds = analysis_utils.get_pixel_thresholds_from_calibration_array( gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_calibration, threshold_calibration_array=threshold_calibration_array ) # interpolates the threshold at the source scan GDAC setting from the calibration pixel_hits = np.swapaxes( occupancy, 0, 1) # create hit array with shape (col, row, ...) 
pixel_hits = pixel_hits * correction_factors * rate_normalization # choose region with pixels that have a sufficient occupancy but are not too hot good_pixel = analysis_utils.select_good_pixel_region( pixel_hits, col_span=analysis_configuration['col_span'], row_span=analysis_configuration['row_span'], min_cut_threshold=analysis_configuration['min_cut_threshold'], max_cut_threshold=analysis_configuration['max_cut_threshold']) pixel_mask = ~np.ma.getmaskarray(good_pixel) selected_pixel_hits = pixel_hits[ pixel_mask, :] # reduce the data to pixels that are in the good pixel region selected_pixel_thresholds = pixel_thresholds[ pixel_mask, :] # reduce the data to pixels that are in the good pixel region plotting.plot_occupancy(good_pixel.T, title='Select ' + str(len(selected_pixel_hits)) + ' pixels for analysis') # reshape to one dimension x = selected_pixel_thresholds.flatten() y = selected_pixel_hits.flatten() #nothing should be NAN, NAN is not supported yet if np.isfinite(x).shape != x.shape or np.isfinite( y).shape != y.shape: logging.warning( 'There are pixels with NaN or INF threshold or hit values, analysis will fail' ) # calculated profile histogram x_p, y_p, y_p_e = analysis_utils.get_profile_histogram( x, y, n_bins=analysis_configuration['n_bins'] ) # profile histogram data # select only the data point where the calibration worked selected_data = np.logical_and( x_p > analysis_configuration['min_thr'] / analysis_configuration['vcal_calibration'], x_p < analysis_configuration['max_thr'] / analysis_configuration['vcal_calibration']) x_p = x_p[selected_data] y_p = y_p[selected_data] y_p_e = y_p_e[selected_data] plot_result(x_p, y_p, y_p_e) # calculate and plot mean results x_mean = analysis_utils.get_mean_threshold_from_calibration( gdac_range_source_scan, mean_threshold_calibration) y_mean = selected_pixel_hits.mean(axis=(0)) plotting.plot_scatter( np.array(gdac_range_source_scan), y_mean, log_x=True, plot_range=None, title='Mean single pixel cluster rate at different thresholds', x_label='threshold setting [GDAC]', y_label='mean single pixel cluster rate') plotting.plot_scatter( x_mean * analysis_configuration['vcal_calibration'], y_mean, plot_range=(analysis_configuration['min_thr'], analysis_configuration['max_thr']), title='Mean single pixel cluster rate at different thresholds', x_label='mean threshold [e]', y_label='mean single pixel cluster rate') if analysis_configuration['use_cluster_rate_correction']: correction_h5.close()
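# analysis_utils.get_profile_histogram above reduces the (threshold, rate) points of all selected
# pixels to a mean rate per threshold bin. A minimal numpy-only stand-in; the error here is the
# standard error of the bin mean, the pyBAR helper may define it differently.
import numpy as np

def profile_histogram(x, y, n_bins=100):
    bin_edges = np.linspace(np.min(x), np.max(x), n_bins + 1)
    bin_index = np.clip(np.digitize(x, bin_edges) - 1, 0, n_bins - 1)
    counts = np.bincount(bin_index, minlength=n_bins).astype('f8')
    counts_safe = np.maximum(counts, 1.)
    mean = np.bincount(bin_index, weights=y, minlength=n_bins) / counts_safe
    var = np.bincount(bin_index, weights=(y - mean[bin_index]) ** 2, minlength=n_bins) / counts_safe
    x_center = (bin_edges[:-1] + bin_edges[1:]) / 2.
    valid = counts > 0
    return x_center[valid], mean[valid], np.sqrt(var[valid] / counts_safe[valid])

x = np.random.uniform(10., 100., 100000)  # toy pixel thresholds
y = 1000. / x + np.random.normal(0., 1., x.shape)  # toy rate falling with threshold
x_p, y_p, y_p_e = profile_histogram(x, y, n_bins=50)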
def create_hitor_calibration(output_filename, plot_pixel_calibrations=False): '''Generating HitOr calibration file (_calibration.h5) from raw data file and plotting of calibration data. Parameters ---------- output_filename : string Input raw data file name. plot_pixel_calibrations : bool, iterable If True, genearating additional pixel calibration plots. If list of column and row tuples (from 1 to 80 / 336), print selected pixels. Returns ------- nothing ''' logging.info('Analyze HitOR calibration data and plot results of %s', output_filename) with AnalyzeRawData(raw_data_file=output_filename, create_pdf=True ) as analyze_raw_data: # Interpret the raw data file analyze_raw_data.create_occupancy_hist = False # too many scan parameters to do in ram histogramming analyze_raw_data.create_hit_table = True analyze_raw_data.create_tdc_hist = True analyze_raw_data.align_at_tdc = True # align events at TDC words, first word of event has to be a tdc word analyze_raw_data.interpret_word_table() analyze_raw_data.interpreter.print_summary() analyze_raw_data.plot_histograms() n_injections = analyze_raw_data.n_injections # use later with tb.open_file( analyze_raw_data._analyzed_data_file, 'r' ) as in_file_h5: # Get scan parameters from interpreted file meta_data = in_file_h5.root.meta_data[:] scan_parameters_dict = get_scan_parameter(meta_data) inner_loop_parameter_values = scan_parameters_dict[next( reversed(scan_parameters_dict) )] # inner loop parameter name is unknown scan_parameter_names = scan_parameters_dict.keys() # col_row_combinations = get_unique_scan_parameter_combinations(in_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True) meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations( meta_data, scan_parameters=scan_parameter_names) scan_parameter_values = get_scan_parameters_table_from_meta_data( meta_data_table_at_scan_parameter, scan_parameter_names) event_number_ranges = get_ranges_from_array( meta_data_table_at_scan_parameter['event_number']) event_ranges_per_parameter = np.column_stack( (scan_parameter_values, event_number_ranges)) hits = in_file_h5.root.Hits[:] event_numbers = hits['event_number'].copy( ) # create contigous array, otherwise np.searchsorted too slow, http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays output_filename = os.path.splitext(output_filename)[0] with tb.open_file(output_filename + "_calibration.h5", mode="w") as calibration_data_file: logging.info('Create calibration') calibration_data = np.full( shape=(80, 336, len(inner_loop_parameter_values), 4), fill_value=np.nan, dtype='f4' ) # result of the calibration is a histogram with col_index, row_index, plsrDAC value, mean discrete tot, rms discrete tot, mean tot from TDC, rms tot from TDC progress_bar = progressbar.ProgressBar( widgets=[ '', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA() ], maxval=len(event_ranges_per_parameter), term_width=80) progress_bar.start() for index, ( actual_scan_parameter_values, event_start, event_stop) in enumerate(event_ranges_per_parameter): if event_stop is None: # happens for the last chunk event_stop = hits[-1]['event_number'] + 1 array_index = np.searchsorted( event_numbers, np.array([event_start, event_stop])) actual_hits = hits[array_index[0]:array_index[1]] for item_index, item in enumerate(scan_parameter_names): if item == "column": actual_col = actual_scan_parameter_values[ item_index] elif item 
== "row": actual_row = actual_scan_parameter_values[ item_index] elif item == "PlsrDAC": plser_dac = actual_scan_parameter_values[ item_index] else: raise ValueError("Unknown scan parameter %s" % item) # Only pixel of actual column/row should be in the actual data chunk but since SRAM is not cleared for each scan step due to speed reasons and there might be noisy pixels this is not always the case n_wrong_pixel = np.count_nonzero( np.logical_or(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row)) if n_wrong_pixel != 0: logging.warning( '%d hit(s) from other pixels for scan parameters %s', n_wrong_pixel, ', '.join([ '%s=%s' % (name, value) for (name, value ) in zip(scan_parameter_names, actual_scan_parameter_values) ])) actual_hits = actual_hits[np.logical_and( actual_hits['column'] == actual_col, actual_hits['row'] == actual_row)] # Only take data from selected pixel actual_tdc_hits = actual_hits[ (actual_hits['event_status'] & 0b0000111110011100) == 0b0000000100000000] # only take hits from good events (one TDC word only, no error) actual_tot_hits = actual_hits[ (actual_hits['event_status'] & 0b0000100010011100) == 0b0000000000000000] # only take hits from good events for tot tot, tdc = actual_tot_hits['tot'], actual_tdc_hits['TDC'] if tdc.shape[0] < n_injections: logging.info( '%d of %d expected TDC hits for scan parameters %s', tdc.shape[0], n_injections, ', '.join([ '%s=%s' % (name, value) for (name, value ) in zip(scan_parameter_names, actual_scan_parameter_values) ])) if tot.shape[0] < n_injections: logging.info( '%d of %d expected hits for scan parameters %s', tot.shape[0], n_injections, ', '.join([ '%s=%s' % (name, value) for (name, value ) in zip(scan_parameter_names, actual_scan_parameter_values) ])) inner_loop_scan_parameter_index = np.where( plser_dac == inner_loop_parameter_values )[0][ 0] # translate the scan parameter value to an index for the result histogram # numpy mean and std return nan if array is empty calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 0] = np.mean(tot) calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 1] = np.mean(tdc) calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 2] = np.std(tot) calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 3] = np.std(tdc) progress_bar.update(index) progress_bar.finish() calibration_data_out = calibration_data_file.create_carray( calibration_data_file.root, name='HitOrCalibration', title='Hit OR calibration data', atom=tb.Atom.from_dtype(calibration_data.dtype), shape=calibration_data.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) calibration_data_out[:] = calibration_data calibration_data_out.attrs.dimensions = scan_parameter_names calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values calibration_data_out.flush() # with PdfPages(output_filename + "_calibration.pdf") as output_pdf: plot_scurves(calibration_data[:, :, :, 0], inner_loop_parameter_values, "ToT calibration", "ToT", 15, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf) plot_scurves(calibration_data[:, :, :, 1], inner_loop_parameter_values, "TDC calibration", "TDC [ns]", None, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf) tot_mean_all_pix = np.nanmean(calibration_data[:, :, :, 0], axis=(0, 1)) tot_error_all_pix = np.nanstd(calibration_data[:, :, :, 0], axis=(0, 1)) tdc_mean_all_pix = np.nanmean(calibration_data[:, :, :, 1], axis=(0, 1)) 
tdc_error_all_pix = np.nanstd(calibration_data[:, :, :, 1], axis=(0, 1)) plot_tot_tdc_calibration( scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_all_pix, tot_error=tot_error_all_pix, tdc_mean=tdc_mean_all_pix, tdc_error=tdc_error_all_pix, filename=analyze_raw_data.output_pdf, title="Mean charge calibration of %d pixel(s)" % np.count_nonzero(~np.all( np.isnan(calibration_data[:, :, :, 0]), axis=2))) # plotting individual pixels if plot_pixel_calibrations is True: # selecting pixels with non-nan entries col_row_non_nan = np.nonzero(~np.all( np.isnan(calibration_data[:, :, :, 0]), axis=2)) plot_pixel_calibrations = np.dstack(col_row_non_nan)[0] elif plot_pixel_calibrations is False: plot_pixel_calibrations = np.array([], dtype=np.int) else: # assuming list of column / row tuples plot_pixel_calibrations = np.array( plot_pixel_calibrations) - 1 # generate index array pixel_indices = np.arange(plot_pixel_calibrations.shape[0]) plot_n_pixels = 10 # number of pixels at the beginning, center and end of the array np.random.seed(0) # select random pixels if pixel_indices.size - 2 * plot_n_pixels >= 0: random_pixel_indices = np.sort( np.random.choice( pixel_indices[plot_n_pixels:-plot_n_pixels], min(plot_n_pixels, pixel_indices.size - 2 * plot_n_pixels), replace=False)) else: random_pixel_indices = np.array([], dtype=np.int) selected_pixel_indices = np.unique( np.hstack([ pixel_indices[:plot_n_pixels], random_pixel_indices, pixel_indices[-plot_n_pixels:] ])) # plotting individual pixels for (column, row) in plot_pixel_calibrations[selected_pixel_indices]: logging.info( "Plotting charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1)) tot_mean_single_pix = calibration_data[column, row, :, 0] tot_std_single_pix = calibration_data[column, row, :, 2] tdc_mean_single_pix = calibration_data[column, row, :, 1] tdc_std_single_pix = calibration_data[column, row, :, 3] plot_tot_tdc_calibration( scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_single_pix, tot_error=tot_std_single_pix, tdc_mean=tdc_mean_single_pix, tdc_error=tdc_std_single_pix, filename=analyze_raw_data.output_pdf, title="Charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1))
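# The TDC/ToT selections above are plain bit tests on the event_status word:
# (event_status & mask) == required keeps events where every bit covered by the mask has exactly
# the required value and ignores all other bits. The status words below are made up for
# illustration; only the mask/required values are taken from the selection above.
import numpy as np

tdc_mask, tdc_required = 0b0000111110011100, 0b0000000100000000
event_status = np.array([0b0000000100000000,   # masked bits match -> selected
                         0b0000000100000010,   # differs only outside the mask -> still selected
                         0b0000001100000000,   # an additional masked bit is set -> rejected
                         0b0000000000000000],  # required bit missing -> rejected
                        dtype=np.uint16)
good_tdc_events = (event_status & tdc_mask) == tdc_required
print(good_tdc_events)  # -> [ True  True False False]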
def analyze(self): logging.info('Analyze and plot results') def plot_calibration(col_row_combinations, scan_parameter, calibration_data, repeat_command, filename): # Result calibration plot function for index, (column, row) in enumerate(col_row_combinations): logging.info("Plot calibration for pixel " + str(column) + '/' + str(row)) fig = Figure() canvas = FigureCanvas(fig) ax = fig.add_subplot(111) fig.patch.set_facecolor('white') ax.grid(True) ax.errorbar( scan_parameter, calibration_data[column - 1, row - 1, :, 0] * 25. + 25., yerr=[ calibration_data[column - 1, row - 1, :, 2] * 25, calibration_data[column - 1, row - 1, :, 2] * 25 ], fmt='o', label='FE-I4 ToT [ns]') ax.errorbar( scan_parameter, calibration_data[column - 1, row - 1, :, 1] * 1.5625, yerr=[ calibration_data[column - 1, row - 1, :, 3] * 1.5625, calibration_data[column - 1, row - 1, :, 3] * 1.5625 ], fmt='o', label='TDC ToT [ns]') ax.set_title('Calibration for pixel ' + str(column) + '/' + str(row) + '; ' + str(repeat_command) + ' injections per setting') ax.set_xlabel('Charge [PlsrDAC]') ax.set_ylabel('TOT') ax.legend(loc=0) filename.savefig(fig) if index > 100: # stop for too many plots break with AnalyzeRawData( raw_data_file=self.output_filename, create_pdf=True ) as analyze_raw_data: # Interpret the raw data file analyze_raw_data.create_occupancy_hist = False # too many scan parameters to do in ram histograming analyze_raw_data.create_hit_table = True analyze_raw_data.create_tdc_hist = True analyze_raw_data.interpreter.use_tdc_word( True ) # align events at TDC words, first word of event has to be a tdc word analyze_raw_data.interpret_word_table() analyze_raw_data.interpreter.print_summary() analyze_raw_data.plot_histograms() with tb.open_file( self.output_filename + '_interpreted.h5', 'r' ) as in_file_h5: # Get scan parameters from interpreted file scan_parameters_dict = get_scan_parameter( in_file_h5.root.meta_data[:]) inner_loop_parameter_values = scan_parameters_dict[next( reversed(scan_parameters_dict) )] # inner loop parameter name is unknown scan_parameter_names = scan_parameters_dict.keys() n_par_combinations = len( get_unique_scan_parameter_combinations( in_file_h5.root.meta_data[:])) col_row_combinations = get_unique_scan_parameter_combinations( in_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True) with tb.openFile(self.output_filename + "_calibration.h5", mode="w") as calibration_data_file: logging.info('Create calibration') output_pdf = PdfPages(self.output_filename + "_calibration.pdf") calibration_data = np.zeros( shape=(80, 336, len(inner_loop_parameter_values), 4), dtype='f4' ) # result of the calibration is a histogram with col_index, row_index, plsrDAC value, mean discrete tot, rms discrete tot, mean tot from TDC, rms tot from TDC progress_bar = progressbar.ProgressBar(widgets=[ '', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA() ], maxval=n_par_combinations, term_width=80) old_scan_parameters = None tot_data = None tdc_data = None for index, (actual_scan_parameters, hits) in enumerate( get_hits_of_scan_parameter(self.output_filename + '_interpreted.h5', scan_parameter_names, chunk_size=1.5e7)): if index == 0: progress_bar.start( ) # start after the event index is created to get reasonable ETA actual_col, actual_row, _ = actual_scan_parameters if len(hits[np.logical_and(hits['column'] != actual_col, hits['row'] != actual_row)]): logging.warning( 'There are %d hits from not selected pixels in the 
data' % len(hits[np.logical_and(hits['column'] != actual_col, hits['row'] != actual_row)])) hits = hits[ (hits['event_status'] & 0b0000011110001000) == 0b0000000100000000] # only take hits from good events (one TDC word only, no error) column, row, tot, tdc = hits['column'], hits['row'], hits[ 'tot'], hits['TDC'] if old_scan_parameters != actual_scan_parameters: # Store the data of the actual PlsrDAC value if old_scan_parameters: # Special case for the first PlsrDAC setting inner_loop_scan_parameter_index = np.where( old_scan_parameters[-1] == inner_loop_parameter_values )[0][ 0] # translate the scan parameter value to an index for the result histogram calibration_data[column - 1, row - 1, inner_loop_scan_parameter_index, 0] = np.mean(tot_data) calibration_data[column - 1, row - 1, inner_loop_scan_parameter_index, 1] = np.mean(tdc_data) calibration_data[column - 1, row - 1, inner_loop_scan_parameter_index, 2] = np.std(tot_data) calibration_data[column - 1, row - 1, inner_loop_scan_parameter_index, 3] = np.std(tdc_data) progress_bar.update(index) tot_data = np.array(tot) tdc_data = np.array(tdc) old_scan_parameters = actual_scan_parameters else: np.concatenate((tot_data, tot)) np.concatenate((tdc_data, tdc)) else: inner_loop_scan_parameter_index = np.where( old_scan_parameters[-1] == inner_loop_parameter_values )[0][ 0] # translate the scan parameter value to an index for the result histogram calibration_data[column - 1, row - 1, inner_loop_scan_parameter_index, 0] = np.mean(tot_data) calibration_data[column - 1, row - 1, inner_loop_scan_parameter_index, 1] = np.mean(tdc_data) calibration_data[column - 1, row - 1, inner_loop_scan_parameter_index, 2] = np.std(tot_data) calibration_data[column - 1, row - 1, inner_loop_scan_parameter_index, 3] = np.std(tdc_data) calibration_data_out = calibration_data_file.createCArray( calibration_data_file.root, name='HitOrCalibration', title='Hit OR calibration data', atom=tb.Atom.from_dtype(calibration_data.dtype), shape=calibration_data.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) calibration_data_out[:] = calibration_data calibration_data_out.attrs.dimensions = scan_parameter_names calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values plot_calibration(col_row_combinations, scan_parameter=inner_loop_parameter_values, calibration_data=calibration_data, repeat_command=self.repeat_command, filename=output_pdf) output_pdf.close() progress_bar.finish()
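# The PlsrDAC loop above buffers tot/tdc until the inner-loop scan parameter changes and only then
# fills the calibration histogram. A condensed sketch of that accumulate-and-flush pattern
# (hypothetical chunk source, single pixel); note that np.concatenate returns a new array, so its
# result has to be reassigned to the buffer.
import numpy as np

def fill_calibration(chunks, inner_loop_values):
    calibration = np.zeros((len(inner_loop_values), 4), dtype='f4')

    def flush(scan_parameters, tot_buffer, tdc_buffer):
        par_index = np.where(inner_loop_values == scan_parameters[-1])[0][0]
        calibration[par_index] = (np.mean(tot_buffer), np.mean(tdc_buffer),
                                  np.std(tot_buffer), np.std(tdc_buffer))

    old_scan_parameters, tot_buffer, tdc_buffer = None, None, None
    for scan_parameters, tot, tdc in chunks:
        if scan_parameters != old_scan_parameters:
            if old_scan_parameters is not None:  # flush the finished setting
                flush(old_scan_parameters, tot_buffer, tdc_buffer)
            tot_buffer, tdc_buffer = np.array(tot), np.array(tdc)
            old_scan_parameters = scan_parameters
        else:  # same setting split over several chunks: extend the buffers
            tot_buffer = np.concatenate((tot_buffer, tot))
            tdc_buffer = np.concatenate((tdc_buffer, tdc))
    if old_scan_parameters is not None:
        flush(old_scan_parameters, tot_buffer, tdc_buffer)  # last setting
    return calibration

chunks = [((1, 1, 40), [5, 6], [50, 52]), ((1, 1, 40), [5], [51]), ((1, 1, 60), [8, 9], [80, 84])]
print(fill_calibration(chunks, inner_loop_values=np.array([40, 60])))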
def analyze_hit_delay(raw_data_file): # Interpret data and create hit table with AnalyzeRawData(raw_data_file=raw_data_file, create_pdf=False) as analyze_raw_data: analyze_raw_data.create_occupancy_hist = False # too many scan parameters to do in ram histograming analyze_raw_data.create_hit_table = True analyze_raw_data.interpreter.set_warning_output(False) # a lot of data produces unknown words analyze_raw_data.interpret_word_table() analyze_raw_data.interpreter.print_summary() vcal_c0 = analyze_raw_data.vcal_c0 vcal_c1 = analyze_raw_data.vcal_c1 c_high = analyze_raw_data.c_high # Create relative BCID and mean relative BCID histogram for each pixel / injection delay / PlsrDAC setting with tb.open_file(raw_data_file + '_analyzed.h5', mode="w") as out_file_h5: hists_folder = out_file_h5.create_group(out_file_h5.root, 'PixelHistsMeanRelBcid') hists_folder_2 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsRelBcid') hists_folder_3 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsTot') hists_folder_4 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsMeanTot') hists_folder_5 = out_file_h5.create_group(out_file_h5.root, 'HistsTot') def store_bcid_histograms(bcid_array, tot_array, tot_pixel_array): logging.debug('Store histograms for PlsrDAC ' + str(old_plsr_dac)) bcid_mean_array = np.average(bcid_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(bcid_array, axis=3).astype('f4') # calculate the mean BCID per pixel and scan parameter tot_pixel_mean_array = np.average(tot_pixel_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(tot_pixel_array, axis=3).astype('f4') # calculate the mean tot per pixel and scan parameter bcid_mean_result = np.swapaxes(bcid_mean_array, 0, 1) bcid_result = np.swapaxes(bcid_array, 0, 1) tot_pixel_result = np.swapaxes(tot_pixel_array, 0, 1) tot_mean_pixel_result = np.swapaxes(tot_pixel_mean_array, 0, 1) out = out_file_h5.createCArray(hists_folder, name='HistPixelMeanRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac, title='Mean relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_mean_result.dtype), shape=bcid_mean_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out.attrs.dimensions = 'column, row, injection delay' out.attrs.injection_delay_values = injection_delay out[:] = bcid_mean_result out_2 = out_file_h5.createCArray(hists_folder_2, name='HistPixelRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac, title='Relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_result.dtype), shape=bcid_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_2.attrs.dimensions = 'column, row, injection delay, relative bcid' out_2.attrs.injection_delay_values = injection_delay out_2[:] = bcid_result out_3 = out_file_h5.createCArray(hists_folder_3, name='HistPixelTotPerDelayPlsrDac_%03d' % old_plsr_dac, title='Tot hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_pixel_result.dtype), shape=tot_pixel_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_3.attrs.dimensions = 'column, row, injection delay' out_3.attrs.injection_delay_values = injection_delay out_3[:] = tot_pixel_result out_4 = out_file_h5.createCArray(hists_folder_4, name='HistPixelMeanTotPerDelayPlsrDac_%03d' % old_plsr_dac, title='Mean tot hist per pixel and different PlsrDAC delays for PlsrDAC ' + 
str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_mean_pixel_result.dtype), shape=tot_mean_pixel_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_4.attrs.dimensions = 'column, row, injection delay' out_4.attrs.injection_delay_values = injection_delay out_4[:] = tot_mean_pixel_result out_5 = out_file_h5.createCArray(hists_folder_5, name='HistTotPlsrDac_%03d' % old_plsr_dac, title='Tot histogram for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_array.dtype), shape=tot_array.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_5.attrs.injection_delay_values = injection_delay out_5[:] = tot_array old_plsr_dac = None # Get scan parameters from interpreted file with tb.open_file(raw_data_file + '_interpreted.h5', 'r') as in_file_h5: scan_parameters_dict = get_scan_parameter(in_file_h5.root.meta_data[:]) plsr_dac = scan_parameters_dict['PlsrDAC'] hists_folder._v_attrs.plsr_dac_values = plsr_dac hists_folder_2._v_attrs.plsr_dac_values = plsr_dac hists_folder_3._v_attrs.plsr_dac_values = plsr_dac hists_folder_4._v_attrs.plsr_dac_values = plsr_dac injection_delay = scan_parameters_dict[scan_parameters_dict.keys()[1]] # injection delay par name is unknown and should be in the inner loop scan_parameters = scan_parameters_dict.keys() bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16) # bcid array of actual PlsrDAC tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16) # tot pixel array of actual PlsrDAC tot_array = np.zeros((16,), dtype=np.uint32) # tot array of actual PlsrDAC logging.info('Store histograms for PlsrDAC values ' + str(plsr_dac)) progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=max(plsr_dac) - min(plsr_dac), term_width=80) for index, (parameters, hits) in enumerate(get_hits_of_scan_parameter(raw_data_file + '_interpreted.h5', scan_parameters, try_speedup=True, chunk_size=10000000)): if index == 0: progress_bar.start() # start after the event index is created to get reasonable ETA actual_plsr_dac, actual_injection_delay = parameters[0], parameters[1] column, row, rel_bcid, tot = hits['column'] - 1, hits['row'] - 1, hits['relative_BCID'], hits['tot'] bcid_array_fast = hist_3d_index(column, row, rel_bcid, shape=(80, 336, 16)) tot_pixel_array_fast = hist_3d_index(column, row, tot, shape=(80, 336, 16)) tot_array_fast = hist_1d_index(tot, shape=(16,)) if old_plsr_dac != actual_plsr_dac: # Store the data of the actual PlsrDAC value if old_plsr_dac: # Special case for the first PlsrDAC setting store_bcid_histograms(bcid_array, tot_array, tot_pixel_array) progress_bar.update(old_plsr_dac - min(plsr_dac)) # Reset the histrograms for the next PlsrDAC setting bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16) tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16) tot_array = np.zeros((16,), dtype=np.uint32) old_plsr_dac = actual_plsr_dac injection_delay_index = np.where(np.array(injection_delay) == actual_injection_delay)[0][0] bcid_array[:, :, injection_delay_index, :] += bcid_array_fast tot_pixel_array[:, :, injection_delay_index, :] += tot_pixel_array_fast tot_array += tot_array_fast store_bcid_histograms(bcid_array, tot_array, tot_pixel_array) # save histograms of last PlsrDAC setting progress_bar.finish() # Take the mean relative BCID histogram of each PlsrDAC value and calculate the delay for each 
pixel with tb.open_file(raw_data_file + '_analyzed.h5', mode="r+") as in_file_h5: hists_folder = in_file_h5.create_group(in_file_h5.root, 'PixelHistsBcidJumps') plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values # Info output with progressbar logging.info('Detect BCID jumps with pixel based S-Curve fits for PlsrDACs ' + str(plsr_dac_values)) progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(plsr_dac_values), term_width=80) progress_bar.start() for index, node in enumerate(in_file_h5.root.PixelHistsMeanRelBcid): # loop over all mean relative BCID hists for all PlsrDAC values and determine the BCID jumps actual_plsr_dac = int(re.search(r'\d+', node.name).group()) # actual node plsr dac value # Select the S-curves and interpolate Nans pixel_data = node[:, :, :] pixel_data_fixed = pixel_data.reshape(pixel_data.shape[0] * pixel_data.shape[1] * pixel_data.shape[2]) # Reshape for interpolation of Nans nans, x = ~np.isfinite(pixel_data_fixed), lambda z: z.nonzero()[0] pixel_data_fixed[nans] = np.interp(x(nans), x(~nans), pixel_data_fixed[~nans]) # interpolate Nans pixel_data_fixed = pixel_data_fixed.reshape(pixel_data.shape[0], pixel_data.shape[1], pixel_data.shape[2]) # Reshape after interpolation of Nans # Fit all BCID jumps per pixel (1 - 2 jumps expected) with multithreading pixel_data_shaped = pixel_data_fixed.reshape(pixel_data_fixed.shape[0] * pixel_data_fixed.shape[1], pixel_data_fixed.shape[2]).tolist() pool = mp.Pool() # create as many workers as physical cores are available result_array = np.array(pool.map(fit_bcid_jumps, pixel_data_shaped)) pool.close() pool.join() result_array = result_array.reshape(pixel_data_fixed.shape[0], pixel_data_fixed.shape[1], 4) # Store array to file out = in_file_h5.createCArray(hists_folder, name='PixelHistsBcidJumpsPlsrDac_%03d' % actual_plsr_dac, title='BCID jumps per pixel for PlsrDAC ' + str(actual_plsr_dac), atom=tb.Atom.from_dtype(result_array.dtype), shape=result_array.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out.attrs.dimensions = 'column, row, BCID first jump, delay first jump, BCID second jump, delay second jump' out[:] = result_array progress_bar.update(index) # Calibrate the step size of the injection delay and create absolute and relative (=time walk) hit delay histograms with tb.open_file(raw_data_file + '_analyzed.h5', mode="r+") as out_file_h5: # Calculate injection delay step size using the average difference of two Scurves of all pixels and plsrDAC settings and the minimum BCID to fix the absolute time scale differences = [] min_bcid = 15 for node in out_file_h5.root.PixelHistsBcidJumps: pixel_data = node[:, :, :] selection = (np.logical_and(pixel_data[:, :, 0] > 0, pixel_data[:, :, 2] > 0)) # select pixels with two Scurve fits difference = pixel_data[selection, 3] - pixel_data[selection, 1] # difference in delay settings between the scurves difference = difference[np.logical_and(difference > 15, difference < 60)] # get rid of bad data differences.extend(difference.tolist()) if np.amin(pixel_data[selection, 0]) < min_bcid: min_bcid = np.amin(pixel_data[selection, 0]) step_size = np.median(differences) # delay steps needed for 25 ns step_size_error = np.std(differences) # delay steps needed for 25 ns # Calculate the hit delay per pixel plsr_dac_values = out_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values hit_delay = np.zeros(shape=(336, 80, 
len(plsr_dac_values))) # result array for node in out_file_h5.root.PixelHistsBcidJumps: # loop over all BCID jump hists for all PlsrDAC values to calculate the hit delay actual_plsr_dac = int(re.search(r'\d+', node.name).group()) # actual node plsr dac value plsr_dac_index = np.where(plsr_dac_values == actual_plsr_dac)[0][0] pixel_data = node[:, :, :] actual_hit_delay = (pixel_data[:, :, 0] - min_bcid + 1) * 25. - pixel_data[:, :, 1] * 25. / step_size hit_delay[:, :, plsr_dac_index] = actual_hit_delay hit_delay = np.ma.masked_less(hit_delay, 0) timewalk = hit_delay - np.amin(hit_delay, axis=2)[:, :, np.newaxis] # time walk calc. by normalization to minimum for every pixel # Calculate the mean TOT per PlsrDAC (additional information, not needed for hit delay) tot = np.zeros(shape=(len(plsr_dac_values),), dtype=np.float16) # result array for node in out_file_h5.root.HistsTot: # loop over tot hist for all PlsrDAC values plsr_dac = int(re.search(r'\d+', node.name).group()) plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0] tot_data = node[:] tot[plsr_dac_index] = get_mean_from_histogram(tot_data, range(16)) # Store the data out = out_file_h5.createCArray(out_file_h5.root, name='HistPixelTimewalkPerPlsrDac', title='Time walk per pixel and PlsrDAC', atom=tb.Atom.from_dtype(timewalk.dtype), shape=timewalk.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_2 = out_file_h5.createCArray(out_file_h5.root, name='HistPixelHitDelayPerPlsrDac', title='Hit delay per pixel and PlsrDAC', atom=tb.Atom.from_dtype(hit_delay.dtype), shape=hit_delay.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out_3 = out_file_h5.createCArray(out_file_h5.root, name='HistTotPerPlsrDac', title='Tot per PlsrDAC', atom=tb.Atom.from_dtype(tot.dtype), shape=tot.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) out.attrs.dimensions = 'column, row, PlsrDAC' out.attrs.delay_calibration = step_size out.attrs.delay_calibration_error = step_size_error out.attrs.plsr_dac_values = plsr_dac_values out_2.attrs.dimensions = 'column, row, PlsrDAC' out_2.attrs.delay_calibration = step_size out_2.attrs.delay_calibration_error = step_size_error out_2.attrs.plsr_dac_values = plsr_dac_values out_3.attrs.dimensions = 'PlsrDAC' out_3.attrs.plsr_dac_values = plsr_dac_values out[:] = timewalk.filled(fill_value=np.NaN) out_2[:] = hit_delay.filled(fill_value=np.NaN) out_3[:] = tot # Mask the pixels that have non valid data and create plot with the time walk and hit delay for all pixels with tb.open_file(raw_data_file + '_analyzed.h5', mode="r") as in_file_h5: def plsr_dac_to_charge(plsr_dac, vcal_c0, vcal_c1, c_high): # TODO: take PlsrDAC calib from file voltage = vcal_c0 + vcal_c1 * plsr_dac return voltage * c_high / 0.16022 def plot_hit_delay(hist_3d, charge_values, title, xlabel, ylabel, filename, threshold=None, tot_values=None): # Interpolate tot values for second tot axis interpolation = interp1d(tot_values, charge_values, kind='slinear', bounds_error=True) tot = np.arange(16) tot = tot[np.logical_and(tot >= np.amin(tot_values), tot <= np.amax(tot_values))] array = np.transpose(hist_3d, axes=(2, 1, 0)).reshape(hist_3d.shape[2], hist_3d.shape[0] * hist_3d.shape[1]) y = np.mean(array, axis=1) y_err = np.std(array, axis=1) fig = Figure() FigureCanvas(fig) ax = fig.add_subplot(111) fig.patch.set_facecolor('white') ax.grid(True) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xlim((0, np.amax(charge_values))) ax.set_ylim((np.amin(y - y_err), np.amax(y + y_err))) 
ax.plot(charge_values, y, '.-', color='black', label=title) if threshold is not None: ax.plot([threshold, threshold], [np.amin(y - y_err), np.amax(y + y_err)], linestyle='--', color='black', label='Threshold\n%d e' % (threshold)) ax.fill_between(charge_values, y - y_err, y + y_err, color='gray', alpha=0.5, facecolor='gray', label='RMS') ax2 = ax.twiny() ax2.set_xlabel("ToT") ticklab = ax2.xaxis.get_ticklabels()[0] trans = ticklab.get_transform() ax2.xaxis.set_label_coords(np.amax(charge_values), 1, transform=trans) ax2.set_xlim(ax.get_xlim()) ax2.set_xticks(interpolation(tot)) ax2.set_xticklabels([str(int(i)) for i in tot]) ax.text(0.5, 1.07, title, horizontalalignment='center', fontsize=18, transform=ax2.transAxes) ax.legend() filename.savefig(fig) plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values charge_values = plsr_dac_to_charge(np.array(plsr_dac_values), vcal_c0, vcal_c1, c_high) hist_timewalk = in_file_h5.root.HistPixelTimewalkPerPlsrDac[:, :, :] hist_hit_delay = in_file_h5.root.HistPixelHitDelayPerPlsrDac[:, :, :] tot = in_file_h5.root.HistTotPerPlsrDac[:] hist_timewalk = np.ma.masked_invalid(hist_timewalk) hist_hit_delay = np.ma.masked_invalid(hist_hit_delay) output_pdf = PdfPages(raw_data_file + '_analyzed.pdf') plot_hit_delay(np.swapaxes(hist_timewalk, 0, 1), charge_values=charge_values, title='Time walk', xlabel='Charge [e]', ylabel='Time walk [ns]', filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot) plot_hit_delay(np.swapaxes(hist_hit_delay, 0, 1), charge_values=charge_values, title='Hit delay', xlabel='Charge [e]', ylabel='Hit delay [ns]', filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot) plot_scurves(np.swapaxes(hist_timewalk, 0, 1), scan_parameters=charge_values, title='Timewalk of the FE-I4', scan_parameter_name='Charge [e]', ylabel='Timewalk [ns]', min_x=0, filename=output_pdf) plot_scurves(np.swapaxes(hist_hit_delay[:, :, :], 0, 1), scan_parameters=charge_values, title='Hit delay (T0) with internal charge injection\nof the FE-I4', scan_parameter_name='Charge [e]', ylabel='Hit delay [ns]', min_x=0, filename=output_pdf) for i in [0, 1, len(plsr_dac_values) / 4, len(plsr_dac_values) / 2, -1]: # plot 2d hist at min, 1/4, 1/2, max PlsrDAC setting plot_three_way(hist_timewalk[:, :, i], title='Time walk at %.0f e' % (charge_values[i]), x_axis_title='Time walk [ns]', filename=output_pdf) plot_three_way(hist_hit_delay[:, :, i], title='Hit delay (T0) with internal charge injection at %.0f e' % (charge_values[i]), x_axis_title='Hit delay [ns]', minimum=np.amin(hist_hit_delay[:, :, i]), maximum=np.amax(hist_hit_delay[:, :, i]), filename=output_pdf) output_pdf.close()
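# store_bcid_histograms above derives the mean relative BCID (and mean ToT) per pixel from the
# 16-bin histogram axis; the np.average expression is simply the count-weighted mean of the bin
# values. A small consistency check of that identity with toy counts:
import numpy as np

bcid_hist = np.random.randint(1, 10, size=(80, 336, 3, 16))  # column, row, injection delay, rel. BCID
bcid_mean = (np.average(bcid_hist, axis=3, weights=range(0, 16)) * sum(range(0, 16)) /
             np.sum(bcid_hist, axis=3).astype('f4'))
# same as sum(counts * bin_value) / sum(counts) along the last axis
assert np.allclose(bcid_mean, np.dot(bcid_hist, np.arange(16)) / np.sum(bcid_hist, axis=3))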
def analyze(self): def analyze_raw_data_file(file_name): with AnalyzeRawData(raw_data_file=file_name, create_pdf=False) as analyze_raw_data: analyze_raw_data.create_tot_hist = False analyze_raw_data.create_fitted_threshold_hists = True analyze_raw_data.create_threshold_mask = True analyze_raw_data.interpreter.set_warning_output( True ) # so far the data structure in a threshold scan was always bad, too many warnings given analyze_raw_data.interpret_word_table() def store_calibration_data_as_table(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_values): logging.info("Storing calibration data in a table...") filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) mean_threshold_calib_table = out_file_h5.createTable( out_file_h5.root, name='MeanThresholdCalibration', description=data_struct.MeanThresholdCalibrationTable, title='mean_threshold_calibration', filters=filter_table) threshold_calib_table = out_file_h5.createTable( out_file_h5.root, name='ThresholdCalibration', description=data_struct.ThresholdCalibrationTable, title='threshold_calibration', filters=filter_table) for column in range(80): for row in range(336): for parameter_value_index, parameter_value in enumerate( parameter_values): threshold_calib_table.row['column'] = column threshold_calib_table.row['row'] = row threshold_calib_table.row[ 'parameter_value'] = parameter_value threshold_calib_table.row[ 'threshold'] = threshold_calibration[ column, row, parameter_value_index] threshold_calib_table.row.append() for parameter_value_index, parameter_value in enumerate( parameter_values): mean_threshold_calib_table.row[ 'parameter_value'] = parameter_value mean_threshold_calib_table.row[ 'mean_threshold'] = mean_threshold_calibration[ parameter_value_index] mean_threshold_calib_table.row[ 'threshold_rms'] = mean_threshold_rms_calibration[ parameter_value_index] mean_threshold_calib_table.row.append() threshold_calib_table.flush() mean_threshold_calib_table.flush() logging.info("done") def store_calibration_data_as_array(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration): logging.info("Storing calibration data in an array...") filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) mean_threshold_calib_array = out_file_h5.createCArray( out_file_h5.root, name='HistThresholdMeanCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_calibration', filters=filter_table) mean_threshold_calib_rms_array = out_file_h5.createCArray( out_file_h5.root, name='HistThresholdRMSCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_rms_calibration', filters=filter_table) threshold_calib_array = out_file_h5.createCArray( out_file_h5.root, name='HistThresholdCalibration', atom=tb.Atom.from_dtype(threshold_calibration.dtype), shape=threshold_calibration.shape, title='threshold_calibration', filters=filter_table) mean_threshold_calib_array[:] = mean_threshold_calibration mean_threshold_calib_rms_array[:] = mean_threshold_rms_calibration threshold_calib_array[:] = threshold_calibration logging.info("done") def mask_columns(pixel_array, ignore_columns): idx = np.array(ignore_columns) - 1 # from FE to Array columns m = np.zeros_like(pixel_array) m[:, idx] = 1 return np.ma.masked_array(pixel_array, m) calibration_file = self.output_filename + '_calibration' 
raw_data_files = analysis_utils.get_data_file_names_from_scan_base( self.output_filename, filter_file_words=['interpreted', 'calibration_calibration']) parameter_name = self.scan_parameters._fields[1] for raw_data_file in raw_data_files: # no using multithreading here, it is already used in fit analyze_raw_data_file(raw_data_file) files_per_parameter = analysis_utils.get_parameter_value_from_file_names( [ file_name[:-3] + '_interpreted.h5' for file_name in raw_data_files ], parameter_name) logging.info("Create calibration from data") with tb.openFile( self.output_filename + '.h5', mode="r") as in_file_h5: # deduce settings from raw data file ignore_columns = in_file_h5.root.configuration.run_conf[:][ np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'ignore_columns')]['value'][0] ignore_columns = ast.literal_eval(ignore_columns) mean_threshold_calibration = np.empty(shape=(len(raw_data_files), ), dtype='<f8') mean_threshold_rms_calibration = np.empty( shape=(len(raw_data_files), ), dtype='<f8') threshold_calibration = np.empty(shape=(80, 336, len(raw_data_files)), dtype='<f8') if self.create_plots: logging.info('Saving calibration plots in: %s' % (calibration_file + '.pdf')) output_pdf = PdfPages(calibration_file + '.pdf') parameter_values = [] for index, (analyzed_data_file, parameters) in enumerate(files_per_parameter.items()): parameter_values.append(parameters.values()[0][0]) with tb.openFile(analyzed_data_file, mode="r") as in_file_h5: occupancy_masked = mask_columns( pixel_array=in_file_h5.root.HistOcc[:], ignore_columns=ignore_columns ) # mask the not scanned columns for analysis and plotting thresholds_masked = mask_columns( pixel_array=in_file_h5.root.HistThresholdFitted[:], ignore_columns=ignore_columns) if self.create_plots: plotThreeWay(hist=thresholds_masked, title='Threshold Fitted for ' + parameters.keys()[0] + ' = ' + str(parameters.values()[0][0]), filename=output_pdf) plsr_dacs = analysis_utils.get_scan_parameter( meta_data_array=in_file_h5.root.meta_data[:] )['PlsrDAC'] plot_scurves(occupancy_hist=occupancy_masked, scan_parameters=plsr_dacs, scan_parameter_name='PlsrDAC', filename=output_pdf) # fill the calibration data arrays mean_threshold_calibration[index] = np.ma.mean( thresholds_masked) mean_threshold_rms_calibration[index] = np.ma.std( thresholds_masked) threshold_calibration[:, :, index] = thresholds_masked.T with tb.openFile(calibration_file + '.h5', mode="w") as out_file_h5: store_calibration_data_as_array( out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration) store_calibration_data_as_table( out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_values=parameter_values) if self.create_plots: plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=False, filename=output_pdf) plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=True, filename=output_pdf) output_pdf.close()
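# mask_columns above excludes the not-scanned front-end columns from the threshold statistics by
# masking them; np.ma.mean / np.ma.std then ignore the masked entries. Stand-alone sketch with a
# toy (row, column) threshold map:
import numpy as np

def mask_columns(pixel_array, ignore_columns):
    idx = np.array(ignore_columns) - 1  # FE columns are counted from 1
    mask = np.zeros_like(pixel_array)
    mask[:, idx] = 1
    return np.ma.masked_array(pixel_array, mask)

thresholds = np.random.normal(50., 2., size=(336, 80))
thresholds_masked = mask_columns(thresholds, ignore_columns=(1, 77, 78, 79, 80))
print(np.ma.mean(thresholds_masked), np.ma.std(thresholds_masked))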
def analyze_cluster_size_per_scan_parameter( input_file_hits, output_file_cluster_size, parameter="GDAC", max_chunk_size=10000000, overwrite_output_files=False, output_pdf=None, ): """ This method takes multiple hit files and determines the cluster size for different scan parameter values of Parameters ---------- input_files_hits: string output_file_cluster_size: string The data file with the results parameter: string The name of the parameter to separate the data into (e.g.: PlsrDAC) max_chunk_size: int the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer overwrite_output_files: bool Set to true to overwrite the output file if it already exists output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen, if False nothing is printed """ logging.info("Analyze the cluster sizes for different " + parameter + " settings for " + input_file_hits) if os.path.isfile(output_file_cluster_size) and not overwrite_output_files: # skip analysis if already done logging.info( "Analyzed cluster size file " + output_file_cluster_size + " already exists. Skip cluster size analysis." ) else: with tb.openFile(output_file_cluster_size, mode="w") as out_file_h5: # file to write the data into filter_table = tb.Filters(complib="blosc", complevel=5, fletcher32=False) # compression of the written data parameter_goup = out_file_h5.createGroup( out_file_h5.root, parameter, title=parameter ) # note to store the data cluster_size_total = None # final array for the cluster size per GDAC with tb.openFile(input_file_hits, mode="r+") as in_hit_file_h5: # open the actual hit file meta_data_array = in_hit_file_h5.root.meta_data[:] scan_parameter = analysis_utils.get_scan_parameter(meta_data_array) # get the scan parameters if scan_parameter: # if a GDAC scan parameter was used analyze the cluster size per GDAC setting scan_parameter_values = scan_parameter[parameter] # scan parameter settings used if ( len(scan_parameter_values) == 1 ): # only analyze per scan step if there are more than one scan step logging.warning( "The file " + str(input_file_hits) + " has no different " + str(parameter) + " parameter values. Omit analysis." 
) else: logging.info( "Analyze " + input_file_hits + " per scan parameter " + parameter + " for " + str(len(scan_parameter_values)) + " values from " + str(np.amin(scan_parameter_values)) + " to " + str(np.amax(scan_parameter_values)) ) event_numbers = analysis_utils.get_meta_data_at_scan_parameter(meta_data_array, parameter)[ "event_number" ] # get the event numbers in meta_data where the scan parameter changes parameter_ranges = np.column_stack( (scan_parameter_values, analysis_utils.get_ranges_from_array(event_numbers)) ) hit_table = in_hit_file_h5.root.Hits analysis_utils.index_event_number(hit_table) total_hits, total_hits_2, index = 0, 0, 0 chunk_size = max_chunk_size # initialize the analysis and set settings analyze_data = AnalyzeRawData() analyze_data.create_cluster_size_hist = True analyze_data.create_cluster_tot_hist = True analyze_data.histograming.set_no_scan_parameter() # one has to tell the histogramer the # of scan parameters for correct occupancy hist allocation progress_bar = progressbar.ProgressBar( widgets=[ "", progressbar.Percentage(), " ", progressbar.Bar(marker="*", left="|", right="|"), " ", analysis_utils.ETA(), ], maxval=hit_table.shape[0], term_width=80, ) progress_bar.start() for parameter_index, parameter_range in enumerate( parameter_ranges ): # loop over the selected events analyze_data.reset() # resets the data of the last analysis logging.debug( "Analyze GDAC = " + str(parameter_range[0]) + " " + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + "%" ) start_event_number = parameter_range[1] stop_event_number = parameter_range[2] logging.debug( "Data from events = [" + str(start_event_number) + "," + str(stop_event_number) + "[" ) actual_parameter_group = out_file_h5.createGroup( parameter_goup, name=parameter + "_" + str(parameter_range[0]), title=parameter + "_" + str(parameter_range[0]), ) # loop over the hits in the actual selected events with optimizations: variable chunk size, start word index given readout_hit_len = ( 0 ) # variable to calculate a optimal chunk size value from the number of hits for speed up for hits, index in analysis_utils.data_aligned_at_events( hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start=index, chunk_size=chunk_size, ): total_hits += hits.shape[0] analyze_data.analyze_hits(hits) # analyze the selected hits in chunks readout_hit_len += hits.shape[0] progress_bar.update(index) chunk_size = ( int(1.05 * readout_hit_len) if int(1.05 * readout_hit_len) < max_chunk_size else max_chunk_size ) # to increase the readout speed, estimated the number of hits for one read instruction if ( chunk_size < 50 ): # limit the lower chunk size, there can always be a crazy event with more than 20 hits chunk_size = 50 # get occupancy hist occupancy = ( analyze_data.histograming.get_occupancy() ) # just here to check histograming is consistend # store and plot cluster size hist cluster_size_hist = analyze_data.clusterizer.get_cluster_size_hist() cluster_size_hist_table = out_file_h5.createCArray( actual_parameter_group, name="HistClusterSize", title="Cluster Size Histogram", atom=tb.Atom.from_dtype(cluster_size_hist.dtype), shape=cluster_size_hist.shape, filters=filter_table, ) cluster_size_hist_table[:] = cluster_size_hist if output_pdf is not False: plotting.plot_cluster_size( hist=cluster_size_hist, title="Cluster size (" + str(np.sum(cluster_size_hist)) + " entries) for " + parameter + " = " + str(scan_parameter_values[parameter_index]), filename=output_pdf, ) if 
cluster_size_total is None: # true if no data was appended to the array yet cluster_size_total = cluster_size_hist else: cluster_size_total = np.vstack([cluster_size_total, cluster_size_hist]) total_hits_2 += np.sum(occupancy) progress_bar.finish() if total_hits != total_hits_2: logging.warning("Analysis shows inconsistent number of hits. Check needed!") logging.info("Analyzed %d hits!", total_hits) cluster_size_total_out = out_file_h5.createCArray( out_file_h5.root, name="AllHistClusterSize", title="All Cluster Size Histograms", atom=tb.Atom.from_dtype(cluster_size_total.dtype), shape=cluster_size_total.shape, filters=filter_table, ) cluster_size_total_out[:] = cluster_size_total
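# The stacked cluster size histograms above end up in one compressed CArray per output file.
# Minimal PyTables sketch of that storage step (hypothetical file name and toy histograms), using
# the same blosc filter settings as the analysis:
import numpy as np
import tables as tb

cluster_size_hists = [np.random.poisson(100, size=6) for _ in range(3)]  # one histogram per GDAC
cluster_size_total = np.vstack(cluster_size_hists)

filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
with tb.open_file('cluster_size_example.h5', mode='w') as out_file_h5:
    out = out_file_h5.create_carray(out_file_h5.root, name='AllHistClusterSize',
                                    title='All Cluster Size Histograms',
                                    atom=tb.Atom.from_dtype(cluster_size_total.dtype),
                                    shape=cluster_size_total.shape, filters=filter_table)
    out[:] = cluster_size_total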
def create_threshold_calibration(scan_base_file_name, create_plots=True):  # create calibration function, can be called stand alone
    def analyze_raw_data_file(file_name):
        if os.path.isfile(file_name[:-3] + '_interpreted.h5'):  # skip analysis if already done
            logging.warning('Analyzed data file ' + file_name + ' already exists. Skip analysis for this file.')
        else:
            with AnalyzeRawData(raw_data_file=file_name, create_pdf=False) as analyze_raw_data:
                analyze_raw_data.create_tot_hist = False
                analyze_raw_data.create_tot_pixel_hist = False
                analyze_raw_data.create_fitted_threshold_hists = True
                analyze_raw_data.create_threshold_mask = True
                analyze_raw_data.interpreter.set_warning_output(False)  # RX errors would fill the console
                analyze_raw_data.interpret_word_table()

    def store_calibration_data_as_table(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_values):
        logging.info("Storing calibration data in a table...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_table = out_file_h5.createTable(out_file_h5.root, name='MeanThresholdCalibration', description=data_struct.MeanThresholdCalibrationTable, title='mean_threshold_calibration', filters=filter_table)
        threshold_calib_table = out_file_h5.createTable(out_file_h5.root, name='ThresholdCalibration', description=data_struct.ThresholdCalibrationTable, title='threshold_calibration', filters=filter_table)
        for column in range(80):
            for row in range(336):
                for parameter_value_index, parameter_value in enumerate(parameter_values):
                    threshold_calib_table.row['column'] = column
                    threshold_calib_table.row['row'] = row
                    threshold_calib_table.row['parameter_value'] = parameter_value
                    threshold_calib_table.row['threshold'] = threshold_calibration[column, row, parameter_value_index]
                    threshold_calib_table.row.append()
        for parameter_value_index, parameter_value in enumerate(parameter_values):
            mean_threshold_calib_table.row['parameter_value'] = parameter_value
            mean_threshold_calib_table.row['mean_threshold'] = mean_threshold_calibration[parameter_value_index]
            mean_threshold_calib_table.row['threshold_rms'] = mean_threshold_rms_calibration[parameter_value_index]
            mean_threshold_calib_table.row.append()
        threshold_calib_table.flush()
        mean_threshold_calib_table.flush()
        logging.info("done")

    def store_calibration_data_as_array(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_name, parameter_values):
        logging.info("Storing calibration data in an array...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdMeanCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_calibration', filters=filter_table)
        mean_threshold_calib_rms_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdRMSCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_rms_calibration', filters=filter_table)
        threshold_calib_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdCalibration', atom=tb.Atom.from_dtype(threshold_calibration.dtype), shape=threshold_calibration.shape, title='threshold_calibration', filters=filter_table)
        mean_threshold_calib_array[:] = mean_threshold_calibration
        mean_threshold_calib_rms_array[:] = mean_threshold_rms_calibration
        threshold_calib_array[:] = threshold_calibration
        mean_threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name]
        mean_threshold_calib_rms_array.attrs.dimensions = ['column', 'row', parameter_name]
        threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name]
        mean_threshold_calib_array.attrs.scan_parameter_values = parameter_values
        mean_threshold_calib_rms_array.attrs.scan_parameter_values = parameter_values
        threshold_calib_array.attrs.scan_parameter_values = parameter_values
        logging.info("done")

    def mask_columns(pixel_array, ignore_columns):
        idx = np.array(ignore_columns) - 1  # from FE to array columns
        m = np.zeros_like(pixel_array)
        m[:, idx] = 1
        return np.ma.masked_array(pixel_array, m)

    raw_data_files = analysis_utils.get_data_file_names_from_scan_base(scan_base_file_name, filter_file_words=['interpreted', 'calibration_calibration'])
    first_scan_base_file_name = scan_base_file_name if isinstance(scan_base_file_name, basestring) else scan_base_file_name[0]  # multiple scan_base_file_names for multiple runs

    with tb.openFile(first_scan_base_file_name + '.h5', mode="r") as in_file_h5:  # deduce scan parameters from the first (and often only) scan base file name
        ignore_columns = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'ignore_columns')]['value'][0]
        parameter_name = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'scan_parameters')]['value'][0]
        ignore_columns = ast.literal_eval(ignore_columns)
        parameter_name = ast.literal_eval(parameter_name)[1][0]

    calibration_file = first_scan_base_file_name + '_calibration'
    for raw_data_file in raw_data_files:  # analyze each raw data file, no multithreading here, it is already used in the S-curve fit
        analyze_raw_data_file(raw_data_file)
    files_per_parameter = analysis_utils.get_parameter_value_from_file_names([file_name[:-3] + '_interpreted.h5' for file_name in raw_data_files], parameter_name, unique=True, sort=True)

    logging.info("Create calibration from data")
    mean_threshold_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    mean_threshold_rms_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    threshold_calibration = np.empty(shape=(80, 336, len(raw_data_files)), dtype='<f8')
    if create_plots:
        logging.info('Saving calibration plots in: %s', calibration_file + '.pdf')
        output_pdf = PdfPages(calibration_file + '.pdf')

    progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(files_per_parameter.items()), term_width=80)
    progress_bar.start()
    parameter_values = []
    for index, (analyzed_data_file, parameters) in enumerate(files_per_parameter.items()):
        parameter_values.append(parameters.values()[0][0])
        with tb.openFile(analyzed_data_file, mode="r") as in_file_h5:
            occupancy_masked = mask_columns(pixel_array=in_file_h5.root.HistOcc[:], ignore_columns=ignore_columns)  # mask the not scanned columns for analysis and plotting
            thresholds_masked = mask_columns(pixel_array=in_file_h5.root.HistThresholdFitted[:], ignore_columns=ignore_columns)
            if create_plots:
                plot_three_way(hist=thresholds_masked, title='Threshold Fitted for ' + parameters.keys()[0] + ' = ' + str(parameters.values()[0][0]), filename=output_pdf)
                plsr_dacs = analysis_utils.get_scan_parameter(meta_data_array=in_file_h5.root.meta_data[:])['PlsrDAC']
                plot_scurves(occupancy_hist=occupancy_masked, scan_parameters=plsr_dacs, scan_parameter_name='PlsrDAC', filename=output_pdf)
            # fill the calibration data arrays
            mean_threshold_calibration[index] = np.ma.mean(thresholds_masked)
            mean_threshold_rms_calibration[index] = np.ma.std(thresholds_masked)
            threshold_calibration[:, :, index] = thresholds_masked.T
        progress_bar.update(index)
    progress_bar.finish()

    with tb.openFile(calibration_file + '.h5', mode="w") as out_file_h5:
        store_calibration_data_as_array(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_name=parameter_name, parameter_values=parameter_values)
        store_calibration_data_as_table(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_values=parameter_values)
    if create_plots:
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=False, filename=output_pdf)
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=True, filename=output_pdf)
        output_pdf.close()
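

# Minimal read-back sketch for the calibration file written above (illustration only; the
# helper name is made up, it assumes the node names created by store_calibration_data_as_array
# and a PyTables version that provides tb.open_file):
def _load_threshold_calibration(calibration_file_name):
    with tb.open_file(calibration_file_name + '.h5', mode='r') as calib_h5:
        node = calib_h5.root.HistThresholdCalibration
        thresholds = node[:]  # per-pixel thresholds, shape (80, 336, n_parameter_values)
        parameter_values = node.attrs.scan_parameter_values  # scan parameter axis (e.g. GDAC values)
    return thresholds, parameter_values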
def analyze_hit_delay(raw_data_file):
    # Interpret data and create hit table
    with AnalyzeRawData(raw_data_file=raw_data_file, create_pdf=False) as analyze_raw_data:
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameters to do in-RAM histogramming
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.interpreter.set_warning_output(False)  # a lot of data produces unknown words
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()
        # Store calibration values in variables
        vcal_c0 = analyze_raw_data.vcal_c0
        vcal_c1 = analyze_raw_data.vcal_c1
        c_high = analyze_raw_data.c_high

    # Create relative BCID and mean relative BCID histogram for each pixel / injection delay / PlsrDAC setting
    with tb.open_file(raw_data_file + '_analyzed.h5', mode="w") as out_file_h5:
        hists_folder = out_file_h5.create_group(out_file_h5.root, 'PixelHistsMeanRelBcid')
        hists_folder_2 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsRelBcid')
        hists_folder_3 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsTot')
        hists_folder_4 = out_file_h5.create_group(out_file_h5.root, 'PixelHistsMeanTot')
        hists_folder_5 = out_file_h5.create_group(out_file_h5.root, 'HistsTot')

        def store_bcid_histograms(bcid_array, tot_array, tot_pixel_array):
            logging.debug('Store histograms for PlsrDAC ' + str(old_plsr_dac))
            bcid_mean_array = np.average(bcid_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(bcid_array, axis=3).astype('f4')  # calculate the mean BCID per pixel and scan parameter
            tot_pixel_mean_array = np.average(tot_pixel_array, axis=3, weights=range(0, 16)) * sum(range(0, 16)) / np.sum(tot_pixel_array, axis=3).astype('f4')  # calculate the mean ToT per pixel and scan parameter
            bcid_mean_result = np.swapaxes(bcid_mean_array, 0, 1)
            bcid_result = np.swapaxes(bcid_array, 0, 1)
            tot_pixel_result = np.swapaxes(tot_pixel_array, 0, 1)
            tot_mean_pixel_result = np.swapaxes(tot_pixel_mean_array, 0, 1)

            out = out_file_h5.create_carray(hists_folder, name='HistPixelMeanRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac, title='Mean relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_mean_result.dtype), shape=bcid_mean_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out.attrs.dimensions = 'column, row, injection delay'
            out.attrs.injection_delay_values = injection_delay
            out[:] = bcid_mean_result
            out_2 = out_file_h5.create_carray(hists_folder_2, name='HistPixelRelBcidPerDelayPlsrDac_%03d' % old_plsr_dac, title='Relative BCID hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(bcid_result.dtype), shape=bcid_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_2.attrs.dimensions = 'column, row, injection delay, relative bcid'
            out_2.attrs.injection_delay_values = injection_delay
            out_2[:] = bcid_result
            out_3 = out_file_h5.create_carray(hists_folder_3, name='HistPixelTotPerDelayPlsrDac_%03d' % old_plsr_dac, title='Tot hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_pixel_result.dtype), shape=tot_pixel_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_3.attrs.dimensions = 'column, row, injection delay'
            out_3.attrs.injection_delay_values = injection_delay
            out_3[:] = tot_pixel_result
            out_4 = out_file_h5.create_carray(hists_folder_4, name='HistPixelMeanTotPerDelayPlsrDac_%03d' % old_plsr_dac, title='Mean tot hist per pixel and different PlsrDAC delays for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_mean_pixel_result.dtype), shape=tot_mean_pixel_result.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_4.attrs.dimensions = 'column, row, injection delay'
            out_4.attrs.injection_delay_values = injection_delay
            out_4[:] = tot_mean_pixel_result
            out_5 = out_file_h5.create_carray(hists_folder_5, name='HistTotPlsrDac_%03d' % old_plsr_dac, title='Tot histogram for PlsrDAC ' + str(old_plsr_dac), atom=tb.Atom.from_dtype(tot_array.dtype), shape=tot_array.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out_5.attrs.injection_delay_values = injection_delay
            out_5[:] = tot_array

        old_plsr_dac = None

        # Get scan parameters from interpreted file
        with tb.open_file(raw_data_file + '_interpreted.h5', 'r') as in_file_h5:
            scan_parameters_dict = get_scan_parameter(in_file_h5.root.meta_data[:])
            plsr_dac = scan_parameters_dict['PlsrDAC']
            hists_folder._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_2._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_3._v_attrs.plsr_dac_values = plsr_dac
            hists_folder_4._v_attrs.plsr_dac_values = plsr_dac
            injection_delay = scan_parameters_dict[scan_parameters_dict.keys()[1]]  # injection delay parameter name is unknown; it is expected to be the inner loop parameter
            scan_parameters = scan_parameters_dict.keys()

        bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16)  # BCID array of the actual PlsrDAC
        tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16)  # ToT pixel array of the actual PlsrDAC
        tot_array = np.zeros((16,), dtype=np.uint32)  # ToT array of the actual PlsrDAC

        logging.info('Store histograms for PlsrDAC values ' + str(plsr_dac))
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=max(plsr_dac) - min(plsr_dac), term_width=80)

        for index, (parameters, hits) in enumerate(get_hits_of_scan_parameter(raw_data_file + '_interpreted.h5', scan_parameters, try_speedup=True, chunk_size=10000000)):
            if index == 0:
                progress_bar.start()  # start after the event index is created to get a reasonable ETA
            actual_plsr_dac, actual_injection_delay = parameters[0], parameters[1]
            column, row, rel_bcid, tot = hits['column'] - 1, hits['row'] - 1, hits['relative_BCID'], hits['tot']
            bcid_array_fast = hist_3d_index(column, row, rel_bcid, shape=(80, 336, 16))
            tot_pixel_array_fast = hist_3d_index(column, row, tot, shape=(80, 336, 16))
            tot_array_fast = hist_1d_index(tot, shape=(16,))

            if old_plsr_dac != actual_plsr_dac:  # a new PlsrDAC value started, store the accumulated data of the previous one
                if old_plsr_dac:  # skip the very first iteration, nothing has been accumulated yet
                    store_bcid_histograms(bcid_array, tot_array, tot_pixel_array)
                    progress_bar.update(old_plsr_dac - min(plsr_dac))
                # Reset the histograms for the next PlsrDAC setting
                bcid_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16)
                tot_pixel_array = np.zeros((80, 336, len(injection_delay), 16), dtype=np.uint16)
                tot_array = np.zeros((16,), dtype=np.uint32)
                old_plsr_dac = actual_plsr_dac

            injection_delay_index = np.where(np.array(injection_delay) == actual_injection_delay)[0][0]
            bcid_array[:, :, injection_delay_index, :] += bcid_array_fast
            tot_pixel_array[:, :, injection_delay_index, :] += tot_pixel_array_fast
            tot_array += tot_array_fast
        store_bcid_histograms(bcid_array, tot_array, tot_pixel_array)  # save the histograms of the last PlsrDAC setting
        progress_bar.finish()

    # Take the mean relative BCID histogram of each PlsrDAC value and calculate the delay for each pixel
    with tb.open_file(raw_data_file + '_analyzed.h5', mode="r+") as in_file_h5:
        hists_folder = in_file_h5.create_group(in_file_h5.root, 'PixelHistsBcidJumps')
        plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values

        # Info output with progressbar
        logging.info('Detect BCID jumps with pixel based S-curve fits for PlsrDACs ' + str(plsr_dac_values))
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(plsr_dac_values), term_width=80)
        progress_bar.start()

        for index, node in enumerate(in_file_h5.root.PixelHistsMeanRelBcid):  # loop over all mean relative BCID hists for all PlsrDAC values and determine the BCID jumps
            actual_plsr_dac = int(re.search(r'\d+', node.name).group())  # actual node PlsrDAC value
            # Select the S-curves and interpolate NaNs
            pixel_data = node[:, :, :]
            pixel_data_fixed = pixel_data.reshape(pixel_data.shape[0] * pixel_data.shape[1] * pixel_data.shape[2])  # reshape for interpolation of NaNs
            nans, x = ~np.isfinite(pixel_data_fixed), lambda z: z.nonzero()[0]
            pixel_data_fixed[nans] = np.interp(x(nans), x(~nans), pixel_data_fixed[~nans])  # interpolate NaNs
            pixel_data_fixed = pixel_data_fixed.reshape(pixel_data.shape[0], pixel_data.shape[1], pixel_data.shape[2])  # reshape after interpolation of NaNs
            # Fit all BCID jumps per pixel (1 - 2 jumps expected) in parallel with multiprocessing
            pixel_data_shaped = pixel_data_fixed.reshape(pixel_data_fixed.shape[0] * pixel_data_fixed.shape[1], pixel_data_fixed.shape[2]).tolist()
            pool = mp.Pool()  # create as many workers as physical cores are available
            result_array = np.array(pool.map(fit_bcid_jumps, pixel_data_shaped))
            pool.close()
            pool.join()
            result_array = result_array.reshape(pixel_data_fixed.shape[0], pixel_data_fixed.shape[1], 4)
            # Store array to file
            out = in_file_h5.create_carray(hists_folder, name='PixelHistsBcidJumpsPlsrDac_%03d' % actual_plsr_dac, title='BCID jumps per pixel for PlsrDAC ' + str(actual_plsr_dac), atom=tb.Atom.from_dtype(result_array.dtype), shape=result_array.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            out.attrs.dimensions = 'column, row, BCID first jump, delay first jump, BCID second jump, delay second jump'
            out[:] = result_array
            progress_bar.update(index)

    # Calibrate the step size of the injection delay and create absolute and relative (= time walk) hit delay histograms
    with tb.open_file(raw_data_file + '_analyzed.h5', mode="r+") as out_file_h5:
        # Calculate the injection delay step size using the average difference of two S-curves of all pixels and PlsrDAC settings and the minimum BCID to fix the absolute time scale
        differences = np.zeros(shape=(336, 80, sum(1 for _ in out_file_h5.root.PixelHistsBcidJumps)), dtype=np.float)
        min_bcid = 15
        for index, node in enumerate(out_file_h5.root.PixelHistsBcidJumps):  # loop to get the last node (the node with the most charge injected)
            pixel_data = node[:, :, :]
            selection = np.logical_and(pixel_data[:, :, 0] > 0, pixel_data[:, :, 2] > 0)  # select pixels with two S-curve fits
            difference = np.zeros_like(differences[:, :, 0])
            difference[selection] = pixel_data[selection, 3] - pixel_data[selection, 1]  # difference in delay settings between the S-curves
            difference[np.logical_or(difference < 15, difference > 60)] = 0  # get rid of bad data leading to a difference that is too small / large
            differences[:, :, index] = difference
            if np.any(pixel_data[selection, 0]) and np.min(pixel_data[selection, 0]) < min_bcid:  # search for the minimum relative BCID delay (= fastest hits)
                min_bcid = np.amin(pixel_data[selection, 0])
        differences = np.ma.masked_where(np.logical_or(differences == 0, ~np.isfinite(differences)), differences)
        step_size = np.ma.median(differences)  # delay steps needed for 25 ns
        step_size_error = np.ma.std(differences)  # delay steps needed for 25 ns
        logging.info('Mean step size for the PlsrDAC delay is %1.2f +- %1.2f ns', 25. / step_size, 25. / step_size ** 2 * step_size_error)

        # Calculate the hit delay per pixel
        plsr_dac_values = out_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
        hit_delay = np.zeros(shape=(336, 80, len(plsr_dac_values)), dtype=np.float)  # result array
        for node in out_file_h5.root.PixelHistsBcidJumps:  # loop over all BCID jump hists for all PlsrDAC values to calculate the hit delay
            actual_plsr_dac = int(re.search(r'\d+', node.name).group())  # actual node PlsrDAC value
            plsr_dac_index = np.where(plsr_dac_values == actual_plsr_dac)[0][0]
            pixel_data = node[:, :, :]
            actual_hit_delay = (pixel_data[:, :, 0] - min_bcid + 1) * 25. - pixel_data[:, :, 1] * 25. / step_size
            hit_delay[:, :, plsr_dac_index] = actual_hit_delay
        hit_delay = np.ma.masked_less(hit_delay, 0)
        timewalk = hit_delay - np.amin(hit_delay, axis=2)[:, :, np.newaxis]  # time walk calculated by normalization to the minimum hit delay for every pixel

        # Calculate the mean ToT per PlsrDAC (additional information, not needed for the hit delay)
        tot = np.zeros(shape=(len(plsr_dac_values),), dtype=np.float16)  # result array
        for node in out_file_h5.root.HistsTot:  # loop over the ToT hists for all PlsrDAC values
            plsr_dac = int(re.search(r'\d+', node.name).group())
            plsr_dac_index = np.where(plsr_dac_values == plsr_dac)[0][0]
            tot_data = node[:]
            tot[plsr_dac_index] = get_mean_from_histogram(tot_data, range(16))

        # Store the data
        out = out_file_h5.create_carray(out_file_h5.root, name='HistPixelTimewalkPerPlsrDac', title='Time walk per pixel and PlsrDAC', atom=tb.Atom.from_dtype(timewalk.dtype), shape=timewalk.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        out_2 = out_file_h5.create_carray(out_file_h5.root, name='HistPixelHitDelayPerPlsrDac', title='Hit delay per pixel and PlsrDAC', atom=tb.Atom.from_dtype(hit_delay.dtype), shape=hit_delay.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        out_3 = out_file_h5.create_carray(out_file_h5.root, name='HistTotPerPlsrDac', title='Tot per PlsrDAC', atom=tb.Atom.from_dtype(tot.dtype), shape=tot.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        out.attrs.dimensions = 'column, row, PlsrDAC'
        out.attrs.delay_calibration = step_size
        out.attrs.delay_calibration_error = step_size_error
        out.attrs.plsr_dac_values = plsr_dac_values
        out_2.attrs.dimensions = 'column, row, PlsrDAC'
        out_2.attrs.delay_calibration = step_size
        out_2.attrs.delay_calibration_error = step_size_error
        out_2.attrs.plsr_dac_values = plsr_dac_values
        out_3.attrs.dimensions = 'PlsrDAC'
        out_3.attrs.plsr_dac_values = plsr_dac_values
        out[:] = timewalk.filled(fill_value=np.NaN)
        out_2[:] = hit_delay.filled(fill_value=np.NaN)
        out_3[:] = tot

    # Mask the pixels that have no valid data and create plots with the time walk and hit delay for all pixels
    with tb.open_file(raw_data_file + '_analyzed.h5', mode="r") as in_file_h5:
        def plsr_dac_to_charge(plsr_dac, vcal_c0, vcal_c1, c_high):  # calibration values are taken from the interpreted file
            voltage = vcal_c0 + vcal_c1 * plsr_dac
            return voltage * c_high / 0.16022

        def plot_hit_delay(hist_3d, charge_values, title, xlabel, ylabel, filename, threshold=None, tot_values=None):
            # Interpolate ToT values for the second ToT axis
            interpolation = interp1d(tot_values, charge_values, kind='slinear', bounds_error=True)
            tot = np.arange(16)
            tot = tot[np.logical_and(tot >= np.min(tot_values), tot <= np.max(tot_values))]

            array = np.transpose(hist_3d, axes=(2, 1, 0)).reshape(hist_3d.shape[2], hist_3d.shape[0] * hist_3d.shape[1])
            y = np.mean(array, axis=1)
            y_err = np.std(array, axis=1)

            fig = Figure()
            FigureCanvas(fig)
            ax = fig.add_subplot(111)
            fig.patch.set_facecolor('white')
            ax.grid(True)
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)
            ax.set_xlim((0, np.max(charge_values)))
            ax.set_ylim((np.min(y - y_err), np.max(y + y_err)))
            ax.plot(charge_values, y, '.-', color='black', label=title)
            if threshold is not None:
                ax.plot([threshold, threshold], [np.min(y - y_err), np.max(y + y_err)], linestyle='--', color='black', label='Threshold\n%d e' % (threshold))
            ax.fill_between(charge_values, y - y_err, y + y_err, color='gray', alpha=0.5, facecolor='gray', label='RMS')
            ax2 = ax.twiny()
            ax2.set_xlabel("ToT")

            ticklab = ax2.xaxis.get_ticklabels()[0]
            trans = ticklab.get_transform()
            ax2.xaxis.set_label_coords(np.max(charge_values), 1, transform=trans)
            ax2.set_xlim(ax.get_xlim())
            ax2.set_xticks(interpolation(tot))
            ax2.set_xticklabels([str(int(i)) for i in tot])
            ax.text(0.5, 1.07, title, horizontalalignment='center', fontsize=18, transform=ax2.transAxes)
            ax.legend()
            filename.savefig(fig)

        plsr_dac_values = in_file_h5.root.PixelHistsMeanRelBcid._v_attrs.plsr_dac_values
        charge_values = plsr_dac_to_charge(np.array(plsr_dac_values), vcal_c0, vcal_c1, c_high)
        hist_timewalk = in_file_h5.root.HistPixelTimewalkPerPlsrDac[:, :, :]
        hist_hit_delay = in_file_h5.root.HistPixelHitDelayPerPlsrDac[:, :, :]
        tot = in_file_h5.root.HistTotPerPlsrDac[:]

        hist_timewalk = np.ma.masked_invalid(hist_timewalk)
        hist_hit_delay = np.ma.masked_invalid(hist_hit_delay)

        output_pdf = PdfPages(raw_data_file + '_analyzed.pdf')
        plot_hit_delay(np.swapaxes(hist_timewalk, 0, 1), charge_values=charge_values, title='Time walk', xlabel='Charge [e]', ylabel='Time walk [ns]', filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot)
        plot_hit_delay(np.swapaxes(hist_hit_delay, 0, 1), charge_values=charge_values, title='Hit delay', xlabel='Charge [e]', ylabel='Hit delay [ns]', filename=output_pdf, threshold=np.amin(charge_values), tot_values=tot)
        plot_scurves(np.swapaxes(hist_timewalk, 0, 1), scan_parameters=charge_values, title='Timewalk of the FE-I4', scan_parameter_name='Charge [e]', ylabel='Timewalk [ns]', min_x=0, filename=output_pdf)
        plot_scurves(np.swapaxes(hist_hit_delay[:, :, :], 0, 1), scan_parameters=charge_values, title='Hit delay (T0) with internal charge injection\nof the FE-I4', scan_parameter_name='Charge [e]', ylabel='Hit delay [ns]', min_x=0, filename=output_pdf)

        for i in [0, 1, len(plsr_dac_values) / 4, len(plsr_dac_values) / 2, -1]:  # plot 2d hist at min, 1/4, 1/2 and max PlsrDAC setting
            plot_three_way(hist_timewalk[:, :, i], title='Time walk at %.0f e' % (charge_values[i]), x_axis_title='Time walk [ns]', filename=output_pdf)
            plot_three_way(hist_hit_delay[:, :, i], title='Hit delay (T0) with internal charge injection at %.0f e' % (charge_values[i]), x_axis_title='Hit delay [ns]', minimum=np.amin(hist_hit_delay[:, :, i]), maximum=np.max(hist_hit_delay[:, :, i]), filename=output_pdf)
        output_pdf.close()
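

# Standalone sketch of the charge conversion used in analyze_hit_delay above: the PlsrDAC setting
# is turned into an injection voltage with the VCAL calibration (vcal_c0 + vcal_c1 * plsr_dac, in mV)
# and then into electrons via the injection capacitance C_High; dividing by 0.16022 converts
# mV * fF into electrons. The helper name and the calibration constants below are made-up example values.
def _plsr_dac_to_electrons(plsr_dac, vcal_c0=0.0, vcal_c1=1.5, c_high=4.0):
    voltage_mv = vcal_c0 + vcal_c1 * plsr_dac  # injection voltage in mV
    return voltage_mv * c_high / 0.16022  # e.g. PlsrDAC 100 with these example constants gives about 3.7 ke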
def analyze(self):
    with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
        analyze_raw_data.create_tot_hist = True
        analyze_raw_data.create_mean_tot_hist = True
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.plot_histograms()
        analyze_raw_data.interpreter.print_summary()

        with tb.open_file(analyze_raw_data._analyzed_data_file, 'r') as in_file_h5:
            meta_data = in_file_h5.root.meta_data[:]
            tot_mean = np.swapaxes(in_file_h5.root.HistMeanTot[:], 1, 0)
            scan_parameters_dict = get_scan_parameter(meta_data)
            inner_loop_parameter_values = scan_parameters_dict[next(reversed(scan_parameters_dict))]  # inner loop parameter name is unknown
            # calculate mean ToT arrays
            tot_mean_all_pix = np.nanmean(tot_mean, axis=(0, 1))
            tot_error_all_pix = np.nanstd(tot_mean, axis=(0, 1))

        plot_scurves(tot_mean, inner_loop_parameter_values, "ToT calibration", "ToT", 15, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf)
        plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_all_pix, tot_error=tot_error_all_pix, filename=analyze_raw_data.output_pdf, title="Mean charge calibration of %d pixel(s)" % np.count_nonzero(~np.all(np.isnan(tot_mean), axis=2)))
        # selecting pixels with non-nan entries
        col_row_non_nan = np.nonzero(~np.all(np.isnan(tot_mean), axis=2))
        plot_pixel_calibrations = np.dstack(col_row_non_nan)[0]
        # generate index array
        pixel_indices = np.arange(plot_pixel_calibrations.shape[0])
        plot_n_pixels = 10  # number of pixels at the beginning, center and end of the array
        np.random.seed(0)
        if pixel_indices.size - 2 * plot_n_pixels >= 0:
            random_pixel_indices = np.sort(np.random.choice(pixel_indices[plot_n_pixels:-plot_n_pixels], min(plot_n_pixels, pixel_indices.size - 2 * plot_n_pixels), replace=False))
        else:
            random_pixel_indices = np.array([], dtype=np.int)
        selected_pixel_indices = np.unique(np.hstack([pixel_indices[:plot_n_pixels], random_pixel_indices, pixel_indices[-plot_n_pixels:]]))
        # plotting individual pixels
        for (column, row) in plot_pixel_calibrations[selected_pixel_indices]:
            logging.info("Plotting charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1))
            tot_mean_single_pix = tot_mean[column, row, :]
            plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_single_pix, filename=analyze_raw_data.output_pdf, title="Charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1))
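

# Pixel selection used in analyze() above, as a standalone sketch: a pixel enters the plot list
# if it has at least one non-NaN mean ToT value along the scan parameter axis (the helper name is
# made up, the logic mirrors the np.isnan / np.nonzero lines above).
def _pixels_with_tot_data(tot_mean):
    valid_mask = ~np.all(np.isnan(tot_mean), axis=2)  # (column, row) mask of pixels with any valid entry
    return np.dstack(np.nonzero(valid_mask))[0]  # array of (column_index, row_index) pairs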
def create_hitor_calibration(output_filename):
    logging.info('Analyze and plot results of %s', output_filename)

    def plot_calibration(col_row_combinations, scan_parameter, calibration_data, filename):  # result calibration plot function
        for index, (column, row) in enumerate(col_row_combinations):
            logging.info("Plot calibration for pixel " + str(column) + '/' + str(row))
            fig = Figure()
            FigureCanvas(fig)
            ax = fig.add_subplot(111)
            fig.patch.set_facecolor('white')
            ax.grid(True)
            ax.errorbar(scan_parameter, calibration_data[column - 1, row - 1, :, 0] * 25. + 25., yerr=[calibration_data[column - 1, row - 1, :, 2] * 25, calibration_data[column - 1, row - 1, :, 2] * 25], fmt='o', label='FE-I4 ToT [ns]')
            ax.errorbar(scan_parameter, calibration_data[column - 1, row - 1, :, 1] * 1.5625, yerr=[calibration_data[column - 1, row - 1, :, 3] * 1.5625, calibration_data[column - 1, row - 1, :, 3] * 1.5625], fmt='o', label='TDC ToT [ns]')
            ax.set_title('Calibration for pixel ' + str(column) + '/' + str(row))
            ax.set_xlabel('Charge [PlsrDAC]')
            ax.set_ylabel('TOT')
            ax.legend(loc=0)
            filename.savefig(fig)
            if index > 100:  # stop after too many plots
                logging.info('Do not create pixel plots for more than 100 pixels to save time')
                break

    with AnalyzeRawData(raw_data_file=output_filename, create_pdf=True) as analyze_raw_data:  # interpret the raw data file
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameters to do in-RAM histogramming
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.create_tdc_hist = True
        analyze_raw_data.align_at_tdc = True  # align events at TDC words, the first word of an event has to be a TDC word
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()
        analyze_raw_data.plot_histograms()
        n_injections = analyze_raw_data.n_injections  # store the number of injections for a later cross check

    with tb.open_file(output_filename + '_interpreted.h5', 'r') as in_file_h5:  # get scan parameters from the interpreted file
        meta_data = in_file_h5.root.meta_data[:]
        hits = in_file_h5.root.Hits[:]
        scan_parameters_dict = get_scan_parameter(meta_data)
        inner_loop_parameter_values = scan_parameters_dict[next(reversed(scan_parameters_dict))]  # inner loop parameter name is unknown
        scan_parameter_names = scan_parameters_dict.keys()
        col_row_combinations = get_unique_scan_parameter_combinations(in_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True)
        meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameter_names)
        parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameter_names)
        event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number'])
        event_ranges_per_parameter = np.column_stack((parameter_values, event_number_ranges))
        event_numbers = hits['event_number'].copy()  # create a contiguous array, otherwise np.searchsorted is too slow, see http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays

        with tb.openFile(output_filename + "_calibration.h5", mode="w") as calibration_data_file:
            logging.info('Create calibration')
            output_pdf = PdfPages(output_filename + "_calibration.pdf")
            calibration_data = np.zeros(shape=(80, 336, len(inner_loop_parameter_values), 4), dtype='f4')  # result of the calibration: per column, row and PlsrDAC value the mean discrete ToT, mean TDC ToT, RMS discrete ToT and RMS TDC ToT

            progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(event_ranges_per_parameter), term_width=80)
            progress_bar.start()

            for index, (parameter_values, event_start, event_stop) in enumerate(event_ranges_per_parameter):
                if event_stop is None:  # happens for the last chunk
                    event_stop = hits[-1]['event_number']
                array_index = np.searchsorted(event_numbers, np.array([event_start, event_stop]))
                actual_hits = hits[array_index[0]:array_index[1]]
                actual_col, actual_row, parameter_value = parameter_values
                if len(actual_hits[np.logical_and(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row)]):
                    logging.warning('There are %d hits from not selected pixels in the data', len(actual_hits[np.logical_and(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row)]))
                actual_hits = actual_hits[np.logical_and(actual_hits['column'] == actual_col, actual_hits['row'] == actual_row)]
                actual_tdc_hits = actual_hits[(actual_hits['event_status'] & 0b0000111110011100) == 0b0000000100000000]  # only take hits from good events (one TDC word only, no error)
                actual_tot_hits = actual_hits[(actual_hits['event_status'] & 0b0000100010011100) == 0b0000000000000000]  # only take hits from good events for ToT
                tot, tdc = actual_tot_hits['tot'], actual_tdc_hits['TDC']
                if tdc.shape[0] != n_injections and index == event_ranges_per_parameter.shape[0] - 1:
                    logging.warning('There are %d != %d TDC hits for %s = %s', tdc.shape[0], n_injections, str(scan_parameter_names), str(parameter_values))

                inner_loop_scan_parameter_index = np.where(parameter_value == inner_loop_parameter_values)[0][0]  # translate the scan parameter value to an index for the result histogram
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 0] = np.mean(tot)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 1] = np.mean(tdc)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 2] = np.std(tot)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 3] = np.std(tdc)
                progress_bar.update(index)

            calibration_data_out = calibration_data_file.createCArray(calibration_data_file.root, name='HitOrCalibration', title='Hit OR calibration data', atom=tb.Atom.from_dtype(calibration_data.dtype), shape=calibration_data.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            calibration_data_out[:] = calibration_data
            calibration_data_out.attrs.dimensions = scan_parameter_names
            calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values
            plot_calibration(col_row_combinations, scan_parameter=inner_loop_parameter_values, calibration_data=calibration_data, filename=output_pdf)
            output_pdf.close()
            progress_bar.finish()
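

# Unit conversions used in plot_calibration above, as a standalone sketch (the helper names are
# made up): the discrete FE-I4 ToT code is plotted in nanoseconds of 25 ns bunch crossings with the
# same offset as in the errorbar call, the TDC counts are plotted in 1.5625 ns bins, which presumably
# corresponds to a 640 MHz TDC sampling clock.
def _fe_tot_code_to_ns(tot_code):
    return tot_code * 25. + 25.


def _tdc_counts_to_ns(tdc_counts):
    return tdc_counts * 1.5625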