def plot_result(x_p, y_p, y_p_e, smoothed_data, smoothed_data_diff, filename=None):
    ''' Plot the profile-histogrammed data, the smoothing spline, its derivative and the MPV.

    The most probable value (MPV) is taken at the minimum of the smoothed
    derivative, i.e. at the steepest decrease of the occupancy curve.

    Parameters
    ----------
    x_p, y_p : array like
        data points (x,y)
    y_p_e : array like
        error bars in y
    smoothed_data : array like
        spline-smoothed y values at x_p
    smoothed_data_diff : array like
        first derivative of the smoothing spline at x_p
    filename : None or PdfPages-like object
        if None the plot is shown interactively, otherwise the current
        figure is appended via filename.savefig()

    Returns
    -------
    smoothed_data_diff : array like
        the derivative data, unchanged (for call chaining)
    '''
    logging.info('Plot results')
    plt.close()
    p1 = plt.errorbar(x_p * analysis_configuration['vcal_calibration'], y_p, yerr=y_p_e, fmt='o')  # plot data with error bars
    p2, = plt.plot(x_p * analysis_configuration['vcal_calibration'], smoothed_data, '-r')  # plot smoothed data
    # The derivative is negative at the falling edge; this factor is therefore
    # negative and flips the curve upwards so it can share the occupancy axis.
    factor = np.amax(y_p) / np.amin(smoothed_data_diff) * 1.1
    p3, = plt.plot(x_p * analysis_configuration['vcal_calibration'], factor * smoothed_data_diff, '-', lw=2)  # plot differentiated data
    # MPV = steepest decrease. Use the derivative that was passed in instead of
    # recomputing the smoothing spline: the caller computed smoothed_data_diff
    # with exactly these arguments, and argmax(-a) == argmin(a).
    mpv_index = np.argmin(smoothed_data_diff)
    p4, = plt.plot([x_p[mpv_index] * analysis_configuration['vcal_calibration'], x_p[mpv_index] * analysis_configuration['vcal_calibration']], [0, factor * smoothed_data_diff[mpv_index]], 'k-', lw=2)  # vertical MPV marker
    text = 'MPV ' + str(int(x_p[mpv_index] * analysis_configuration['vcal_calibration'])) + ' e'
    plt.text(1.01 * x_p[mpv_index] * analysis_configuration['vcal_calibration'], -10. * smoothed_data_diff[mpv_index], text, ha='left')
    plt.legend([p1, p2, p3, p4], ['data', 'smoothed spline', 'spline differentiation', text], prop={'size': 12}, loc=0)
    plt.title('\'Single hit cluster\'-occupancy for different pixel thresholds')
    plt.xlabel('Pixel threshold [e]')
    plt.ylabel('Single hit cluster occupancy [a.u.]')
    plt.ylim(0, np.amax(y_p) * 1.15)
    if filename is None:
        plt.show()
    else:
        filename.savefig(plt.gcf())
    return smoothed_data_diff
def get_calibration_correction(tdc_calibration, tdc_calibration_values, filename_new_calibration):  # correct the TDC calibration with the TDC calib in filename_new_calibration by shifting the means
    ''' Determine the mean offset between an old and a new TDC charge calibration.

    Parameters
    ----------
    tdc_calibration : array like
        the old per-pixel TDC calibration, shape (col, row, n_PlsrDAC)
    tdc_calibration_values : array like
        the PlsrDAC values belonging to the calibration points
    filename_new_calibration : str
        HDF5 file with the new HitOrCalibration data

    Returns
    -------
    offset_mean : array
        mean calibration offset (new - old) per PlsrDAC point

    Raises
    ------
    NotImplementedError
        if the new calibration was not taken at the same PlsrDAC values
    '''
    import os  # local import so this function is self-contained
    with tb.open_file(filename_new_calibration, 'r') as in_file_2:
        charge_calibration_1, charge_calibration_2 = tdc_calibration, in_file_2.root.HitOrCalibration[:, :, :, 1]
        plsr_dacs = tdc_calibration_values
        if not np.all(plsr_dacs == in_file_2.root.HitOrCalibration._v_attrs.scan_parameter_values):
            raise NotImplementedError('The check calibration file has to have the same PlsrDAC values')
        # Valid pixel have a calibration in the new and the old calibration
        valid_pixel = np.where(~np.all((charge_calibration_1 == 0), axis=2) & ~np.all(np.isnan(charge_calibration_1), axis=2) & ~np.all((charge_calibration_2 == 0), axis=2) & ~np.all(np.isnan(charge_calibration_2), axis=2))
        mean_charge_calibration = np.nanmean(charge_calibration_2[valid_pixel], axis=0)
        offset_mean = np.nanmean((charge_calibration_2[valid_pixel] - charge_calibration_1[valid_pixel]), axis=0)
        # slope dPlsrDAC/dTDC, used to express the TDC offset in PlsrDAC units
        dPlsrDAC_dTDC = analysis_utils.smooth_differentiation(plsr_dacs, mean_charge_calibration, order=3, smoothness=0, derivation=1)
        plt.clf()
        plt.plot(plsr_dacs, offset_mean / dPlsrDAC_dTDC, '.-', label='PlsrDAC')
        plt.plot(plsr_dacs, offset_mean, '.-', label='TDC')
        plt.grid()
        plt.xlabel('PlsrDAC')
        plt.ylabel('Mean calibration offset')
        plt.legend(loc=0)
        plt.title('Mean offset between TDC calibration data, new - old ')
        # os.path.splitext instead of the former filename[:-3]: the slice
        # silently assumed a 3-character extension ('.h5')
        plt.savefig(os.path.splitext(filename_new_calibration)[0] + '.pdf')
        plt.show()
    return offset_mean
def get_calibration_correction(tdc_calibration, tdc_calibration_values, filename_new_calibration):  # correct the TDC calibration with the TDC calib in filename_new_calibration by shifting the means
    ''' Determine the mean offset between an old and a new TDC charge calibration
    and write the comparison plot into a PDF next to the new calibration file.

    Parameters
    ----------
    tdc_calibration : array like
        the old per-pixel TDC calibration, shape (col, row, n_PlsrDAC)
    tdc_calibration_values : array like
        the PlsrDAC values belonging to the calibration points
    filename_new_calibration : str
        HDF5 file with the new HitOrCalibration data; the plot is saved to
        the same path with a '.pdf' extension

    Returns
    -------
    offset_mean : array
        mean calibration offset (new - old) per PlsrDAC point

    Raises
    ------
    NotImplementedError
        if the new calibration was not taken at the same PlsrDAC values
    '''
    with tb.open_file(filename_new_calibration, 'r') as in_file_2:
        with PdfPages(os.path.splitext(filename_new_calibration)[0] + '.pdf') as output_pdf:
            # index 1 of the last axis selects the charge values of the HitOr calibration
            charge_calibration_1, charge_calibration_2 = tdc_calibration, in_file_2.root.HitOrCalibration[:, :, :, 1]
            plsr_dacs = tdc_calibration_values
            if not np.all(plsr_dacs == in_file_2.root.HitOrCalibration._v_attrs.scan_parameter_values):
                raise NotImplementedError('The check calibration file has to have the same PlsrDAC values')
            # Valid pixel have a calibration in the new and the old calibration
            valid_pixel = np.where(~np.all((charge_calibration_1 == 0), axis=2) & ~np.all(np.isnan(charge_calibration_1), axis=2) & ~np.all((charge_calibration_2 == 0), axis=2) & ~np.all(np.isnan(charge_calibration_2), axis=2))
            mean_charge_calibration = np.nanmean(charge_calibration_2[valid_pixel], axis=0)
            offset_mean = np.nanmean((charge_calibration_2[valid_pixel] - charge_calibration_1[valid_pixel]), axis=0)
            # slope dPlsrDAC/dTDC of the mean calibration curve, used to express
            # the TDC offset in PlsrDAC units
            dPlsrDAC_dTDC = analysis_utils.smooth_differentiation(plsr_dacs, mean_charge_calibration, order=3, smoothness=0, derivation=1)
            # object-oriented matplotlib API: draw onto an explicit Figure so no
            # global pyplot state is touched
            fig = Figure()
            FigureCanvas(fig)
            ax = fig.add_subplot(111)
            ax.plot(plsr_dacs, offset_mean / dPlsrDAC_dTDC, '.-', label='PlsrDAC')
            ax.plot(plsr_dacs, offset_mean, '.-', label='TDC')
            ax.grid(True)
            ax.set_xlabel('PlsrDAC')
            ax.set_ylabel('Mean calibration offset')
            ax.legend(loc=0)
            ax.set_title('Mean offset between TDC calibration data, new - old ')
            output_pdf.savefig(fig)
    return offset_mean
def get_calibration_correction(tdc_calibration, tdc_calibration_values, filename_new_calibration):
    ''' Compare an old TDC charge calibration with a new one and plot the mean
    offset (old - new) per PlsrDAC point.

    Parameters
    ----------
    tdc_calibration : array like
        the old per-pixel TDC calibration, shape (col, row, n_PlsrDAC)
    tdc_calibration_values : array like
        the PlsrDAC values belonging to the calibration points
    filename_new_calibration : str
        HDF5 file holding the new HitOrCalibration data

    Returns
    -------
    offset_mean : array
        mean calibration offset (old - new) per PlsrDAC point

    Raises
    ------
    NotImplementedError
        if the new calibration was not taken at the same PlsrDAC values
    '''
    with tb.open_file(filename_new_calibration, 'r') as calib_file:
        old_calibration = tdc_calibration
        new_calibration = calib_file.root.HitOrCalibration[:, :, :, 1]
        plsr_dacs = tdc_calibration_values
        stored_plsr_dacs = calib_file.root.HitOrCalibration._v_attrs.scan_parameter_values
        if not np.all(plsr_dacs == stored_plsr_dacs):
            raise NotImplementedError('The check calibration file has to have the same PlsrDAC values')
        # valid pixel have a calibration in the new and the old calibration
        has_old = old_calibration.sum(axis=2) > 0
        has_new = new_calibration.sum(axis=2) > 0
        valid_pixel = np.where(has_old & has_new)
        mean_charge_calibration = new_calibration[valid_pixel].mean(axis=0)
        offset_mean = (old_calibration[valid_pixel] - new_calibration[valid_pixel]).mean(axis=0)
        # slope dPlsrDAC/dTDC of the mean new calibration curve
        dPlsrDAC_dTDC = analysis_utils.smooth_differentiation(plsr_dacs, mean_charge_calibration, order=3, smoothness=0, derivation=1)
        plt.clf()
        plt.plot(plsr_dacs, offset_mean / dPlsrDAC_dTDC, '.-', label='PlsrDAC')  # offset expressed in PlsrDAC units
        plt.plot(plsr_dacs, offset_mean, '.-', label='TDC')  # offset in TDC units
        plt.grid()
        plt.xlabel('PlsrDAC')
        plt.ylabel('Mean calibration offset')
        plt.legend(loc=0)
        plt.title('Mean offset between TDC calibration data, old - new ')
        plt.show()
    return offset_mean
def analyze_injected_charge(data_analyzed_file):
    ''' Convert a GDAC-threshold source scan into a charge spectrum.

    Reads the occupancy histogram and the GDAC scan parameter from
    data_analyzed_file, optionally normalizes the rate and corrects for the
    cluster-size distribution, interpolates a per-pixel threshold for every
    GDAC setting from a threshold calibration file, and builds a profile
    histogram of hits vs. threshold. The spline-smoothed profile and its
    derivative (the charge spectrum) are written to '<file>_result.h5' and
    plotted into '<file>.pdf'.

    Relies on the module-level dict ``analysis_configuration`` and — when
    rate normalization / cluster rate correction is enabled — on the names
    ``hit_file`` and ``cluster_sizes_file``, which are not defined in this
    function (presumably module-level globals; TODO confirm).
    '''
    logging.info('Analyze the injected charge')
    with tb.openFile(data_analyzed_file, mode="r") as in_file_h5:  # tb.openFile: PyTables 2.x API
        occupancy = in_file_h5.root.HistOcc[:].T
        gdacs = analysis_utils.get_scan_parameter(in_file_h5.root.meta_data[:])['GDAC']
        with PdfPages(data_analyzed_file[:-3] + '.pdf') as plot_file:  # [:-3] assumes a 3-character extension ('.h5')
            plotting.plot_scatter(gdacs, occupancy.sum(axis=(0, 1)), title='Single pixel hit rate at different thresholds', x_label='Threshold setting [GDAC]', y_label='Single pixel hit rate', log_x=True, filename=plot_file)
            if analysis_configuration['input_file_calibration']:
                with tb.openFile(analysis_configuration['input_file_calibration'], mode="r") as in_file_calibration_h5:  # read calibration file from calibrate_threshold_gdac scan
                    mean_threshold_calibration = in_file_calibration_h5.root.MeanThresholdCalibration[:]
                    threshold_calibration_array = in_file_calibration_h5.root.HistThresholdCalibration[:]
                    gdac_range_calibration = np.array(in_file_calibration_h5.root.HistThresholdCalibration._v_attrs.scan_parameter_values)
                    gdac_range_source_scan = gdacs
                    # Select data that is within the given GDAC range, (min_gdac, max_gdac)
                    sel = np.where(np.logical_and(gdac_range_source_scan >= analysis_configuration['min_gdac'], gdac_range_source_scan <= analysis_configuration['max_gdac']))[0]
                    gdac_range_source_scan = gdac_range_source_scan[sel]
                    occupancy = occupancy[:, :, sel]
                    # apply the same GDAC window to the calibration data
                    sel = np.where(np.logical_and(gdac_range_calibration >= analysis_configuration['min_gdac'], gdac_range_calibration <= analysis_configuration['max_gdac']))[0]
                    gdac_range_calibration = gdac_range_calibration[sel]
                    threshold_calibration_array = threshold_calibration_array[:, :, sel]
                    logging.info('Analyzing source scan data with %d GDAC settings from %d to %d with minimum step sizes from %d to %d', len(gdac_range_source_scan), np.min(gdac_range_source_scan), np.max(gdac_range_source_scan), np.min(np.gradient(gdac_range_source_scan)), np.max(np.gradient(gdac_range_source_scan)))
                    logging.info('Use calibration data with %d GDAC settings from %d to %d with minimum step sizes from %d to %d', len(gdac_range_calibration), np.min(gdac_range_calibration), np.max(gdac_range_calibration), np.min(np.gradient(gdac_range_calibration)), np.max(np.gradient(gdac_range_calibration)))
                    # rate_normalization of the total hit number for each GDAC setting
                    rate_normalization = 1.
                    if analysis_configuration['normalize_rate']:
                        # NOTE(review): hit_file is not defined in this function — verify it is a module global
                        rate_normalization = analysis_utils.get_rate_normalization(hit_file=hit_file, cluster_file=hit_file, parameter='GDAC', reference=analysis_configuration['normalization_reference'], plot=analysis_configuration['plot_normalization'])
                    # correcting the hit numbers for the different cluster sizes
                    correction_factors = 1.
                    if analysis_configuration['use_cluster_rate_correction']:
                        # NOTE(review): correction_h5 is only closed at the very end of the
                        # function; it leaks if an exception is raised in between
                        correction_h5 = tb.openFile(cluster_sizes_file, mode="r")
                        cluster_size_histogram = correction_h5.root.AllHistClusterSize[:]
                        correction_factors = analysis_utils.get_hit_rate_correction(gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_source_scan, cluster_size_histogram=cluster_size_histogram)
                        if analysis_configuration['plot_cluster_sizes']:
                            plot_cluster_sizes(correction_h5, in_file_calibration_h5, gdac_range=gdac_range_source_scan)
                    pixel_thresholds = analysis_utils.get_pixel_thresholds_from_calibration_array(gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_calibration, threshold_calibration_array=threshold_calibration_array)  # interpolates the threshold at the source scan GDAC setting from the calibration
                    pixel_hits = occupancy  # create hit array with shape (col, row, ...)
                    pixel_hits = pixel_hits * correction_factors * rate_normalization
                    # choose region with pixels that have a sufficient occupancy but are not too hot
                    good_pixel = analysis_utils.select_good_pixel_region(pixel_hits, col_span=analysis_configuration['col_span'], row_span=analysis_configuration['row_span'], min_cut_threshold=analysis_configuration['min_cut_threshold'], max_cut_threshold=analysis_configuration['max_cut_threshold'])
                    pixel_mask = ~np.ma.getmaskarray(good_pixel)
                    selected_pixel_hits = pixel_hits[pixel_mask, :]  # reduce the data to pixels that are in the good pixel region
                    selected_pixel_thresholds = pixel_thresholds[pixel_mask, :]  # reduce the data to pixels that are in the good pixel region
                    plotting.plot_occupancy(good_pixel.T, title='Selected pixel for analysis (' + str(len(selected_pixel_hits)) + ')', filename=plot_file)
                    # reshape to one dimension
                    x = selected_pixel_thresholds.flatten()
                    y = selected_pixel_hits.flatten()
                    # nothing should be NAN/INF, NAN/INF is not supported yet
                    # NOTE(review): np.isfinite() always returns an array of the input's shape,
                    # so this condition can never be True; np.all(np.isfinite(x)) was probably meant
                    if np.isfinite(x).shape != x.shape or np.isfinite(y).shape != y.shape:
                        logging.warning('There are pixels with NaN or INF threshold or hit values, analysis will fail')
                    # calculated profile histogram
                    x_p, y_p, y_p_e = analysis_utils.get_profile_histogram(x, y, n_bins=analysis_configuration['n_bins'])  # profile histogram data
                    # select only the data point where the calibration worked
                    selected_data = np.logical_and(x_p > analysis_configuration['min_thr'] / analysis_configuration['vcal_calibration'], x_p < analysis_configuration['max_thr'] / analysis_configuration['vcal_calibration'])
                    x_p = x_p[selected_data]
                    y_p = y_p[selected_data]
                    y_p_e = y_p_e[selected_data]
                    if len(y_p_e[y_p_e == 0]) != 0:
                        logging.warning('There are bins without any data, guessing the error bars')
                        y_p_e[y_p_e == 0] = np.amin(y_p_e[y_p_e != 0])  # smallest non-zero error as a guess
                    # spline fit of the profile (derivation=0) and its first derivative (derivation=1)
                    smoothed_data = analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=0)
                    smoothed_data_diff = analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=1)
                    with tb.openFile(data_analyzed_file[:-3] + '_result.h5', mode="w") as out_file_h5:
                        result_1 = np.rec.array(np.column_stack((x_p, y_p, y_p_e)), dtype=[('charge', float), ('count', float), ('count_error', float)])
                        result_2 = np.rec.array(np.column_stack((x_p, smoothed_data)), dtype=[('charge', float), ('count', float)])
                        result_3 = np.rec.array(np.column_stack((x_p, -smoothed_data_diff)), dtype=[('charge', float), ('count', float)])
                        out_1 = out_file_h5.create_table(out_file_h5.root, name='ProfileHistogram', description=result_1.dtype, title='Single pixel count rate combined with a profile histogram', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        out_2 = out_file_h5.create_table(out_file_h5.root, name='ProfileHistogramSpline', description=result_2.dtype, title='Single pixel count rate combined with a profile histogram and spline smoothed', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        out_3 = out_file_h5.create_table(out_file_h5.root, name='ChargeHistogram', description=result_3.dtype, title='Charge histogram with threshold method and per pixel calibration', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        # store the analysis configuration with every result table
                        # NOTE(review): iteritems() is Python 2 only
                        for key, value in analysis_configuration.iteritems():
                            out_1.attrs[key] = value
                            out_2.attrs[key] = value
                            out_3.attrs[key] = value
                        out_1.append(result_1)
                        out_2.append(result_2)
                        out_3.append(result_3)
                    plot_result(x_p, y_p, y_p_e, smoothed_data, smoothed_data_diff)  # no filename given, so the result plot is shown interactively
                    # calculate and plot mean results
                    x_mean = analysis_utils.get_mean_threshold_from_calibration(gdac_range_source_scan, mean_threshold_calibration)
                    y_mean = selected_pixel_hits.mean(axis=(0))
                    plotting.plot_scatter(np.array(gdac_range_source_scan), y_mean, log_x=True, plot_range=None, title='Mean single pixel cluster rate at different thresholds', x_label='threshold setting [GDAC]', y_label='mean single pixel cluster rate', filename=plot_file)
                    plotting.plot_scatter(x_mean * analysis_configuration['vcal_calibration'], y_mean, plot_range=(analysis_configuration['min_thr'], analysis_configuration['max_thr']), title='Mean single pixel cluster rate at different thresholds', x_label='mean threshold [e]', y_label='mean single pixel cluster rate', filename=plot_file)
    if analysis_configuration['use_cluster_rate_correction']:
        correction_h5.close()
def analyze_injected_charge(data_analyzed_file):
    ''' Convert a GDAC-threshold source scan into a charge spectrum.

    Reads the occupancy histogram and the GDAC scan parameter from
    data_analyzed_file, optionally normalizes the rate and corrects for the
    cluster-size distribution, interpolates a per-pixel threshold for every
    GDAC setting from a threshold calibration file, and builds a profile
    histogram of hits vs. threshold. The spline-smoothed profile and its
    derivative (the charge spectrum) are written to '<file>_result.h5' and
    plotted into '<file>.pdf'.

    Relies on the module-level dict ``analysis_configuration`` and — when
    rate normalization / cluster rate correction is enabled — on the names
    ``hit_file`` and ``cluster_sizes_file``, which are not defined in this
    function (presumably module-level globals; TODO confirm).
    '''
    logging.info('Analyze the injected charge')
    with tb.openFile(data_analyzed_file, mode="r") as in_file_h5:  # tb.openFile: PyTables 2.x API
        occupancy = in_file_h5.root.HistOcc[:].T
        gdacs = analysis_utils.get_scan_parameter(in_file_h5.root.meta_data[:])['GDAC']
        with PdfPages(data_analyzed_file[:-3] + '.pdf') as plot_file:  # [:-3] assumes a 3-character extension ('.h5')
            plotting.plot_scatter(gdacs, occupancy.sum(axis=(0, 1)), title='Single pixel hit rate at different thresholds', x_label='Threshold setting [GDAC]', y_label='Single pixel hit rate', log_x=True, filename=plot_file)
            if analysis_configuration['input_file_calibration']:
                with tb.openFile(analysis_configuration['input_file_calibration'], mode="r") as in_file_calibration_h5:  # read calibration file from calibrate_threshold_gdac scan
                    mean_threshold_calibration = in_file_calibration_h5.root.MeanThresholdCalibration[:]
                    threshold_calibration_array = in_file_calibration_h5.root.HistThresholdCalibration[:]
                    gdac_range_calibration = np.array(in_file_calibration_h5.root.HistThresholdCalibration._v_attrs.scan_parameter_values)
                    gdac_range_source_scan = gdacs
                    # Select data that is within the given GDAC range, (min_gdac, max_gdac)
                    sel = np.where(np.logical_and(gdac_range_source_scan >= analysis_configuration['min_gdac'], gdac_range_source_scan <= analysis_configuration['max_gdac']))[0]
                    gdac_range_source_scan = gdac_range_source_scan[sel]
                    occupancy = occupancy[:, :, sel]
                    # same GDAC window for the calibration data
                    sel = np.where(np.logical_and(gdac_range_calibration >= analysis_configuration['min_gdac'], gdac_range_calibration <= analysis_configuration['max_gdac']))[0]
                    gdac_range_calibration = gdac_range_calibration[sel]
                    threshold_calibration_array = threshold_calibration_array[:, :, sel]
                    logging.info('Analyzing source scan data with %d GDAC settings from %d to %d with minimum step sizes from %d to %d', len(gdac_range_source_scan), np.min(gdac_range_source_scan), np.max(gdac_range_source_scan), np.min(np.gradient(gdac_range_source_scan)), np.max(np.gradient(gdac_range_source_scan)))
                    logging.info('Use calibration data with %d GDAC settings from %d to %d with minimum step sizes from %d to %d', len(gdac_range_calibration), np.min(gdac_range_calibration), np.max(gdac_range_calibration), np.min(np.gradient(gdac_range_calibration)), np.max(np.gradient(gdac_range_calibration)))
                    # rate_normalization of the total hit number for each GDAC setting
                    rate_normalization = 1.
                    if analysis_configuration['normalize_rate']:
                        # NOTE(review): hit_file is not defined in this function — verify it is a module global
                        rate_normalization = analysis_utils.get_rate_normalization(hit_file=hit_file, cluster_file=hit_file, parameter='GDAC', reference=analysis_configuration['normalization_reference'], plot=analysis_configuration['plot_normalization'])
                    # correcting the hit numbers for the different cluster sizes
                    correction_factors = 1.
                    if analysis_configuration['use_cluster_rate_correction']:
                        # NOTE(review): correction_h5 is only closed at the very end of the
                        # function; it leaks if an exception is raised in between
                        correction_h5 = tb.openFile(cluster_sizes_file, mode="r")
                        cluster_size_histogram = correction_h5.root.AllHistClusterSize[:]
                        correction_factors = analysis_utils.get_hit_rate_correction(gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_source_scan, cluster_size_histogram=cluster_size_histogram)
                        if analysis_configuration['plot_cluster_sizes']:
                            plot_cluster_sizes(correction_h5, in_file_calibration_h5, gdac_range=gdac_range_source_scan)
                    pixel_thresholds = analysis_utils.get_pixel_thresholds_from_calibration_array(gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_calibration, threshold_calibration_array=threshold_calibration_array)  # interpolates the threshold at the source scan GDAC setting from the calibration
                    pixel_hits = occupancy  # create hit array with shape (col, row, ...)
                    pixel_hits = pixel_hits * correction_factors * rate_normalization
                    # choose region with pixels that have a sufficient occupancy but are not too hot
                    good_pixel = analysis_utils.select_good_pixel_region(pixel_hits, col_span=analysis_configuration['col_span'], row_span=analysis_configuration['row_span'], min_cut_threshold=analysis_configuration['min_cut_threshold'], max_cut_threshold=analysis_configuration['max_cut_threshold'])
                    pixel_mask = ~np.ma.getmaskarray(good_pixel)
                    selected_pixel_hits = pixel_hits[pixel_mask, :]  # reduce the data to pixels that are in the good pixel region
                    selected_pixel_thresholds = pixel_thresholds[pixel_mask, :]  # reduce the data to pixels that are in the good pixel region
                    plotting.plot_occupancy(good_pixel.T, title='Selected pixel for analysis (' + str(len(selected_pixel_hits)) + ')', filename=plot_file)
                    # reshape to one dimension
                    x = selected_pixel_thresholds.flatten()
                    y = selected_pixel_hits.flatten()
                    # nothing should be NAN/INF, NAN/INF is not supported yet
                    # NOTE(review): np.isfinite() always returns an array of the input's shape,
                    # so this condition can never be True; np.all(np.isfinite(x)) was probably meant
                    if np.isfinite(x).shape != x.shape or np.isfinite(y).shape != y.shape:
                        logging.warning('There are pixels with NaN or INF threshold or hit values, analysis will fail')
                    # calculated profile histogram
                    x_p, y_p, y_p_e = analysis_utils.get_profile_histogram(x, y, n_bins=analysis_configuration['n_bins'])  # profile histogram data
                    # select only the data point where the calibration worked
                    selected_data = np.logical_and(x_p > analysis_configuration['min_thr'] / analysis_configuration['vcal_calibration'], x_p < analysis_configuration['max_thr'] / analysis_configuration['vcal_calibration'])
                    x_p = x_p[selected_data]
                    y_p = y_p[selected_data]
                    y_p_e = y_p_e[selected_data]
                    if len(y_p_e[y_p_e == 0]) != 0:
                        logging.warning('There are bins without any data, guessing the error bars')
                        y_p_e[y_p_e == 0] = np.amin(y_p_e[y_p_e != 0])  # smallest non-zero error as a guess
                    # spline fit of the profile (derivation=0) and its first derivative (derivation=1)
                    smoothed_data = analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=0)
                    smoothed_data_diff = analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=1)
                    with tb.openFile(data_analyzed_file[:-3] + '_result.h5', mode="w") as out_file_h5:
                        result_1 = np.rec.array(np.column_stack((x_p, y_p, y_p_e)), dtype=[('charge', float), ('count', float), ('count_error', float)])
                        result_2 = np.rec.array(np.column_stack((x_p, smoothed_data)), dtype=[('charge', float), ('count', float)])
                        result_3 = np.rec.array(np.column_stack((x_p, -smoothed_data_diff)), dtype=[('charge', float), ('count', float)])
                        out_1 = out_file_h5.create_table(out_file_h5.root, name='ProfileHistogram', description=result_1.dtype, title='Single pixel count rate combined with a profile histogram', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        out_2 = out_file_h5.create_table(out_file_h5.root, name='ProfileHistogramSpline', description=result_2.dtype, title='Single pixel count rate combined with a profile histogram and spline smoothed', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        out_3 = out_file_h5.create_table(out_file_h5.root, name='ChargeHistogram', description=result_3.dtype, title='Charge histogram with threshold method and per pixel calibration', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        # store the analysis configuration with every result table
                        # NOTE(review): iteritems() is Python 2 only
                        for key, value in analysis_configuration.iteritems():
                            out_1.attrs[key] = value
                            out_2.attrs[key] = value
                            out_3.attrs[key] = value
                        out_1.append(result_1)
                        out_2.append(result_2)
                        out_3.append(result_3)
                    plot_result(x_p, y_p, y_p_e, smoothed_data, smoothed_data_diff)  # no filename given, so the result plot is shown interactively
                    # calculate and plot mean results
                    x_mean = analysis_utils.get_mean_threshold_from_calibration(gdac_range_source_scan, mean_threshold_calibration)
                    y_mean = selected_pixel_hits.mean(axis=(0))
                    plotting.plot_scatter(np.array(gdac_range_source_scan), y_mean, log_x=True, plot_range=None, title='Mean single pixel cluster rate at different thresholds', x_label='threshold setting [GDAC]', y_label='mean single pixel cluster rate', filename=plot_file)
                    plotting.plot_scatter(x_mean * analysis_configuration['vcal_calibration'], y_mean, plot_range=(analysis_configuration['min_thr'], analysis_configuration['max_thr']), title='Mean single pixel cluster rate at different thresholds', x_label='mean threshold [e]', y_label='mean single pixel cluster rate', filename=plot_file)
    if analysis_configuration['use_cluster_rate_correction']:
        correction_h5.close()