Example #1
def create_threshold_calibration(
    scan_base_file_name,
    create_plots=True
):  # create the calibration; can be called stand-alone
    def analyze_raw_data_file(file_name):
        if os.path.isfile(file_name[:-3] +
                          '_interpreted.h5'):  # skip analysis if already done
            logging.warning('Analyzed data file ' + file_name +
                            ' already exists. Skipping analysis for this file.')
        else:
            with AnalyzeRawData(raw_data_file=file_name,
                                create_pdf=False) as analyze_raw_data:
                analyze_raw_data.create_tot_hist = False
                analyze_raw_data.create_tot_pixel_hist = False
                analyze_raw_data.create_fitted_threshold_hists = True
                analyze_raw_data.create_threshold_mask = True
                analyze_raw_data.interpreter.set_warning_output(
                    False)  # RX errors would fill the console
                analyze_raw_data.interpret_word_table()

    def store_calibration_data_as_table(out_file_h5,
                                        mean_threshold_calibration,
                                        mean_threshold_rms_calibration,
                                        threshold_calibration,
                                        parameter_values):
        logging.info("Storing calibration data in a table...")
        filter_table = tb.Filters(complib='blosc',
                                  complevel=5,
                                  fletcher32=False)
        mean_threshold_calib_table = out_file_h5.createTable(
            out_file_h5.root,
            name='MeanThresholdCalibration',
            description=data_struct.MeanThresholdCalibrationTable,
            title='mean_threshold_calibration',
            filters=filter_table)
        threshold_calib_table = out_file_h5.createTable(
            out_file_h5.root,
            name='ThresholdCalibration',
            description=data_struct.ThresholdCalibrationTable,
            title='threshold_calibration',
            filters=filter_table)
        for column in range(80):
            for row in range(336):
                for parameter_value_index, parameter_value in enumerate(
                        parameter_values):
                    threshold_calib_table.row['column'] = column
                    threshold_calib_table.row['row'] = row
                    threshold_calib_table.row[
                        'parameter_value'] = parameter_value
                    threshold_calib_table.row[
                        'threshold'] = threshold_calibration[
                            column, row, parameter_value_index]
                    threshold_calib_table.row.append()
        for parameter_value_index, parameter_value in enumerate(
                parameter_values):
            mean_threshold_calib_table.row['parameter_value'] = parameter_value
            mean_threshold_calib_table.row[
                'mean_threshold'] = mean_threshold_calibration[
                    parameter_value_index]
            mean_threshold_calib_table.row[
                'threshold_rms'] = mean_threshold_rms_calibration[
                    parameter_value_index]
            mean_threshold_calib_table.row.append()
        threshold_calib_table.flush()
        mean_threshold_calib_table.flush()
        logging.info("done")

    def store_calibration_data_as_array(out_file_h5,
                                        mean_threshold_calibration,
                                        mean_threshold_rms_calibration,
                                        threshold_calibration, parameter_name,
                                        parameter_values):
        logging.info("Storing calibration data in an array...")
        filter_table = tb.Filters(complib='blosc',
                                  complevel=5,
                                  fletcher32=False)
        mean_threshold_calib_array = out_file_h5.createCArray(
            out_file_h5.root,
            name='HistThresholdMeanCalibration',
            atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype),
            shape=mean_threshold_calibration.shape,
            title='mean_threshold_calibration',
            filters=filter_table)
        mean_threshold_calib_rms_array = out_file_h5.createCArray(
            out_file_h5.root,
            name='HistThresholdRMSCalibration',
            atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype),
            shape=mean_threshold_calibration.shape,
            title='mean_threshold_rms_calibration',
            filters=filter_table)
        threshold_calib_array = out_file_h5.createCArray(
            out_file_h5.root,
            name='HistThresholdCalibration',
            atom=tb.Atom.from_dtype(threshold_calibration.dtype),
            shape=threshold_calibration.shape,
            title='threshold_calibration',
            filters=filter_table)
        mean_threshold_calib_array[:] = mean_threshold_calibration
        mean_threshold_calib_rms_array[:] = mean_threshold_rms_calibration
        threshold_calib_array[:] = threshold_calibration
        mean_threshold_calib_array.attrs.dimensions = [
            'column', 'row', parameter_name
        ]
        mean_threshold_calib_rms_array.attrs.dimensions = [
            'column', 'row', parameter_name
        ]
        threshold_calib_array.attrs.dimensions = [
            'column', 'row', parameter_name
        ]
        mean_threshold_calib_array.attrs.scan_parameter_values = parameter_values
        mean_threshold_calib_rms_array.attrs.scan_parameter_values = parameter_values
        threshold_calib_array.attrs.scan_parameter_values = parameter_values

        logging.info("done")

    def mask_columns(pixel_array, ignore_columns):
        idx = np.array(ignore_columns) - 1  # convert FE column numbers (1-based) to array indices
        m = np.zeros_like(pixel_array)
        m[:, idx] = 1
        return np.ma.masked_array(pixel_array, m)

    raw_data_files = analysis_utils.get_data_file_names_from_scan_base(
        scan_base_file_name,
        filter_file_words=['interpreted', 'calibration_calibration'])
    first_scan_base_file_name = scan_base_file_name if isinstance(
        scan_base_file_name, basestring) else scan_base_file_name[
            0]  # multiple scan_base_file_names for multiple runs

    with tb.openFile(
            first_scan_base_file_name + '.h5', mode="r"
    ) as in_file_h5:  # deduce scan parameters from the first (and often only) scan base file name
        ignore_columns = in_file_h5.root.configuration.run_conf[:][np.where(
            in_file_h5.root.configuration.run_conf[:]['name'] ==
            'ignore_columns')]['value'][0]
        parameter_name = in_file_h5.root.configuration.run_conf[:][np.where(
            in_file_h5.root.configuration.run_conf[:]['name'] ==
            'scan_parameters')]['value'][0]
        ignore_columns = ast.literal_eval(ignore_columns)
        parameter_name = ast.literal_eval(parameter_name)[1][0]

    calibration_file = first_scan_base_file_name + '_calibration'

    for raw_data_file in raw_data_files:  # analyze each raw data file; no multithreading here, it is already used in the S-curve fit
        analyze_raw_data_file(raw_data_file)

    files_per_parameter = analysis_utils.get_parameter_value_from_file_names(
        [file_name[:-3] + '_interpreted.h5' for file_name in raw_data_files],
        parameter_name,
        unique=True,
        sort=True)

    logging.info("Create calibration from data")
    mean_threshold_calibration = np.empty(shape=(len(raw_data_files), ),
                                          dtype='<f8')
    mean_threshold_rms_calibration = np.empty(shape=(len(raw_data_files), ),
                                              dtype='<f8')
    threshold_calibration = np.empty(shape=(80, 336, len(raw_data_files)),
                                     dtype='<f8')

    if create_plots:
        logging.info('Saving calibration plots in: %s',
                     calibration_file + '.pdf')
        output_pdf = PdfPages(calibration_file + '.pdf')

    progress_bar = progressbar.ProgressBar(widgets=[
        '',
        progressbar.Percentage(), ' ',
        progressbar.Bar(marker='*', left='|', right='|'), ' ',
        progressbar.AdaptiveETA()
    ],
                                           maxval=len(
                                               files_per_parameter.items()),
                                           term_width=80)
    progress_bar.start()
    parameter_values = []
    for index, (analyzed_data_file,
                parameters) in enumerate(files_per_parameter.items()):
        parameter_values.append(parameters.values()[0][0])
        with tb.openFile(analyzed_data_file, mode="r") as in_file_h5:
            occupancy_masked = mask_columns(
                pixel_array=in_file_h5.root.HistOcc[:],
                ignore_columns=ignore_columns
            )  # mask the columns that were not scanned, for analysis and plotting
            thresholds_masked = mask_columns(
                pixel_array=in_file_h5.root.HistThresholdFitted[:],
                ignore_columns=ignore_columns)
            if create_plots:
                plot_three_way(hist=thresholds_masked,
                               title='Threshold Fitted for ' +
                               parameters.keys()[0] + ' = ' +
                               str(parameters.values()[0][0]),
                               filename=output_pdf)
                plsr_dacs = analysis_utils.get_scan_parameter(
                    meta_data_array=in_file_h5.root.meta_data[:])['PlsrDAC']
                plot_scurves(occupancy_hist=occupancy_masked,
                             scan_parameters=plsr_dacs,
                             scan_parameter_name='PlsrDAC',
                             filename=output_pdf)
            # fill the calibration data arrays
            mean_threshold_calibration[index] = np.ma.mean(thresholds_masked)
            mean_threshold_rms_calibration[index] = np.ma.std(
                thresholds_masked)
            threshold_calibration[:, :, index] = thresholds_masked.T
        progress_bar.update(index)
    progress_bar.finish()

    with tb.openFile(calibration_file + '.h5', mode="w") as out_file_h5:
        store_calibration_data_as_array(
            out_file_h5=out_file_h5,
            mean_threshold_calibration=mean_threshold_calibration,
            mean_threshold_rms_calibration=mean_threshold_rms_calibration,
            threshold_calibration=threshold_calibration,
            parameter_name=parameter_name,
            parameter_values=parameter_values)
        store_calibration_data_as_table(
            out_file_h5=out_file_h5,
            mean_threshold_calibration=mean_threshold_calibration,
            mean_threshold_rms_calibration=mean_threshold_rms_calibration,
            threshold_calibration=threshold_calibration,
            parameter_values=parameter_values)

    if create_plots:
        plot_scatter(x=parameter_values,
                     y=mean_threshold_calibration,
                     title='Threshold calibration',
                     x_label=parameter_name,
                     y_label='Mean threshold',
                     log_x=False,
                     filename=output_pdf)
        plot_scatter(x=parameter_values,
                     y=mean_threshold_calibration,
                     title='Threshold calibration',
                     x_label=parameter_name,
                     y_label='Mean threshold',
                     log_x=True,
                     filename=output_pdf)
        output_pdf.close()
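
A minimal stand-alone usage sketch (the scan base name is hypothetical); the function writes <scan_base>_calibration.h5 and, with create_plots=True, <scan_base>_calibration.pdf next to the scan data:

if __name__ == '__main__':
    create_threshold_calibration(
        'data/scc_99_threshold_calibration',  # hypothetical scan base file name
        create_plots=True)
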
def analyze_injected_charge(data_analyzed_file):
    logging.info('Analyze the injected charge')
    with tb.openFile(data_analyzed_file, mode="r") as in_file_h5:
        occupancy = in_file_h5.root.HistOcc[:].T
        gdacs = analysis_utils.get_scan_parameter(in_file_h5.root.meta_data[:])['GDAC']
        with PdfPages(data_analyzed_file[:-3] + '.pdf') as plot_file:
            plotting.plot_scatter(gdacs, occupancy.sum(axis=(0, 1)), title='Single pixel hit rate at different thresholds', x_label='Threshold setting [GDAC]', y_label='Single pixel hit rate', log_x=True, filename=plot_file)
            if analysis_configuration['input_file_calibration']:
                with tb.openFile(analysis_configuration['input_file_calibration'], mode="r") as in_file_calibration_h5:  # read calibration file from calibrate_threshold_gdac scan
                    mean_threshold_calibration = in_file_calibration_h5.root.MeanThresholdCalibration[:]
                    threshold_calibration_array = in_file_calibration_h5.root.HistThresholdCalibration[:]

                    gdac_range_calibration = np.array(in_file_calibration_h5.root.HistThresholdCalibration._v_attrs.scan_parameter_values)
                    gdac_range_source_scan = gdacs

                    # select data that is within the given GDAC range (min_gdac, max_gdac)
                    sel = np.where(np.logical_and(gdac_range_source_scan >= analysis_configuration['min_gdac'], gdac_range_source_scan <= analysis_configuration['max_gdac']))[0]
                    gdac_range_source_scan = gdac_range_source_scan[sel]
                    occupancy = occupancy[:, :, sel]
                    sel = np.where(np.logical_and(gdac_range_calibration >= analysis_configuration['min_gdac'], gdac_range_calibration <= analysis_configuration['max_gdac']))[0]
                    gdac_range_calibration = gdac_range_calibration[sel]
                    threshold_calibration_array = threshold_calibration_array[:, :, sel]

                    logging.info('Analyzing source scan data with %d GDAC settings from %d to %d with step sizes from %d to %d', len(gdac_range_source_scan), np.min(gdac_range_source_scan), np.max(gdac_range_source_scan), np.min(np.gradient(gdac_range_source_scan)), np.max(np.gradient(gdac_range_source_scan)))
                    logging.info('Using calibration data with %d GDAC settings from %d to %d with step sizes from %d to %d', len(gdac_range_calibration), np.min(gdac_range_calibration), np.max(gdac_range_calibration), np.min(np.gradient(gdac_range_calibration)), np.max(np.gradient(gdac_range_calibration)))

                    # rate normalization of the total hit number for each GDAC setting
                    rate_normalization = 1.
                    if analysis_configuration['normalize_rate']:
                        rate_normalization = analysis_utils.get_rate_normalization(hit_file=hit_file, cluster_file=hit_file, parameter='GDAC', reference=analysis_configuration['normalization_reference'], plot=analysis_configuration['plot_normalization'])

                    # correcting the hit numbers for the different cluster sizes
                    correction_factors = 1.
                    if analysis_configuration['use_cluster_rate_correction']:
                        correction_h5 = tb.openFile(cluster_sizes_file, mode="r")
                        cluster_size_histogram = correction_h5.root.AllHistClusterSize[:]
                        correction_factors = analysis_utils.get_hit_rate_correction(gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_source_scan, cluster_size_histogram=cluster_size_histogram)
                        if analysis_configuration['plot_cluster_sizes']:
                            plot_cluster_sizes(correction_h5, in_file_calibration_h5, gdac_range=gdac_range_source_scan)

                    pixel_thresholds = analysis_utils.get_pixel_thresholds_from_calibration_array(gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_calibration, threshold_calibration_array=threshold_calibration_array)  # interpolates the threshold at the source scan GDAC setting from the calibration
                    pixel_hits = occupancy  # create hit array with shape (col, row, ...)
                    pixel_hits = pixel_hits * correction_factors * rate_normalization

                    # choose region with pixels that have a sufficient occupancy but are not too hot
                    good_pixel = analysis_utils.select_good_pixel_region(pixel_hits, col_span=analysis_configuration['col_span'], row_span=analysis_configuration['row_span'], min_cut_threshold=analysis_configuration['min_cut_threshold'], max_cut_threshold=analysis_configuration['max_cut_threshold'])
                    pixel_mask = ~np.ma.getmaskarray(good_pixel)
                    selected_pixel_hits = pixel_hits[pixel_mask, :]  # reduce the data to pixels that are in the good pixel region
                    selected_pixel_thresholds = pixel_thresholds[pixel_mask, :]  # reduce the data to pixels that are in the good pixel region
                    plotting.plot_occupancy(good_pixel.T, title='Selected pixel for analysis (' + str(len(selected_pixel_hits)) + ')', filename=plot_file)

                    # reshape to one dimension
                    x = selected_pixel_thresholds.flatten()
                    y = selected_pixel_hits.flatten()

                    # NaN/inf is not supported yet, nothing should be NaN/inf
                    if not np.all(np.isfinite(x)) or not np.all(np.isfinite(y)):
                        logging.warning('There are pixels with NaN or inf threshold or hit values, analysis will fail')

                    # calculate the profile histogram
                    x_p, y_p, y_p_e = analysis_utils.get_profile_histogram(x, y, n_bins=analysis_configuration['n_bins'])  # profile histogram data

                    # select only the data points where the calibration worked
                    selected_data = np.logical_and(x_p > analysis_configuration['min_thr'] / analysis_configuration['vcal_calibration'], x_p < analysis_configuration['max_thr'] / analysis_configuration['vcal_calibration'])
                    x_p = x_p[selected_data]
                    y_p = y_p[selected_data]
                    y_p_e = y_p_e[selected_data]

                    if len(y_p_e[y_p_e == 0]) != 0:
                        logging.warning('There are bins without any data, guessing the error bars')
                        y_p_e[y_p_e == 0] = np.amin(y_p_e[y_p_e != 0])

                    smoothed_data = analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=0)
                    smoothed_data_diff = analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=1)

                    with tb.openFile(data_analyzed_file[:-3] + '_result.h5', mode="w") as out_file_h5:
                        result_1 = np.rec.array(np.column_stack((x_p, y_p, y_p_e)), dtype=[('charge', float), ('count', float), ('count_error', float)])
                        result_2 = np.rec.array(np.column_stack((x_p, smoothed_data)), dtype=[('charge', float), ('count', float)])
                        result_3 = np.rec.array(np.column_stack((x_p, -smoothed_data_diff)), dtype=[('charge', float), ('count', float)])
                        out_1 = out_file_h5.create_table(out_file_h5.root, name='ProfileHistogram', description=result_1.dtype, title='Single pixel count rate combined with a profile histogram', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        out_2 = out_file_h5.create_table(out_file_h5.root, name='ProfileHistogramSpline', description=result_2.dtype, title='Single pixel count rate combined with a profile histogram and spline smoothed', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        out_3 = out_file_h5.create_table(out_file_h5.root, name='ChargeHistogram', description=result_3.dtype, title='Charge histogram with threshold method and per pixel calibration', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                        for key, value in analysis_configuration.iteritems():
                            out_1.attrs[key] = value
                            out_2.attrs[key] = value
                            out_3.attrs[key] = value
                        out_1.append(result_1)
                        out_2.append(result_2)
                        out_3.append(result_3)

                    plot_result(x_p, y_p, y_p_e, smoothed_data, smoothed_data_diff)

                    #  calculate and plot mean results
                    x_mean = analysis_utils.get_mean_threshold_from_calibration(gdac_range_source_scan, mean_threshold_calibration)
                    y_mean = selected_pixel_hits.mean(axis=(0))

                    plotting.plot_scatter(np.array(gdac_range_source_scan), y_mean, log_x=True, plot_range=None, title='Mean single pixel cluster rate at different thresholds', x_label='threshold setting [GDAC]', y_label='mean single pixel cluster rate', filename=plot_file)
                    plotting.plot_scatter(x_mean * analysis_configuration['vcal_calibration'], y_mean, plot_range=(analysis_configuration['min_thr'], analysis_configuration['max_thr']), title='Mean single pixel cluster rate at different thresholds', x_label='mean threshold [e]', y_label='mean single pixel cluster rate', filename=plot_file)

                if analysis_configuration['use_cluster_rate_correction']:
                    correction_h5.close()
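
analyze_injected_charge reads its cuts and file names from a module-level analysis_configuration dict (and, when rate normalization or cluster rate correction is enabled, from module-level hit_file and cluster_sizes_file names). A sketch of the keys the function accesses; every value below is a placeholder, not a recommended setting:

analysis_configuration = {
    'input_file_calibration': 'data/scc_99_calibration.h5',  # hypothetical calibration file
    'min_gdac': 0,  # GDAC range selection
    'max_gdac': 999999,
    'normalize_rate': False,  # needs a module-level hit_file when True
    'normalization_reference': 'time',  # placeholder
    'plot_normalization': False,
    'use_cluster_rate_correction': False,  # needs a module-level cluster_sizes_file when True
    'plot_cluster_sizes': False,
    'col_span': [5, 75],  # placeholder good pixel region
    'row_span': [10, 320],
    'min_cut_threshold': 0.5,  # placeholder occupancy cuts
    'max_cut_threshold': 3.0,
    'n_bins': 100,  # profile histogram binning
    'min_thr': 2000,  # placeholder threshold window
    'max_thr': 10000,
    'vcal_calibration': 55.0,  # placeholder electrons per PlsrDAC
    'smoothness': 100,  # placeholder spline smoothing factor
}

analyze_injected_charge('data/scc_99_source_scan_interpreted.h5')  # hypothetical input file
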
Example #4
def analyse_n_cluster_per_event(scan_base,
                                include_no_cluster=False,
                                time_line_absolute=True,
                                combine_n_readouts=1000,
                                chunk_size=10000000,
                                plot_n_cluster_hists=False,
                                output_pdf=None,
                                output_file=None):
    ''' Determines the number of clusters per event as a function of time. For this, the data of a fixed number of readouts ('combine_n_readouts') are combined.

    Parameters
    ----------
    scan_base: list of str
        scan base names (e.g. ['//data//SCC_50_fei4_self_trigger_scan_390'])
    include_no_cluster: bool
        if True, also count the events without any cluster
    time_line_absolute: bool
        if True the analysis uses absolute time stamps
    combine_n_readouts: int
        the number of readouts to combine (e.g. 1000)
    chunk_size: int
        the maximum chunk size used during read; if too big, a memory error occurs, if too small, the analysis takes longer
    output_pdf: PdfPages
        PdfPages file object; if None the plot is shown on screen
    '''

    time_stamp = []
    n_cluster = []

    start_time_set = False

    for data_file in scan_base:
        with tb.open_file(data_file + '_interpreted.h5',
                          mode="r+") as in_cluster_file_h5:
            # get data and data pointer
            meta_data_array = in_cluster_file_h5.root.meta_data[:]
            cluster_table = in_cluster_file_h5.root.Cluster

            # determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number)
            parameter_ranges = np.column_stack(
                (analysis_utils.get_ranges_from_array(
                    meta_data_array['timestamp_start'][::combine_n_readouts]),
                 analysis_utils.get_ranges_from_array(
                     meta_data_array['event_number'][::combine_n_readouts])))

            # create an event_number index (important for speed)
            analysis_utils.index_event_number(cluster_table)

            # initialize the analysis and set settings
            analyze_data = AnalyzeRawData()
            analyze_data.create_tot_hist = False
            analyze_data.create_bcid_hist = False

            # variables for read speed up
            index = 0  # index where to start reading; 0 at the beginning, increased while looping
            best_chunk_size = chunk_size

            total_cluster = cluster_table.shape[0]

            progress_bar = progressbar.ProgressBar(widgets=[
                '',
                progressbar.Percentage(), ' ',
                progressbar.Bar(marker='*', left='|', right='|'), ' ',
                progressbar.AdaptiveETA()
            ],
                                                   maxval=total_cluster,
                                                   term_width=80)
            progress_bar.start()

            # loop over the selected events
            for parameter_index, parameter_range in enumerate(
                    parameter_ranges):
                logging.debug('Analyze time stamp ' + str(parameter_range[0]) +
                              ' and data from events = [' +
                              str(parameter_range[2]) + ',' +
                              str(parameter_range[3]) + '[ ' + str(
                                  int(
                                      float(
                                          float(parameter_index) /
                                          float(len(parameter_ranges)) *
                                          100.0))) + '%')
                analyze_data.reset()  # resets the data of the last analysis

                # loop over the clusters of the currently selected events, with optimizations: determine the best chunk size, pass the start word index
                readout_cluster_len = 0  # used to estimate an optimal chunk size from the number of clusters, for speed-up
                hist = None
                for clusters, index in analysis_utils.data_aligned_at_events(
                        cluster_table,
                        start_event_number=parameter_range[2],
                        stop_event_number=parameter_range[3],
                        start_index=index,
                        chunk_size=best_chunk_size):
                    n_cluster_per_event = analysis_utils.get_n_cluster_in_events(
                        clusters['event_number']
                    )[:, 1]  # array with the number of clusters per event; every event in this array has at least 1 cluster
                    if hist is None:
                        hist = np.histogram(n_cluster_per_event,
                                            bins=10,
                                            range=(0, 10))[0]
                    else:
                        hist = np.add(
                            hist,
                            np.histogram(n_cluster_per_event,
                                         bins=10,
                                         range=(0, 10))[0])
                    if include_no_cluster and parameter_range[
                            3] is not None:  # parameter_range[3] is None for the last readout
                        hist[0] = (parameter_range[3] -
                                   parameter_range[2]) - len(
                                       n_cluster_per_event
                                   )  # add the events without any cluster
                    readout_cluster_len += clusters.shape[0]
                    total_cluster -= len(clusters)
                    progress_bar.update(index)
                best_chunk_size = int(1.5 * readout_cluster_len) if int(
                    1.05 * readout_cluster_len
                ) < chunk_size else chunk_size  # estimate the number of clusters per read instruction to speed up the next read

                if plot_n_cluster_hists:
                    plotting.plot_1d_hist(
                        hist,
                        title='Number of clusters per event at ' +
                        str(parameter_range[0]),
                        x_axis_title='Number of clusters',
                        y_axis_title='#',
                        log_y=True,
                        filename=output_pdf)
                hist = hist.astype('f4') / np.sum(
                    hist)  # convert counts to fractions of the total

                if time_line_absolute:
                    time_stamp.append(parameter_range[0])
                else:
                    if not start_time_set:
                        start_time = parameter_ranges[0, 0]
                        start_time_set = True
                    time_stamp.append((parameter_range[0] - start_time) / 60.0)
                n_cluster.append(hist)
            progress_bar.finish()
            if total_cluster != 0:
                logging.warning(
                    'Not all clusters were selected during analysis. Analysis is therefore not exact'
                )

    if time_line_absolute:
        plotting.plot_scatter_time(
            time_stamp,
            n_cluster,
            title='Number of clusters per event as a function of time',
            marker_style='o',
            filename=output_pdf,
            legend=('0 cluster', '1 cluster', '2 cluster',
                    '3 cluster') if include_no_cluster else
            ('0 cluster not plotted', '1 cluster', '2 cluster', '3 cluster'))
    else:
        plotting.plot_scatter(
            time_stamp,
            n_cluster,
            title='Number of clusters per event as a function of time',
            x_label='time [min.]',
            marker_style='o',
            filename=output_pdf,
            legend=('0 cluster', '1 cluster', '2 cluster',
                    '3 cluster') if include_no_cluster else
            ('0 cluster not plotted', '1 cluster', '2 cluster', '3 cluster'))
    if output_file:
        with tb.open_file(output_file, mode="a") as out_file_h5:
            cluster_array = np.array(n_cluster)
            rec_array = np.array(zip(time_stamp, cluster_array[:, 0],
                                     cluster_array[:, 1], cluster_array[:, 2],
                                     cluster_array[:, 3], cluster_array[:, 4],
                                     cluster_array[:, 5]),
                                 dtype=[('time_stamp', float),
                                        ('cluster_0', float),
                                        ('cluster_1', float),
                                        ('cluster_2', float),
                                        ('cluster_3', float),
                                        ('cluster_4', float),
                                        ('cluster_5', float)
                                        ]).view(np.recarray)
            try:
                n_cluster_table = out_file_h5.create_table(
                    out_file_h5.root,
                    name='n_cluster',
                    description=rec_array,
                    title='Cluster per event',
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                n_cluster_table[:] = rec_array
            except tb.exceptions.NodeError:
                logging.warning(
                    output_file +
                    ' already contains an n_cluster node, existing data is not overwritten.')
    return time_stamp, n_cluster
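
A usage sketch (the scan base name is the docstring example; the output names are hypothetical):

from matplotlib.backends.backend_pdf import PdfPages

with PdfPages('cluster_per_event.pdf') as output_pdf:  # hypothetical output name
    time_stamp, n_cluster = analyse_n_cluster_per_event(
        ['//data//SCC_50_fei4_self_trigger_scan_390'],
        include_no_cluster=True,
        time_line_absolute=False,
        output_pdf=output_pdf,
        output_file='cluster_per_event.h5')  # hypothetical output name
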
Example #5
    def scan(self):
        with PdfPages(self.output_filename + '.pdf') as output_pdf:
            if self.test_tdc_values:
                x, y, y_err = [], [], []
                tdc_hist = None

                self.fifo_readout.reset_sram_fifo()  # clear fifo data
                for pulse_width in [i for j in (range(10, 100, 5), range(100, 400, 10)) for i in j]:
                    logging.info('Test TDC for a pulse width of %d ns', pulse_width)
                    self.start_pulser(pulse_width, self.n_pulses)
                    time.sleep(self.n_pulses * pulse_width * 1e-9 + 0.1)
                    data = self.fifo_readout.read_data()
                    if data[is_tdc_word(data)].shape[0] != 0:
                        tdc_values = np.bitwise_and(data[is_tdc_word(data)], 0x00000FFF)
                        tdc_counter = np.bitwise_and(data[is_tdc_word(data)], 0x000FF000)
                        tdc_counter = np.right_shift(tdc_counter, 12)
                        if np.count_nonzero(is_tdc_word(data)) != self.n_pulses:
                            logging.warning('%d TDC words instead of %d', np.count_nonzero(is_tdc_word(data)), self.n_pulses)
                        try:
                            if np.any(np.logical_and(tdc_counter[np.gradient(tdc_counter) != 1] != 0, tdc_counter[np.gradient(tdc_counter) != 1] != 255)):
                                logging.warning('The counter did not count correctly')
                        except ValueError:
                            logging.warning('The counter did not count correctly')

                        x.append(pulse_width)
                        y.append(np.mean(tdc_values))
                        y_err.append(np.std(tdc_values))
                        if tdc_hist is None:
                            tdc_hist = np.histogram(tdc_values, range=(0, 1023), bins=1024)[0]
                        else:
                            tdc_hist += np.histogram(tdc_values, range=(0, 1023), bins=1024)[0]
                    else:
                        logging.warning('No TDC words, check connection!')

                plotting.plot_scatter(x, y, y_err, title='FPGA TDC linearity, ' + str(self.n_pulses) + ' each', x_label='Pulse width [ns]', y_label='TDC value', filename=output_pdf)
                plotting.plot_scatter(x, y_err, title='FPGA TDC RMS, ' + str(self.n_pulses) + ' each', x_label='Pulse width [ns]', y_label='TDC RMS', filename=output_pdf)
                if tdc_hist is not None:
                    plotting.plot_tdc_counter(tdc_hist, title='All TDC values', filename=output_pdf)

            if self.test_trigger_delay:
                x, y, y_err, y2, y2_err = [], [], [], [], []
                self.fifo_readout.reset_sram_fifo()  # clear fifo data
                for pulse_delay in [i for j in (range(0, 100, 5), range(100, 500, 500)) for i in j]:
                    logging.info('Test TDC for a pulse delay of %d', pulse_delay)
                    for _ in range(10):
                        self.start_pulser(pulse_width=100, n_pulses=1, pulse_delay=pulse_delay)
                        time.sleep(0.1)
                    data = self.fifo_readout.read_data()
                    if data[is_tdc_word(data)].shape[0] != 0:
                        if np.count_nonzero(is_tdc_word(data)) != 10:
                            logging.warning('%d TDC words instead of %d', np.count_nonzero(is_tdc_word(data)), 10)
                        tdc_values = np.bitwise_and(data[is_tdc_word(data)], 0x00000FFF)
                        tdc_delay = np.bitwise_and(data[is_tdc_word(data)], 0x0FF00000)
                        tdc_delay = np.right_shift(tdc_delay, 20)

                        x.append(pulse_delay)
                        y.append(np.mean(tdc_delay))
                        y_err.append(np.std(tdc_delay))
                        y2.append(np.mean(tdc_values))
                        y2_err.append(np.std(tdc_values))
                    else:
                        logging.warning('No TDC words, check connection!')

                plotting.plot_scatter(x, y2, y2_err, title='FPGA TDC for different delays, ' + str(10) + ' each', x_label='Pulse delay [ns]', y_label='TDC value', filename=output_pdf)
                plotting.plot_scatter(x, y, y_err, title='FPGA TDC trigger delay, ' + str(10) + ' each', x_label='Pulse delay [ns]', y_label='TDC trigger delay', filename=output_pdf)
                plotting.plot_scatter(x, y_err, title='FPGA TDC trigger delay RMS, ' + str(10) + ' each', x_label='Pulse delay [ns]', y_label='TDC trigger delay RMS', filename=output_pdf)
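
Both branches decode the TDC word fields by hand with the same masks; the bit layout they imply can be factored into one helper. A sketch derived from those masks (not an authoritative description of the data format):

import numpy as np

def decode_tdc_words(words):
    # bit layout implied by the masks used in scan():
    #   bits [11:0] TDC value, bits [19:12] TDC counter, bits [27:20] TDC trigger delay
    words = np.asarray(words, dtype=np.uint32)
    tdc_value = np.bitwise_and(words, 0x00000FFF)
    tdc_counter = np.right_shift(np.bitwise_and(words, 0x000FF000), 12)
    tdc_delay = np.right_shift(np.bitwise_and(words, 0x0FF00000), 20)
    return tdc_value, tdc_counter, tdc_delay
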
Example #6
def analyze_event_rate(scan_base,
                       combine_n_readouts=1000,
                       time_line_absolute=True,
                       output_pdf=None,
                       output_file=None):
    ''' Determines the number of events as a function of time. For this, the data of a fixed number of readouts ('combine_n_readouts') are combined. The number of events is taken from the meta data
    and the resulting rate is plotted into a pdf file.

    Parameters
    ----------
    scan_base: list of str
        scan base names (e.g. ['//data//SCC_50_fei4_self_trigger_scan_390'])
    combine_n_readouts: int
        the number of readouts to combine (e.g. 1000)
    time_line_absolute: bool
        if True the analysis uses absolute time stamps
    output_pdf: PdfPages
        PdfPages file object; if None the plot is shown on screen
    '''
    time_stamp = []
    rate = []

    start_time_set = False

    for data_file in scan_base:
        with tb.open_file(data_file + '_interpreted.h5',
                          mode="r") as in_file_h5:
            meta_data_array = in_file_h5.root.meta_data[:]
            parameter_ranges = np.column_stack(
                (analysis_utils.get_ranges_from_array(
                    meta_data_array['timestamp_start'][::combine_n_readouts]),
                 analysis_utils.get_ranges_from_array(
                     meta_data_array['event_number'][::combine_n_readouts])))

            if time_line_absolute:
                time_stamp.extend(parameter_ranges[:-1, 0])
            else:
                if not start_time_set:
                    start_time = parameter_ranges[0, 0]
                    start_time_set = True
                time_stamp.extend(
                    (parameter_ranges[:-1, 0] - start_time) / 60.0)
            rate.extend((parameter_ranges[:-1, 3] - parameter_ranges[:-1, 2]) /
                        (parameter_ranges[:-1, 1] -
                         parameter_ranges[:-1, 0]))  # d#Events / dt
    if time_line_absolute:
        plotting.plot_scatter_time(time_stamp,
                                   rate,
                                   title='Event rate [Hz]',
                                   marker_style='o',
                                   filename=output_pdf)
    else:
        plotting.plot_scatter(time_stamp,
                              rate,
                              title='Events per time',
                              x_label='Progressed time [min.]',
                              y_label='Event rate [Hz]',
                              marker_style='o',
                              filename=output_pdf)
    if output_file:
        with tb.open_file(output_file, mode="a") as out_file_h5:
            rec_array = np.array(zip(time_stamp, rate),
                                 dtype=[('time_stamp', float),
                                        ('rate', float)]).view(np.recarray)
            try:
                rate_table = out_file_h5.create_table(out_file_h5.root,
                                                      name='Eventrate',
                                                      description=rec_array,
                                                      title='Event rate',
                                                      filters=tb.Filters(
                                                          complib='blosc',
                                                          complevel=5,
                                                          fletcher32=False))
                rate_table[:] = rec_array
            except tb.exceptions.NodeError:
                logging.warning(
                    output_file +
                    ' already contains an Eventrate node, existing data is not overwritten.'
                )
    return time_stamp, rate
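
A usage sketch (scan base from the docstring example; with output_pdf=None the plot is shown on screen, and output_file is optional):

time_stamp, rate = analyze_event_rate(
    ['//data//SCC_50_fei4_self_trigger_scan_390'],
    time_line_absolute=True)
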
Example #7
def analyze_beam_spot(scan_base,
                      combine_n_readouts=1000,
                      chunk_size=10000000,
                      plot_occupancy_hists=False,
                      output_pdf=None,
                      output_file=None):
    ''' Determines the mean x and y beam spot position as a function of time. For this, the data of a fixed number of readouts ('combine_n_readouts') are combined. The occupancy is determined
    for the combined events and stored into a pdf file. At the end the beam x and y positions are plotted into a scatter plot with absolute positions in um.

    Parameters
    ----------
    scan_base: list of str
        scan base names (e.g. ['//data//SCC_50_fei4_self_trigger_scan_390'])
    combine_n_readouts: int
        the number of readouts to combine (e.g. 1000)
    chunk_size: int
        the maximum chunk size used during read; if too big, a memory error occurs, if too small, the analysis takes longer
    output_pdf: PdfPages
        PdfPages file object; if None the plot is shown on screen
    '''
    time_stamp = []
    x = []
    y = []

    for data_file in scan_base:
        with tb.open_file(data_file + '_interpreted.h5',
                          mode="r+") as in_hit_file_h5:
            # get data and data pointer
            meta_data_array = in_hit_file_h5.root.meta_data[:]
            hit_table = in_hit_file_h5.root.Hits

            # determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number)
            parameter_ranges = np.column_stack(
                (analysis_utils.get_ranges_from_array(
                    meta_data_array['timestamp_start'][::combine_n_readouts]),
                 analysis_utils.get_ranges_from_array(
                     meta_data_array['event_number'][::combine_n_readouts])))

            # create an event_number index (important)
            analysis_utils.index_event_number(hit_table)

            # initialize the analysis and set settings
            analyze_data = AnalyzeRawData()
            analyze_data.create_tot_hist = False
            analyze_data.create_bcid_hist = False
            analyze_data.histogram.set_no_scan_parameter()

            # variables for read speed up
            index = 0  # index where to start reading; 0 at the beginning, increased while looping
            best_chunk_size = chunk_size

            progress_bar = progressbar.ProgressBar(widgets=[
                '',
                progressbar.Percentage(), ' ',
                progressbar.Bar(marker='*', left='|', right='|'), ' ',
                progressbar.AdaptiveETA()
            ],
                                                   maxval=hit_table.shape[0],
                                                   term_width=80)
            progress_bar.start()

            # loop over the selected events
            for parameter_index, parameter_range in enumerate(
                    parameter_ranges):
                logging.debug('Analyze time stamp ' + str(parameter_range[0]) +
                              ' and data from events = [' +
                              str(parameter_range[2]) + ',' +
                              str(parameter_range[3]) + '[ ' + str(
                                  int(
                                      float(
                                          float(parameter_index) /
                                          float(len(parameter_ranges)) *
                                          100.0))) + '%')
                analyze_data.reset()  # resets the data of the last analysis

                # loop over the hits of the currently selected events, with optimizations: determine the best chunk size, pass the start word index
                readout_hit_len = 0  # used to estimate an optimal chunk size from the number of hits, for speed-up
                for hits, index in analysis_utils.data_aligned_at_events(
                        hit_table,
                        start_event_number=parameter_range[2],
                        stop_event_number=parameter_range[3],
                        start_index=index,
                        chunk_size=best_chunk_size):
                    analyze_data.analyze_hits(
                        hits)  # analyze the selected hits in chunks
                    readout_hit_len += hits.shape[0]
                    progress_bar.update(index)
                best_chunk_size = int(1.5 * readout_hit_len) if int(
                    1.05 * readout_hit_len
                ) < chunk_size else chunk_size  # to increase the readout speed, estimate the number of hits for the next read instruction

                # get and store results
                occupancy_array = analyze_data.histogram.get_occupancy()
                projection_x = np.sum(occupancy_array, axis=0).ravel()
                projection_y = np.sum(occupancy_array, axis=1).ravel()
                x.append(
                    analysis_utils.get_mean_from_histogram(projection_x,
                                                           bin_positions=range(
                                                               0, 80)))
                y.append(
                    analysis_utils.get_mean_from_histogram(projection_y,
                                                           bin_positions=range(
                                                               0, 336)))
                time_stamp.append(parameter_range[0])
                if plot_occupancy_hists:
                    plotting.plot_occupancy(
                        occupancy_array[:, :, 0],
                        title='Occupancy for events between ' + time.strftime(
                            '%H:%M:%S', time.localtime(parameter_range[0])) +
                        ' and ' + time.strftime(
                            '%H:%M:%S', time.localtime(parameter_range[1])),
                        filename=output_pdf)
            progress_bar.finish()
    plotting.plot_scatter([i * 250 for i in x], [i * 50 for i in y],
                          title='Mean beam position',
                          x_label='x [um]',
                          y_label='y [um]',
                          marker_style='-o',
                          filename=output_pdf)
    if output_file:
        with tb.open_file(output_file, mode="a") as out_file_h5:
            rec_array = np.array(zip(time_stamp, x, y),
                                 dtype=[('time_stamp', float), ('x', float),
                                        ('y', float)])
            try:
                beam_spot_table = out_file_h5.create_table(
                    out_file_h5.root,
                    name='Beamspot',
                    description=rec_array,
                    title='Beam spot position',
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                beam_spot_table[:] = rec_array
            except tb.exceptions.NodeError:
                logging.warning(
                    output_file +
                    ' already has a Beamspot node, not overwriting the existing one.')
    return time_stamp, x, y
Example #8
0
    def analyze(self):
        def analyze_raw_data_file(file_name):
            with AnalyzeRawData(raw_data_file=file_name,
                                create_pdf=False) as analyze_raw_data:
                analyze_raw_data.create_tot_hist = False
                analyze_raw_data.create_fitted_threshold_hists = True
                analyze_raw_data.create_threshold_mask = True
                analyze_raw_data.interpreter.set_warning_output(
                    True
                )  # so far the data structure in a threshold scan was always bad; many warnings are given
                analyze_raw_data.interpret_word_table()

        def store_calibration_data_as_table(out_file_h5,
                                            mean_threshold_calibration,
                                            mean_threshold_rms_calibration,
                                            threshold_calibration,
                                            parameter_values):
            logging.info("Storing calibration data in a table...")
            filter_table = tb.Filters(complib='blosc',
                                      complevel=5,
                                      fletcher32=False)
            mean_threshold_calib_table = out_file_h5.createTable(
                out_file_h5.root,
                name='MeanThresholdCalibration',
                description=data_struct.MeanThresholdCalibrationTable,
                title='mean_threshold_calibration',
                filters=filter_table)
            threshold_calib_table = out_file_h5.createTable(
                out_file_h5.root,
                name='ThresholdCalibration',
                description=data_struct.ThresholdCalibrationTable,
                title='threshold_calibration',
                filters=filter_table)
            for column in range(80):
                for row in range(336):
                    for parameter_value_index, parameter_value in enumerate(
                            parameter_values):
                        threshold_calib_table.row['column'] = column
                        threshold_calib_table.row['row'] = row
                        threshold_calib_table.row[
                            'parameter_value'] = parameter_value
                        threshold_calib_table.row[
                            'threshold'] = threshold_calibration[
                                column, row, parameter_value_index]
                        threshold_calib_table.row.append()
            for parameter_value_index, parameter_value in enumerate(
                    parameter_values):
                mean_threshold_calib_table.row[
                    'parameter_value'] = parameter_value
                mean_threshold_calib_table.row[
                    'mean_threshold'] = mean_threshold_calibration[
                        parameter_value_index]
                mean_threshold_calib_table.row[
                    'threshold_rms'] = mean_threshold_rms_calibration[
                        parameter_value_index]
                mean_threshold_calib_table.row.append()
            threshold_calib_table.flush()
            mean_threshold_calib_table.flush()
            logging.info("done")

        def store_calibration_data_as_array(out_file_h5,
                                            mean_threshold_calibration,
                                            mean_threshold_rms_calibration,
                                            threshold_calibration):
            logging.info("Storing calibration data in an array...")
            filter_table = tb.Filters(complib='blosc',
                                      complevel=5,
                                      fletcher32=False)
            mean_threshold_calib_array = out_file_h5.createCArray(
                out_file_h5.root,
                name='HistThresholdMeanCalibration',
                atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype),
                shape=mean_threshold_calibration.shape,
                title='mean_threshold_calibration',
                filters=filter_table)
            mean_threshold_calib_rms_array = out_file_h5.createCArray(
                out_file_h5.root,
                name='HistThresholdRMSCalibration',
                atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype),
                shape=mean_threshold_calibration.shape,
                title='mean_threshold_rms_calibration',
                filters=filter_table)
            threshold_calib_array = out_file_h5.createCArray(
                out_file_h5.root,
                name='HistThresholdCalibration',
                atom=tb.Atom.from_dtype(threshold_calibration.dtype),
                shape=threshold_calibration.shape,
                title='threshold_calibration',
                filters=filter_table)
            mean_threshold_calib_array[:] = mean_threshold_calibration
            mean_threshold_calib_rms_array[:] = mean_threshold_rms_calibration
            threshold_calib_array[:] = threshold_calibration
            logging.info("done")

        def mask_columns(pixel_array, ignore_columns):
            idx = np.array(ignore_columns) - 1  # from FE to Array columns
            m = np.zeros_like(pixel_array)
            m[:, idx] = 1
            return np.ma.masked_array(pixel_array, m)

        calibration_file = self.output_filename + '_calibration'
        raw_data_files = analysis_utils.get_data_file_names_from_scan_base(
            self.output_filename,
            filter_file_words=['interpreted', 'calibration_calibration'])
        parameter_name = self.scan_parameters._fields[1]

        for raw_data_file in raw_data_files:  # not using multithreading here; it is already used in the fit
            analyze_raw_data_file(raw_data_file)

        files_per_parameter = analysis_utils.get_parameter_value_from_file_names(
            [
                file_name[:-3] + '_interpreted.h5'
                for file_name in raw_data_files
            ], parameter_name)

        logging.info("Create calibration from data")
        with tb.openFile(
                self.output_filename + '.h5',
                mode="r") as in_file_h5:  # deduce settings from raw data file
            ignore_columns = in_file_h5.root.configuration.run_conf[:][
                np.where(in_file_h5.root.configuration.run_conf[:]['name'] ==
                         'ignore_columns')]['value'][0]
            ignore_columns = ast.literal_eval(ignore_columns)

        mean_threshold_calibration = np.empty(shape=(len(raw_data_files), ),
                                              dtype='<f8')
        mean_threshold_rms_calibration = np.empty(
            shape=(len(raw_data_files), ), dtype='<f8')
        threshold_calibration = np.empty(shape=(80, 336, len(raw_data_files)),
                                         dtype='<f8')

        if self.create_plots:
            logging.info('Saving calibration plots in: %s' %
                         (calibration_file + '.pdf'))
            output_pdf = PdfPages(calibration_file + '.pdf')

        parameter_values = []
        for index, (analyzed_data_file,
                    parameters) in enumerate(files_per_parameter.items()):
            parameter_values.append(parameters.values()[0][0])
            with tb.openFile(analyzed_data_file, mode="r") as in_file_h5:
                occupancy_masked = mask_columns(
                    pixel_array=in_file_h5.root.HistOcc[:],
                    ignore_columns=ignore_columns
                )  # mask the not scanned columns for analysis and plotting
                thresholds_masked = mask_columns(
                    pixel_array=in_file_h5.root.HistThresholdFitted[:],
                    ignore_columns=ignore_columns)
                if self.create_plots:
                    plotThreeWay(hist=thresholds_masked,
                                 title='Threshold Fitted for ' +
                                 parameters.keys()[0] + ' = ' +
                                 str(parameters.values()[0][0]),
                                 filename=output_pdf)
                    plsr_dacs = analysis_utils.get_scan_parameter(
                        meta_data_array=in_file_h5.root.meta_data[:]
                    )['PlsrDAC']
                    plot_scurves(occupancy_hist=occupancy_masked,
                                 scan_parameters=plsr_dacs,
                                 scan_parameter_name='PlsrDAC',
                                 filename=output_pdf)
                # fill the calibration data arrays
                mean_threshold_calibration[index] = np.ma.mean(
                    thresholds_masked)
                mean_threshold_rms_calibration[index] = np.ma.std(
                    thresholds_masked)
                threshold_calibration[:, :, index] = thresholds_masked.T

        with tb.openFile(calibration_file + '.h5', mode="w") as out_file_h5:
            store_calibration_data_as_array(
                out_file_h5=out_file_h5,
                mean_threshold_calibration=mean_threshold_calibration,
                mean_threshold_rms_calibration=mean_threshold_rms_calibration,
                threshold_calibration=threshold_calibration)
            store_calibration_data_as_table(
                out_file_h5=out_file_h5,
                mean_threshold_calibration=mean_threshold_calibration,
                mean_threshold_rms_calibration=mean_threshold_rms_calibration,
                threshold_calibration=threshold_calibration,
                parameter_values=parameter_values)

        if self.create_plots:
            plot_scatter(x=parameter_values,
                         y=mean_threshold_calibration,
                         title='Threshold calibration',
                         x_label=parameter_name,
                         y_label='Mean threshold',
                         log_x=False,
                         filename=output_pdf)
            plot_scatter(x=parameter_values,
                         y=mean_threshold_calibration,
                         title='Threshold calibration',
                         x_label=parameter_name,
                         y_label='Mean threshold',
                         log_x=True,
                         filename=output_pdf)
            output_pdf.close()
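
A minimal read-back sketch for the calibration file written above (an editorial illustration, not part of the original snippet): it assumes a '<scan>_calibration.h5' file produced by store_calibration_data_as_table() and uses only the column names defined there; the helper name is hypothetical.

import logging
import tables as tb

def read_mean_threshold_calibration(calibration_file_name):  # hypothetical helper
    with tb.open_file(calibration_file_name, mode='r') as in_file_h5:
        calibration = in_file_h5.root.MeanThresholdCalibration[:]  # structured numpy array
        for entry in calibration:
            logging.info('parameter value %s: mean threshold %.2f (RMS %.2f)',
                         entry['parameter_value'], entry['mean_threshold'],
                         entry['threshold_rms'])
        return calibration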
Example #9
0
def analyze_injected_charge(data_analyzed_file):
    logging.info('Analyze the injected charge')
    with tb.openFile(data_analyzed_file, mode="r") as in_file_h5:
        occupancy = in_file_h5.root.HistOcc[:]
        gdacs = analysis_utils.get_scan_parameter(
            in_file_h5.root.meta_data[:])['GDAC']
        with tb.openFile(
                analysis_configuration['input_file_calibration'], mode="r"
        ) as in_file_calibration_h5:  # read calibration file from calibrate_threshold_gdac scan
            mean_threshold_calibration = in_file_calibration_h5.root.MeanThresholdCalibration[:]
            threshold_calibration_array = in_file_calibration_h5.root.HistThresholdCalibration[:]

            gdac_range_calibration = mean_threshold_calibration['gdac']
            gdac_range_source_scan = gdacs

            logging.info(
                'Analyzing source scan data with %d GDAC settings from %d to %d with step sizes from %d to %d'
                % (len(gdac_range_source_scan), np.min(gdac_range_source_scan),
                   np.max(gdac_range_source_scan),
                   np.min(np.gradient(gdac_range_source_scan)),
                   np.max(np.gradient(gdac_range_source_scan))))
            logging.info(
                'Using calibration data with %d GDAC settings from %d to %d with step sizes from %d to %d'
                % (len(gdac_range_calibration), np.min(gdac_range_calibration),
                   np.max(gdac_range_calibration),
                   np.min(np.gradient(gdac_range_calibration)),
                   np.max(np.gradient(gdac_range_calibration))))

            # rate_normalization of the total hit number for each GDAC setting
            rate_normalization = 1.
            if analysis_configuration['normalize_rate']:
                rate_normalization = analysis_utils.get_rate_normalization(
                    hit_file=hit_file,
                    cluster_file=hit_file,
                    parameter='GDAC',
                    reference=analysis_configuration[
                        'normalization_reference'],
                    plot=analysis_configuration['plot_normalization'])

            # correcting the hit numbers for the different cluster sizes
            correction_factors = 1.
            if analysis_configuration['use_cluster_rate_correction']:
                correction_h5 = tb.openFile(cluster_sizes_file, mode="r")
                cluster_size_histogram = correction_h5.root.AllHistClusterSize[:]
                correction_factors = analysis_utils.get_hit_rate_correction(
                    gdacs=gdac_range_source_scan,
                    calibration_gdacs=gdac_range_source_scan,
                    cluster_size_histogram=cluster_size_histogram)
                if analysis_configuration['plot_cluster_sizes']:
                    plot_cluster_sizes(correction_h5,
                                       in_file_calibration_h5,
                                       gdac_range=gdac_range_source_scan)

            print correction_factors

            pixel_thresholds = analysis_utils.get_pixel_thresholds_from_calibration_array(
                gdacs=gdac_range_source_scan,
                calibration_gdacs=gdac_range_calibration,
                threshold_calibration_array=threshold_calibration_array
            )  # interpolates the threshold at the source scan GDAC setting from the calibration
            pixel_hits = np.swapaxes(
                occupancy, 0, 1)  # create hit array with shape (col, row, ...)
            pixel_hits = pixel_hits * correction_factors * rate_normalization

            # choose region with pixels that have a sufficient occupancy but are not too hot
            good_pixel = analysis_utils.select_good_pixel_region(
                pixel_hits,
                col_span=analysis_configuration['col_span'],
                row_span=analysis_configuration['row_span'],
                min_cut_threshold=analysis_configuration['min_cut_threshold'],
                max_cut_threshold=analysis_configuration['max_cut_threshold'])
            pixel_mask = ~np.ma.getmaskarray(good_pixel)
            selected_pixel_hits = pixel_hits[
                pixel_mask, :]  # reduce the data to pixels that are in the good pixel region
            selected_pixel_thresholds = pixel_thresholds[
                pixel_mask, :]  # reduce the data to pixels that are in the good pixel region
            plotting.plot_occupancy(good_pixel.T,
                                    title='Selected ' +
                                    str(len(selected_pixel_hits)) +
                                    ' pixels for analysis')

            # reshape to one dimension
            x = selected_pixel_thresholds.flatten()
            y = selected_pixel_hits.flatten()

            # nothing should be NaN; NaN is not supported yet
            if not np.all(np.isfinite(x)) or not np.all(np.isfinite(y)):  # the original shape comparison always passed, so the warning never fired
                logging.warning(
                    'There are pixels with NaN or INF threshold or hit values, analysis will fail'
                )

            # calculate the profile histogram
            x_p, y_p, y_p_e = analysis_utils.get_profile_histogram(
                x, y, n_bins=analysis_configuration['n_bins']
            )  # profile histogram data

            # select only the data points where the calibration worked
            selected_data = np.logical_and(
                x_p > analysis_configuration['min_thr'] /
                analysis_configuration['vcal_calibration'],
                x_p < analysis_configuration['max_thr'] /
                analysis_configuration['vcal_calibration'])
            x_p = x_p[selected_data]
            y_p = y_p[selected_data]
            y_p_e = y_p_e[selected_data]

            plot_result(x_p, y_p, y_p_e)

            #  calculate and plot mean results
            x_mean = analysis_utils.get_mean_threshold_from_calibration(
                gdac_range_source_scan, mean_threshold_calibration)
            y_mean = selected_pixel_hits.mean(axis=(0))

            plotting.plot_scatter(
                np.array(gdac_range_source_scan),
                y_mean,
                log_x=True,
                plot_range=None,
                title='Mean single pixel cluster rate at different thresholds',
                x_label='threshold setting [GDAC]',
                y_label='mean single pixel cluster rate')
            plotting.plot_scatter(
                x_mean * analysis_configuration['vcal_calibration'],
                y_mean,
                plot_range=(analysis_configuration['min_thr'],
                            analysis_configuration['max_thr']),
                title='Mean single pixel cluster rate at different thresholds',
                x_label='mean threshold [e]',
                y_label='mean single pixel cluster rate')

        if analysis_configuration['use_cluster_rate_correction']:
            correction_h5.close()
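
The profile histogram used above is computed by analysis_utils.get_profile_histogram. As a rough numpy sketch of what such a helper returns (mean hit count per threshold bin with the error on the mean), not pyBAR's actual implementation:

import numpy as np

def profile_histogram(x, y, n_bins=100):  # illustrative stand-in for analysis_utils.get_profile_histogram
    bin_edges = np.linspace(np.min(x), np.max(x), n_bins + 1)
    bin_index = np.digitize(x, bin_edges[1:-1])  # bin number 0..n_bins-1 for every (x, y) pair
    x_profile = 0.5 * (bin_edges[:-1] + bin_edges[1:])  # bin centers
    y_profile = np.zeros(n_bins)
    y_error = np.zeros(n_bins)
    for i in range(n_bins):
        selection = bin_index == i
        n = np.count_nonzero(selection)
        if n > 0:
            y_profile[i] = np.mean(y[selection])
            y_error[i] = np.std(y[selection]) / np.sqrt(n)  # error on the mean
    return x_profile, y_profile, y_error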
Example #10
0
def analyse_n_cluster_per_event(
    scan_base,
    include_no_cluster=False,
    time_line_absolute=True,
    combine_n_readouts=1000,
    chunk_size=10000000,
    plot_n_cluster_hists=False,
    output_pdf=None,
    output_file=None,
):
    """ Determines the number of cluster per event as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts').

    Parameters
    ----------
    scan_base: list of str
        scan base names (e.g.:  ['//data//SCC_50_fei4_self_trigger_scan_390', ]
    include_no_cluster: bool
        Set to true to also consider all events without any hit.
    combine_n_readouts: int
        the number of read outs to combine (e.g. 1000)
    max_chunk_size: int
        the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer
    output_pdf: PdfPages
        PdfPages file object, if none the plot is printed to screen
    """

    time_stamp = []
    n_cluster = []

    start_time_set = False

    for data_file in scan_base:
        with tb.openFile(data_file + "_interpreted.h5", mode="r+") as in_cluster_file_h5:
            # get data and data pointer
            meta_data_array = in_cluster_file_h5.root.meta_data[:]
            cluster_table = in_cluster_file_h5.root.Cluster

            # determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number)
            parameter_ranges = np.column_stack(
                (
                    analysis_utils.get_ranges_from_array(meta_data_array["timestamp_start"][::combine_n_readouts]),
                    analysis_utils.get_ranges_from_array(meta_data_array["event_number"][::combine_n_readouts]),
                )
            )

            # create an event_number index (important for speed)
            analysis_utils.index_event_number(cluster_table)

            # initialize the analysis and set settings
            analyze_data = AnalyzeRawData()
            analyze_data.create_tot_hist = False
            analyze_data.create_bcid_hist = False

            # variables for read speed up
            index = 0  # index where to start the read out, 0 at the beginning, increased during looping
            best_chunk_size = chunk_size

            total_cluster = cluster_table.shape[0]

            progress_bar = progressbar.ProgressBar(
                widgets=[
                    "",
                    progressbar.Percentage(),
                    " ",
                    progressbar.Bar(marker="*", left="|", right="|"),
                    " ",
                    analysis_utils.ETA(),
                ],
                maxval=total_cluster,
                term_width=80,
            )
            progress_bar.start()

            # loop over the selected events
            for parameter_index, parameter_range in enumerate(parameter_ranges):
                logging.debug(
                    "Analyze time stamp "
                    + str(parameter_range[0])
                    + " and data from events = ["
                    + str(parameter_range[2])
                    + ","
                    + str(parameter_range[3])
                    + "[ "
                    + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0)))
                    + "%"
                )
                analyze_data.reset()  # resets the data of the last analysis

                # loop over the clusters in the currently selected events with optimizations: determine best chunk size, start word index given
                readout_cluster_len = (
                    0
                )  # variable to calculate an optimal chunk size value from the number of clusters for speed up
                hist = None
                for clusters, index in analysis_utils.data_aligned_at_events(
                    cluster_table,
                    start_event_number=parameter_range[2],
                    stop_event_number=parameter_range[3],
                    start=index,
                    chunk_size=best_chunk_size,
                ):
                    n_cluster_per_event = analysis_utils.get_n_cluster_in_events(clusters["event_number"])[
                        :, 1
                    ]  # array with the number of cluster per event, cluster per event are at least 1
                    if hist is None:
                        hist = np.histogram(n_cluster_per_event, bins=10, range=(0, 10))[0]
                    else:
                        hist = np.add(hist, np.histogram(n_cluster_per_event, bins=10, range=(0, 10))[0])
                    if include_no_cluster and parameter_range[3] is not None:  # parameter_range[3] is None for the last readout
                        hist[0] = (parameter_range[3] - parameter_range[2]) - len(
                            n_cluster_per_event
                        )  # add the events without any cluster
                    readout_cluster_len += clusters.shape[0]
                    total_cluster -= len(clusters)
                    progress_bar.update(index)
                best_chunk_size = (
                    int(1.5 * readout_cluster_len) if int(1.05 * readout_cluster_len) < chunk_size else chunk_size
                )  # to increase the readout speed, estimate the number of clusters for the next read instruction

                if plot_n_cluster_hists:
                    plotting.plot_1d_hist(
                        hist,
                        title="Number of cluster per event at " + str(parameter_range[0]),
                        x_axis_title="Number of cluster",
                        y_axis_title="#",
                        log_y=True,
                        filename=output_pdf,
                    )
                hist = hist.astype("f4") / np.sum(hist)  # calculate fraction from total numbers

                if time_line_absolute:
                    time_stamp.append(parameter_range[0])
                else:
                    if not start_time_set:
                        start_time = parameter_ranges[0, 0]
                        start_time_set = True
                    time_stamp.append((parameter_range[0] - start_time) / 60.0)
                n_cluster.append(hist)
            progress_bar.finish()
            if total_cluster != 0:
                logging.warning("Not all clusters were selected during analysis. Analysis is therefore not exact")

    if time_line_absolute:
        plotting.plot_scatter_time(
            time_stamp,
            n_cluster,
            title="Number of cluster per event as a function of time",
            marker_style="o",
            filename=output_pdf,
            legend=("0 cluster", "1 cluster", "2 cluster", "3 cluster")
            if include_no_cluster
            else ("0 cluster not plotted", "1 cluster", "2 cluster", "3 cluster"),
        )
    else:
        plotting.plot_scatter(
            time_stamp,
            n_cluster,
            title="Number of cluster per event as a function of time",
            x_label="time [min.]",
            marker_style="o",
            filename=output_pdf,
            legend=("0 cluster", "1 cluster", "2 cluster", "3 cluster")
            if include_no_cluster
            else ("0 cluster not plotted", "1 cluster", "2 cluster", "3 cluster"),
        )
    if output_file:
        with tb.openFile(output_file, mode="a") as out_file_h5:
            cluster_array = np.array(n_cluster)
            rec_array = np.array(
                zip(
                    time_stamp,
                    cluster_array[:, 0],
                    cluster_array[:, 1],
                    cluster_array[:, 2],
                    cluster_array[:, 3],
                    cluster_array[:, 4],
                    cluster_array[:, 5],
                ),
                dtype=[
                    ("time_stamp", float),
                    ("cluster_0", float),
                    ("cluster_1", float),
                    ("cluster_2", float),
                    ("cluster_3", float),
                    ("cluster_4", float),
                    ("cluster_5", float),
                ],
            ).view(np.recarray)
            try:
                n_cluster_table = out_file_h5.createTable(
                    out_file_h5.root,
                    name="n_cluster",
                    description=rec_array,
                    title="Cluster per event",
                    filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
                )
                n_cluster_table[:] = rec_array
            except tb.exceptions.NodeError:
                logging.warning(output_file + " has already a Beamspot note, do not overwrite existing.")
    return time_stamp, n_cluster
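
A minimal usage sketch for analyse_n_cluster_per_event (editorial; the scan base path is hypothetical). It assumes that '<scan_base>_interpreted.h5' exists and contains a Cluster table, i.e. that the raw data were already interpreted and clustered.

from matplotlib.backends.backend_pdf import PdfPages

with PdfPages("n_cluster_per_event.pdf") as output_pdf:
    time_stamp, n_cluster = analyse_n_cluster_per_event(
        scan_base=["data/SCC_50_fei4_self_trigger_scan_390"],  # hypothetical scan base name
        include_no_cluster=True,  # also count events without any cluster
        combine_n_readouts=1000,
        plot_n_cluster_hists=True,
        output_pdf=output_pdf,
        output_file="n_cluster_per_event.h5",
    )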
Example #11
0
def analyze_beam_spot(
    scan_base,
    combine_n_readouts=1000,
    chunk_size=10000000,
    plot_occupancy_hists=False,
    output_pdf=None,
    output_file=None,
):
    """ Determines the mean x and y beam spot position as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). The occupancy is determined
    for the given combined events and stored into a pdf file. At the end the beam x and y is plotted into a scatter plot with absolute positions in um.

     Parameters
    ----------
    scan_base: list of str
        scan base names (e.g.:  ['//data//SCC_50_fei4_self_trigger_scan_390', ]
    combine_n_readouts: int
        the number of read outs to combine (e.g. 1000)
    max_chunk_size: int
        the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer
    output_pdf: PdfPages
        PdfPages file object, if none the plot is printed to screen
    """
    time_stamp = []
    x = []
    y = []

    for data_file in scan_base:
        with tb.openFile(data_file + "_interpreted.h5", mode="r+") as in_hit_file_h5:
            # get data and data pointer
            meta_data_array = in_hit_file_h5.root.meta_data[:]
            hit_table = in_hit_file_h5.root.Hits

            # determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number)
            parameter_ranges = np.column_stack(
                (
                    analysis_utils.get_ranges_from_array(meta_data_array["timestamp_start"][::combine_n_readouts]),
                    analysis_utils.get_ranges_from_array(meta_data_array["event_number"][::combine_n_readouts]),
                )
            )

            # create an event_number index (important)
            analysis_utils.index_event_number(hit_table)

            # initialize the analysis and set settings
            analyze_data = AnalyzeRawData()
            analyze_data.create_tot_hist = False
            analyze_data.create_bcid_hist = False
            analyze_data.histograming.set_no_scan_parameter()

            # variables for read speed up
            index = 0  # index where to start the read out, 0 at the beginning, increased during looping
            best_chunk_size = chunk_size

            progress_bar = progressbar.ProgressBar(
                widgets=[
                    "",
                    progressbar.Percentage(),
                    " ",
                    progressbar.Bar(marker="*", left="|", right="|"),
                    " ",
                    analysis_utils.ETA(),
                ],
                maxval=hit_table.shape[0],
                term_width=80,
            )
            progress_bar.start()

            # loop over the selected events
            for parameter_index, parameter_range in enumerate(parameter_ranges):
                logging.debug(
                    "Analyze time stamp "
                    + str(parameter_range[0])
                    + " and data from events = ["
                    + str(parameter_range[2])
                    + ","
                    + str(parameter_range[3])
                    + "[ "
                    + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0)))
                    + "%"
                )
                analyze_data.reset()  # resets the data of the last analysis

                # loop over the hits in the currently selected events with optimizations: determine best chunk size, start word index given
                readout_hit_len = (
                    0
                )  # variable to calculate an optimal chunk size value from the number of hits for speed up
                for hits, index in analysis_utils.data_aligned_at_events(
                    hit_table,
                    start_event_number=parameter_range[2],
                    stop_event_number=parameter_range[3],
                    start=index,
                    chunk_size=best_chunk_size,
                ):
                    analyze_data.analyze_hits(hits)  # analyze the selected hits in chunks
                    readout_hit_len += hits.shape[0]
                    progress_bar.update(index)
                best_chunk_size = (
                    int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size else chunk_size
                )  # to increase the readout speed, estimate the number of hits for the next read instruction

                # get and store results
                occupancy_array = analyze_data.histograming.get_occupancy()
                projection_x = np.sum(occupancy_array, axis=0).ravel()
                projection_y = np.sum(occupancy_array, axis=1).ravel()
                x.append(analysis_utils.get_mean_from_histogram(projection_x, bin_positions=range(0, 80)))
                y.append(analysis_utils.get_mean_from_histogram(projection_y, bin_positions=range(0, 336)))
                time_stamp.append(parameter_range[0])
                if plot_occupancy_hists:
                    plotting.plot_occupancy(
                        occupancy_array[:, :, 0],
                        title="Occupancy for events between "
                        + time.strftime("%H:%M:%S", time.localtime(parameter_range[0]))
                        + " and "
                        + time.strftime("%H:%M:%S", time.localtime(parameter_range[1])),
                        filename=output_pdf,
                    )
            progress_bar.finish()
    plotting.plot_scatter(
        [i * 250 for i in x],
        [i * 50 for i in y],
        title="Mean beam position",
        x_label="x [um]",
        y_label="y [um]",
        marker_style="-o",
        filename=output_pdf,
    )
    if output_file:
        with tb.openFile(output_file, mode="a") as out_file_h5:
            rec_array = np.array(zip(time_stamp, x, y), dtype=[("time_stamp", float), ("x", float), ("y", float)])
            try:
                beam_spot_table = out_file_h5.createTable(
                    out_file_h5.root,
                    name="Beamspot",
                    description=rec_array,
                    title="Beam spot position",
                    filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
                )
                beam_spot_table[:] = rec_array
            except tb.exceptions.NodeError:
                logging.warning(output_file + " has already a Beamspot note, do not overwrite existing.")
    return time_stamp, x, y
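
A minimal usage sketch for analyze_beam_spot (editorial; the scan base path is hypothetical). It assumes that '<scan_base>_interpreted.h5' exists with a Hits table; the column and row means are scaled by the 250 um and 50 um FE-I4 pixel pitch inside the function before plotting.

from matplotlib.backends.backend_pdf import PdfPages

with PdfPages("beam_spot.pdf") as output_pdf:
    time_stamp, x, y = analyze_beam_spot(
        scan_base=["data/SCC_50_fei4_self_trigger_scan_390"],  # hypothetical scan base name
        combine_n_readouts=1000,
        plot_occupancy_hists=False,
        output_pdf=output_pdf,
        output_file="beam_spot.h5",
    )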
Example #12
0
def analyze_event_rate(scan_base, combine_n_readouts=1000, time_line_absolute=True, output_pdf=None, output_file=None):
    """ Determines the number of events as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). The number of events is taken from the meta data info
    and stored into a pdf file.

    Parameters
    ----------
    scan_base: list of str
        scan base names (e.g.:  ['//data//SCC_50_fei4_self_trigger_scan_390', ]
    combine_n_readouts: int
        the number of read outs to combine (e.g. 1000)
    time_line_absolute: bool
        if true the analysis uses absolute time stamps
    output_pdf: PdfPages
        PdfPages file object, if none the plot is printed to screen
    """
    time_stamp = []
    rate = []

    start_time_set = False

    for data_file in scan_base:
        with tb.openFile(data_file + "_interpreted.h5", mode="r") as in_file_h5:
            meta_data_array = in_file_h5.root.meta_data[:]
            parameter_ranges = np.column_stack(
                (
                    analysis_utils.get_ranges_from_array(meta_data_array["timestamp_start"][::combine_n_readouts]),
                    analysis_utils.get_ranges_from_array(meta_data_array["event_number"][::combine_n_readouts]),
                )
            )

            if time_line_absolute:
                time_stamp.extend(parameter_ranges[:-1, 0])
            else:
                if not start_time_set:
                    start_time = parameter_ranges[0, 0]
                    start_time_set = True
                time_stamp.extend((parameter_ranges[:-1, 0] - start_time) / 60.0)
            rate.extend(
                (parameter_ranges[:-1, 3] - parameter_ranges[:-1, 2])
                / (parameter_ranges[:-1, 1] - parameter_ranges[:-1, 0])
            )  # d#Events / dt
    if time_line_absolute:
        plotting.plot_scatter_time(time_stamp, rate, title="Event rate [Hz]", marker_style="o", filename=output_pdf)
    else:
        plotting.plot_scatter(
            time_stamp,
            rate,
            title="Events per time",
            x_label="Progressed time [min.]",
            y_label="Events rate [Hz]",
            marker_style="o",
            filename=output_pdf,
        )
    if output_file:
        with tb.openFile(output_file, mode="a") as out_file_h5:
            rec_array = np.array(zip(time_stamp, rate), dtype=[("time_stamp", float), ("rate", float)]).view(
                np.recarray
            )
            try:
                rate_table = out_file_h5.createTable(
                    out_file_h5.root,
                    name="Eventrate",
                    description=rec_array,
                    title="Event rate",
                    filters=tb.Filters(complib="blosc", complevel=5, fletcher32=False),
                )
                rate_table[:] = rec_array
            except tb.exceptions.NodeError:
                logging.warning(output_file + " has already a Eventrate note, do not overwrite existing.")
    return time_stamp, rate
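
The rate above is the event-number difference divided by the time-stamp difference per combined readout block (d#Events / dt). A self-contained toy sketch of that computation with made-up numbers:

import numpy as np

# toy stand-in for meta_data: start time stamps [s] and running event numbers per readout block
timestamp_start = np.array([0.0, 10.0, 20.0, 30.0])
event_number = np.array([0, 1500, 2700, 4200])

# one row per combined readout block: (t_start, t_stop, first_event, last_event)
parameter_ranges = np.column_stack((timestamp_start[:-1], timestamp_start[1:],
                                    event_number[:-1], event_number[1:]))
rate = (parameter_ranges[:, 3] - parameter_ranges[:, 2]) / \
       (parameter_ranges[:, 1] - parameter_ranges[:, 0])  # d#Events / dt in Hz
print(rate)  # [ 150.  120.  150.]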
Example #13
0
    def scan(self):
        with PdfPages(self.output_filename + ".pdf") as output_pdf:
            if self.test_tdc_values:
                x, y, y_err = [], [], []
                tdc_hist = None

                self.fifo_readout.reset_sram_fifo()  # clear fifo data
                for pulse_width in [i for j in (range(10, 100, 5), range(100, 400, 10)) for i in j]:
                    logging.info("Test TDC for a pulse with of %d", pulse_width)
                    self.start_pulser(pulse_width, self.n_pulses)
                    time.sleep(self.n_pulses * pulse_width * 1e-9 + 0.1)
                    data = self.fifo_readout.read_data()
                    if data[is_tdc_word(data)].shape[0] != 0:
                        tdc_values = np.bitwise_and(data[is_tdc_word(data)], 0x00000FFF)
                        tdc_counter = np.bitwise_and(data[is_tdc_word(data)], 0x000FF000)
                        tdc_counter = np.right_shift(tdc_counter, 12)
                        if np.count_nonzero(is_tdc_word(data)) != self.n_pulses:  # count only the TDC words, not all data words
                            logging.warning("%d TDC words instead of %d ", np.count_nonzero(is_tdc_word(data)), self.n_pulses)
                        try:
                            if np.any(
                                np.logical_and(
                                    tdc_counter[np.gradient(tdc_counter) != 1] != 0,
                                    tdc_counter[np.gradient(tdc_counter) != 1] != 255,
                                )
                            ):
                                logging.warning("The counter did not count correctly")
                        except ValueError:
                            logging.warning("The counter did not count correctly")

                        x.append(pulse_width)
                        y.append(np.mean(tdc_values))
                        y_err.append(np.std(tdc_values))
                        if tdc_hist is None:
                            tdc_hist = np.histogram(tdc_values, range=(0, 1023), bins=1024)[0]
                        else:
                            tdc_hist += np.histogram(tdc_values, range=(0, 1023), bins=1024)[0]
                    else:
                        logging.warning("No TDC words, check connection!")

                plotting.plot_scatter(
                    x,
                    y,
                    y_err,
                    title="FPGA TDC linearity, " + str(self.n_pulses) + " each",
                    x_label="Pulse width [ns]",
                    y_label="TDC value",
                    filename=output_pdf,
                )
                plotting.plot_scatter(
                    x,
                    y_err,
                    title="FPGA TDC RMS, " + str(self.n_pulses) + " each",
                    x_label="Pulse width [ns]",
                    y_label="TDC RMS",
                    filename=output_pdf,
                )
                if tdc_hist is not None:
                    plotting.plot_tdc_counter(tdc_hist, title="All TDC values", filename=output_pdf)

            if self.test_trigger_delay:
                x, y, y_err, y2, y2_err = [], [], [], [], []
                self.fifo_readout.reset_sram_fifo()  # clear fifo data
                for pulse_delay in [i for j in (range(0, 100, 5), range(100, 500, 500)) for i in j]:
                    logging.info("Test TDC for a pulse delay of %d", pulse_delay)
                    for _ in range(10):
                        self.start_pulser(pulse_width=100, n_pulses=1, pulse_delay=pulse_delay)
                        time.sleep(0.1)
                    data = self.fifo_readout.read_data()
                    if data[is_tdc_word(data)].shape[0] != 0:
                        if np.count_nonzero(is_tdc_word(data)) != 10:  # count only the TDC words, not all data words
                            logging.warning("%d TDC words instead of %d ", np.count_nonzero(is_tdc_word(data)), 10)
                        tdc_values = np.bitwise_and(data[is_tdc_word(data)], 0x00000FFF)
                        tdc_delay = np.bitwise_and(data[is_tdc_word(data)], 0x0FF00000)
                        tdc_delay = np.right_shift(tdc_delay, 20)

                        x.append(pulse_delay)
                        y.append(np.mean(tdc_delay))
                        y_err.append(np.std(tdc_delay))
                        y2.append(np.mean(tdc_values))
                        y2_err.append(np.std(tdc_values))
                    else:
                        logging.warning("No TDC words, check connection!")

                plotting.plot_scatter(
                    x,
                    y2,
                    y2_err,
                    title="FPGA TDC for different delays, " + str(self.n_pulses) + " each",
                    x_label="Pulse delay [ns]",
                    y_label="TDC value",
                    filename=output_pdf,
                )
                plotting.plot_scatter(
                    x,
                    y,
                    y_err,
                    title="FPGA TDC trigger delay, " + str(10) + " each",
                    x_label="Pulse delay [ns]",
                    y_label="TDC trigger delay",
                    filename=output_pdf,
                )
                plotting.plot_scatter(
                    x,
                    y_err,
                    title="FPGA TDC trigger delay RMS, " + str(10) + " each",
                    x_label="Pulse delay [ns]",
                    y_label="TDC trigger delay RMS",
                    filename=output_pdf,
                )
def create_threshold_calibration(scan_base_file_name, create_plots=True):  # Create calibration function, can be called stand alone
    def analyze_raw_data_file(file_name):
        if os.path.isfile(file_name[:-3] + '_interpreted.h5'):  # skip analysis if already done
            logging.warning('Analyzed data file ' + file_name + ' already exists. Skip analysis for this file.')
        else:
            with AnalyzeRawData(raw_data_file=file_name, create_pdf=False) as analyze_raw_data:
                analyze_raw_data.create_tot_hist = False
                analyze_raw_data.create_tot_pixel_hist = False
                analyze_raw_data.create_fitted_threshold_hists = True
                analyze_raw_data.create_threshold_mask = True
                analyze_raw_data.interpreter.set_warning_output(False)  # RX errors would fill the console
                analyze_raw_data.interpret_word_table()

    def store_calibration_data_as_table(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_values):
        logging.info("Storing calibration data in a table...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_table = out_file_h5.createTable(out_file_h5.root, name='MeanThresholdCalibration', description=data_struct.MeanThresholdCalibrationTable, title='mean_threshold_calibration', filters=filter_table)
        threshold_calib_table = out_file_h5.createTable(out_file_h5.root, name='ThresholdCalibration', description=data_struct.ThresholdCalibrationTable, title='threshold_calibration', filters=filter_table)
        for column in range(80):
            for row in range(336):
                for parameter_value_index, parameter_value in enumerate(parameter_values):
                    threshold_calib_table.row['column'] = column
                    threshold_calib_table.row['row'] = row
                    threshold_calib_table.row['parameter_value'] = parameter_value
                    threshold_calib_table.row['threshold'] = threshold_calibration[column, row, parameter_value_index]
                    threshold_calib_table.row.append()
        for parameter_value_index, parameter_value in enumerate(parameter_values):
            mean_threshold_calib_table.row['parameter_value'] = parameter_value
            mean_threshold_calib_table.row['mean_threshold'] = mean_threshold_calibration[parameter_value_index]
            mean_threshold_calib_table.row['threshold_rms'] = mean_threshold_rms_calibration[parameter_value_index]
            mean_threshold_calib_table.row.append()
        threshold_calib_table.flush()
        mean_threshold_calib_table.flush()
        logging.info("done")

    def store_calibration_data_as_array(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_name, parameter_values):
        logging.info("Storing calibration data in an array...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdMeanCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_calibration', filters=filter_table)
        mean_threshold_calib_rms_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdRMSCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_rms_calibration', filters=filter_table)
        threshold_calib_array = out_file_h5.createCArray(out_file_h5.root, name='HistThresholdCalibration', atom=tb.Atom.from_dtype(threshold_calibration.dtype), shape=threshold_calibration.shape, title='threshold_calibration', filters=filter_table)
        mean_threshold_calib_array[:] = mean_threshold_calibration
        mean_threshold_calib_rms_array[:] = mean_threshold_rms_calibration
        threshold_calib_array[:] = threshold_calibration
        mean_threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name]
        mean_threshold_calib_rms_array.attrs.dimensions = ['column', 'row', parameter_name]
        threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name]
        mean_threshold_calib_array.attrs.scan_parameter_values = parameter_values
        mean_threshold_calib_rms_array.attrs.scan_parameter_values = parameter_values
        threshold_calib_array.attrs.scan_parameter_values = parameter_values

        logging.info("done")

    def mask_columns(pixel_array, ignore_columns):
        idx = np.array(ignore_columns) - 1  # from FE to Array columns
        m = np.zeros_like(pixel_array)
        m[:, idx] = 1
        return np.ma.masked_array(pixel_array, m)

    raw_data_files = analysis_utils.get_data_file_names_from_scan_base(scan_base_file_name, filter_file_words=['interpreted', 'calibration_calibration'])
    first_scan_base_file_name = scan_base_file_name if isinstance(scan_base_file_name, basestring) else scan_base_file_name[0]  # multiple scan_base_file_names for multiple runs

    with tb.openFile(first_scan_base_file_name + '.h5', mode="r") as in_file_h5:  # deduce scan parameters from the first (and often only) scan base file name
        ignore_columns = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'ignore_columns')]['value'][0]
        parameter_name = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'scan_parameters')]['value'][0]
        ignore_columns = ast.literal_eval(ignore_columns)
        parameter_name = ast.literal_eval(parameter_name)[1][0]

    calibration_file = first_scan_base_file_name + '_calibration'

    for raw_data_file in raw_data_files:  # analyze each raw data file, not using multithreading here, it is already used in s-curve fit
        analyze_raw_data_file(raw_data_file)

    files_per_parameter = analysis_utils.get_parameter_value_from_file_names([file_name[:-3] + '_interpreted.h5' for file_name in raw_data_files], parameter_name, unique=True, sort=True)

    logging.info("Create calibration from data")
    mean_threshold_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    mean_threshold_rms_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    threshold_calibration = np.empty(shape=(80, 336, len(raw_data_files)), dtype='<f8')

    if create_plots:
        logging.info('Saving calibration plots in: %s', calibration_file + '.pdf')
        output_pdf = PdfPages(calibration_file + '.pdf')

    progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(files_per_parameter.items()), term_width=80)
    progress_bar.start()
    parameter_values = []
    for index, (analyzed_data_file, parameters) in enumerate(files_per_parameter.items()):
        parameter_values.append(parameters.values()[0][0])
        with tb.openFile(analyzed_data_file, mode="r") as in_file_h5:
            occupancy_masked = mask_columns(pixel_array=in_file_h5.root.HistOcc[:], ignore_columns=ignore_columns)  # mask the not scanned columns for analysis and plotting
            thresholds_masked = mask_columns(pixel_array=in_file_h5.root.HistThresholdFitted[:], ignore_columns=ignore_columns)
            if create_plots:
                plot_three_way(hist=thresholds_masked, title='Threshold Fitted for ' + parameters.keys()[0] + ' = ' + str(parameters.values()[0][0]), filename=output_pdf)
                plsr_dacs = analysis_utils.get_scan_parameter(meta_data_array=in_file_h5.root.meta_data[:])['PlsrDAC']
                plot_scurves(occupancy_hist=occupancy_masked, scan_parameters=plsr_dacs, scan_parameter_name='PlsrDAC', filename=output_pdf)
            # fill the calibration data arrays
            mean_threshold_calibration[index] = np.ma.mean(thresholds_masked)
            mean_threshold_rms_calibration[index] = np.ma.std(thresholds_masked)
            threshold_calibration[:, :, index] = thresholds_masked.T
        progress_bar.update(index)
    progress_bar.finish()

    with tb.openFile(calibration_file + '.h5', mode="w") as out_file_h5:
        store_calibration_data_as_array(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_name=parameter_name, parameter_values=parameter_values)
        store_calibration_data_as_table(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_values=parameter_values)

    if create_plots:
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=False, filename=output_pdf)
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=True, filename=output_pdf)
        output_pdf.close()
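
A minimal usage sketch for the stand-alone calibration function above (editorial; the scan base name is hypothetical). It assumes raw data files matching the scan base exist; the function then writes '<scan_base>_calibration.h5' with the array and table nodes created above and, with create_plots=True, '<scan_base>_calibration.pdf'.

create_threshold_calibration("data/SCC_50_threshold_calibration", create_plots=True)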