Example #1
0
def main_pipeline(files,
                  aux_basepath,
                  max_events,
                  dark_filename,
                  integral_width,
                  debug,
                  hillas_filename,
                  parameters_filename,
                  picture_threshold,
                  boundary_threshold,
                  template_filename,
                  saturation_threshold,
                  threshold_pulse,
                  bad_pixels=None,
                  disable_bar=False):
    # get configuration
    with open(parameters_filename) as file:
        calibration_parameters = yaml.safe_load(file)
    if bad_pixels is None:
        bad_pixels = get_bad_pixels(calib_file=parameters_filename,
                                    dark_histo=dark_filename,
                                    plot=None)
    pulse_template = NormalizedPulseTemplate.load(template_filename)
    pulse_area = pulse_template.integral() * u.ns
    ratio = pulse_template.compute_charge_amplitude_ratio(
        integral_width=integral_width, dt_sampling=4)  # ~ 0.24
    gain = np.array(calibration_parameters['gain'])  # ~ 20 LSB / p.e.
    gain_amplitude = gain * ratio
    crosstalk = np.array(calibration_parameters['mu_xt'])
    bias_resistance = 10 * 1E3 * u.Ohm  # 10 kOhm
    cell_capacitance = 50 * 1E-15 * u.Farad  # 50 fF
    geom = DigiCam.geometry
    dark_histo = Histogram1D.load(dark_filename)
    dark_baseline = dark_histo.mean()

    # define pipeline
    events = calibration_event_stream(files,
                                      max_events=max_events,
                                      disable_bar=disable_bar)
    events = add_slow_data_calibration(
        events,
        basepath=aux_basepath,
        aux_services=('DriveSystem', 'DigicamSlowControl', 'MasterSST1M',
                      'SafetyPLC', 'PDPSlowControl'))
    events = baseline.fill_dark_baseline(events, dark_baseline)
    events = baseline.fill_digicam_baseline(events)
    events = tagging.tag_burst_from_moving_average_baseline(events)
    events = baseline.compute_baseline_shift(events)
    events = baseline.subtract_baseline(events)
    events = filters.filter_clocked_trigger(events)
    events = baseline.compute_nsb_rate(events, gain_amplitude, pulse_area,
                                       crosstalk, bias_resistance,
                                       cell_capacitance)
    events = baseline.compute_gain_drop(events, bias_resistance,
                                        cell_capacitance)
    events = peak.find_pulse_with_max(events)
    events = charge.compute_dynamic_charge(
        events,
        integral_width=integral_width,
        saturation_threshold=saturation_threshold,
        threshold_pulse=threshold_pulse,
        debug=debug,
        pulse_tail=False,
    )
    events = charge.compute_photo_electron(events, gains=gain)
    events = charge.interpolate_bad_pixels(events, geom, bad_pixels)
    events = cleaning.compute_tailcuts_clean(
        events,
        geom=geom,
        overwrite=True,
        picture_thresh=picture_threshold,
        boundary_thresh=boundary_threshold,
        keep_isolated_pixels=False)
    events = cleaning.compute_boarder_cleaning(events, geom,
                                               boundary_threshold)
    events = cleaning.compute_dilate(events, geom)
    events = image.compute_hillas_parameters(events, geom)
    events = charge.compute_sample_photo_electron(events, gain_amplitude)
    events = cleaning.compute_3d_cleaning(events,
                                          geom,
                                          n_sample=50,
                                          threshold_sample_pe=20,
                                          threshold_time=2.1 * u.ns,
                                          threshold_size=0.005 * u.mm)
    # create pipeline output file
    output_file = Serializer(hillas_filename, mode='w', format='fits')
    data_to_store = PipelineOutputContainer()
    for event in events:
        if debug:
            print(event.hillas)
            print(event.data.nsb_rate)
            print(event.data.gain_drop)
            print(event.data.baseline_shift)
            print(event.data.border)
            plot_array_camera(np.max(event.data.adc_samples, axis=-1))
            plot_array_camera(
                np.nanmax(event.data.reconstructed_charge, axis=-1))
            plot_array_camera(event.data.cleaning_mask.astype(float))
            plot_array_camera(event.data.reconstructed_number_of_pe)
            plt.show()
        # fill container
        data_to_store.local_time = event.data.local_time
        data_to_store.event_type = event.event_type
        data_to_store.event_id = event.event_id
        data_to_store.az = event.slow_data.DriveSystem.current_position_az
        data_to_store.el = event.slow_data.DriveSystem.current_position_el

        r = event.hillas.r
        phi = event.hillas.phi
        psi = event.hillas.psi
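        # alpha: angle between the image major axis and the line from the
        # camera center to the image centroid; miss: the corresponding
        # perpendicular distance, computed from r and alpha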
        alpha = compute_alpha(phi.value, psi.value) * psi.unit
        data_to_store.alpha = alpha
        data_to_store.miss = compute_miss(r=r.value, alpha=alpha.value)
        data_to_store.miss = data_to_store.miss * r.unit
        data_to_store.baseline = np.mean(event.data.digicam_baseline)
        data_to_store.nsb_rate = np.mean(event.data.nsb_rate)
        temp_crate1 = event.slow_data.DigicamSlowControl.Crate1_T
        temp_crate2 = event.slow_data.DigicamSlowControl.Crate2_T
        temp_crate3 = event.slow_data.DigicamSlowControl.Crate3_T
        temp_digicam = np.array(
            np.hstack([temp_crate1, temp_crate2, temp_crate3]))
        temp_digicam_mean = np.mean(temp_digicam[np.logical_and(
            temp_digicam > 0, temp_digicam < 60)])
        data_to_store.digicam_temperature = temp_digicam_mean
        temp_sector1 = event.slow_data.PDPSlowControl.Sector1_T
        temp_sector2 = event.slow_data.PDPSlowControl.Sector2_T
        temp_sector3 = event.slow_data.PDPSlowControl.Sector3_T
        temp_pdp = np.array(
            np.hstack([temp_sector1, temp_sector2, temp_sector3]))
        temp_pdp_mean = np.mean(temp_pdp[np.logical_and(
            temp_pdp > 0, temp_pdp < 60)])
        data_to_store.pdp_temperature = temp_pdp_mean
        target_radec = event.slow_data.MasterSST1M.target_radec
        data_to_store.target_ra = target_radec[0]
        data_to_store.target_dec = target_radec[1]
        status_leds = event.slow_data.SafetyPLC.SPLC_CAM_Status
        # bit 8 of status_leds indicates on/off, bit 9 indicates blinking
        data_to_store.pointing_leds_on = bool((status_leds & 1 << 8) >> 8)
        data_to_store.pointing_leds_blink = bool((status_leds & 1 << 9) >> 9)
        hv_sector1 = event.slow_data.PDPSlowControl.Sector1_HV
        hv_sector2 = event.slow_data.PDPSlowControl.Sector2_HV
        hv_sector3 = event.slow_data.PDPSlowControl.Sector3_HV
        hv_pdp = np.array(np.hstack([hv_sector1, hv_sector2, hv_sector3]),
                          dtype=bool)
        data_to_store.all_hv_on = np.all(hv_pdp)
        ghv_sector1 = event.slow_data.PDPSlowControl.Sector1_GHV
        ghv_sector2 = event.slow_data.PDPSlowControl.Sector2_GHV
        ghv_sector3 = event.slow_data.PDPSlowControl.Sector3_GHV
        ghv_pdp = np.array(np.hstack([ghv_sector1, ghv_sector2, ghv_sector3]),
                           dtype=bool)
        data_to_store.all_ghv_on = np.all(ghv_pdp)
        is_on_source = bool(event.slow_data.DriveSystem.is_on_source)
        data_to_store.is_on_source = is_on_source
        is_tracking = bool(event.slow_data.DriveSystem.is_tracking)
        data_to_store.is_tracking = is_tracking
        data_to_store.shower = bool(event.data.shower)
        data_to_store.border = bool(event.data.border)
        data_to_store.burst = bool(event.data.burst)
        data_to_store.saturated = bool(event.data.saturated)
        for key, val in event.hillas.items():
            data_to_store[key] = val
        output_file.add_container(data_to_store)
    try:
        output_file.close()
        print(hillas_filename, 'created.')
    except ValueError:
        print('WARNING: no data to save,', hillas_filename, 'not created.')
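
For reference, a minimal invocation sketch of main_pipeline follows; every path
and threshold below is a placeholder chosen for illustration, not a value taken
from the original pipeline.

if __name__ == '__main__':
    # hypothetical file names and thresholds, for illustration only
    main_pipeline(
        files=['/data/raw/run0001.fits.fz'],
        aux_basepath='/data/aux',
        max_events=None,
        dark_filename='dark_histo.fits',
        integral_width=7,
        debug=False,
        hillas_filename='hillas_output.fits',
        parameters_filename='calibration_parameters.yml',
        picture_threshold=15,
        boundary_threshold=7,
        template_filename='pulse_template.txt',
        saturation_threshold=3000,
        threshold_pulse=0.1,
    )
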
Example #2
0
def data_quality(
        files, dark_filename, time_step, fits_filename, load_files,
        histo_filename, rate_plot_filename, baseline_plot_filename,
        nsb_plot_filename, parameters_filename, template_filename,
        aux_basepath, threshold_sample_pe=20.,
        bias_resistance=1e4 * u.Ohm, cell_capacitance=5e-14 * u.Farad,
        disable_bar=False, aux_services=('DriveSystem',)):
    input_dir = np.unique([os.path.dirname(file) for file in files])
    if len(input_dir) > 1:
        raise AttributeError("input files must be from the same directory")
    input_dir = input_dir[0]
    if aux_basepath.lower() == "search":
        aux_basepath = input_dir.replace('/raw/', '/aux/')
        print("auxiliary files are expected in", aux_basepath)
    with open(parameters_filename) as file:
        calibration_parameters = yaml.safe_load(file)

    pulse_template = NormalizedPulseTemplate.load(template_filename)
    pulse_area = pulse_template.integral() * u.ns
    gain_integral = np.array(calibration_parameters['gain'])

    charge_to_amplitude = pulse_template.compute_charge_amplitude_ratio(7, 4)
    gain_amplitude = gain_integral * charge_to_amplitude
    crosstalk = np.array(calibration_parameters['mu_xt'])
    pixel_id = np.arange(1296)
    n_pixels = len(pixel_id)
    dark_histo = Histogram1D.load(dark_filename)
    dark_baseline = dark_histo.mean()
    if not load_files:
        events = calibration_event_stream(files, disable_bar=disable_bar)
        events = add_slow_data_calibration(
            events, basepath=aux_basepath, aux_services=aux_services
        )
        events = fill_digicam_baseline(events)
        events = fill_dark_baseline(events, dark_baseline)
        events = subtract_baseline(events)
        events = compute_baseline_shift(events)
        events = compute_nsb_rate(
            events, gain_amplitude, pulse_area, crosstalk, bias_resistance,
            cell_capacitance
        )
        events = compute_gain_drop(events, bias_resistance, cell_capacitance)
        events = compute_sample_photo_electron(events, gain_amplitude)
        events = tag_burst_from_moving_average_baseline(
            events, n_previous_events=100, threshold_lsb=5
        )
        events = compute_3d_cleaning(events, geom=DigiCam.geometry,
                                     threshold_sample_pe=threshold_sample_pe)
        init_time = 0
        baseline = 0
        count = 0
        shower_count = 0
        az = 0
        el = 0
        container = DataQualityContainer()
        file = Serializer(fits_filename, mode='w', format='fits')
        baseline_histo = Histogram1D(
            data_shape=(n_pixels,),
            bin_edges=np.arange(4096)
        )
        for i, event in enumerate(events):
            new_time = event.data.local_time
            if init_time == 0:
                init_time = new_time
            count += 1
            baseline += np.mean(event.data.digicam_baseline)
            az += event.slow_data.DriveSystem.current_position_az
            el += event.slow_data.DriveSystem.current_position_el
            time_diff = new_time - init_time
            if event.data.shower:
                shower_count += 1
            baseline_histo.fill(event.data.digicam_baseline.reshape(-1, 1))
            if time_diff > time_step and i > 0:
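                # local_time is in nanoseconds, so these rates are per ns;
                # they are scaled by 1e9 to Hz when plotted below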
                trigger_rate = count / time_diff
                shower_rate = shower_count / time_diff
                baseline = baseline / count
                az = az / count
                el = el / count
                container.trigger_rate = trigger_rate
                container.baseline = baseline
                container.time = (new_time + init_time) / 2
                container.shower_rate = shower_rate
                container.burst = event.data.burst
                nsb_rate = event.data.nsb_rate
                container.nsb_rate = np.nanmean(nsb_rate).value
                container.current_position_az = az
                container.current_position_el = el
                baseline = 0
                count = 0
                init_time = 0
                shower_count = 0
                az = 0
                el = 0
                file.add_container(container)
        output_path = os.path.dirname(histo_filename)
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        baseline_histo.save(histo_filename)
        print(histo_filename, 'created.')
        file.close()
        print(fits_filename, 'created.')

    data = Table.read(fits_filename, format='fits')
    data = data.to_pandas()
    data['time'] = pd.to_datetime(data['time'], utc=True)
    data = data.set_index('time')

    if rate_plot_filename is not None:
        fig1 = plt.figure()
        ax = plt.gca()
        plt.xticks(rotation=70)
        plt.plot(data['trigger_rate']*1E9, '.', label='trigger rate')
        plt.plot(data['shower_rate']*1E9, '.', label='shower rate')
        plt.ylabel('rate [Hz]')
        plt.legend()
        xlim = plt.xlim()
        plt.xlim(xlim[0] - 1e-3, xlim[1] + 1e-3)  # ~1.4 min margin on each side
        if rate_plot_filename == "show":
            plt.show()
        else:
            output_path = os.path.dirname(rate_plot_filename)
            if not (output_path == '' or os.path.exists(output_path)):
                os.makedirs(output_path)
            plt.savefig(rate_plot_filename)
        plt.close(fig1)

    if baseline_plot_filename is not None:
        fig2 = plt.figure(figsize=(8, 6))
        ax = plt.gca()
        data_burst = data[data['burst']]
        data_good = data[~data['burst']]
        plt.xticks(rotation=70)
        plt.plot(data_good['baseline'], '.', label='good', ms=2)
        plt.plot(data_burst['baseline'], '.', label='burst', ms=2)
        plt.ylabel('Baseline [LSB]')
        xlim = plt.xlim()
        plt.xlim(xlim[0] - 1e-3, xlim[1] + 1e-3)  # ~1.4 min margin on each side
        if baseline_plot_filename == "show":
            plt.show()
        else:
            output_path = os.path.dirname(baseline_plot_filename)
            if not (output_path == '' or os.path.exists(output_path)):
                os.makedirs(output_path)
            plt.savefig(baseline_plot_filename)
        plt.close(fig2)

    if nsb_plot_filename is not None:
        fig3 = plt.figure()
        ax = fig3.add_subplot(111)
        data.plot(y='nsb_rate', ax=ax)
        ax.set_ylabel('$f_{NSB}$ [GHz]')

        if nsb_plot_filename == "show":
            plt.show()
        else:
            fig3.savefig(nsb_plot_filename)
        plt.close(fig3)

    return
Example #3
0
def analyse_acdc_level(files,
                       max_events=None,
                       delay_step_ns=0.1,
                       delay_range_ns=(-4., 4.),
                       time_range_ns=(-9., 39.),
                       sampling_ns=4.,
                       normalize_range=(-3, 4),
                       parameters=parameters_default,
                       template=template_default,
                       adc_noise=1):
    with open(parameters) as parameters_file:
        calibration_parameters = yaml.safe_load(parameters_file)
    gain_pixels = np.array(calibration_parameters['gain'])
    normalize_slice = np.arange(normalize_range[0],
                                normalize_range[1] + 1,
                                dtype=int)
    sample_template = np.arange(time_range_ns[0], time_range_ns[1],
                                sampling_ns)
    n_sample_template = len(sample_template)
    template = NormalizedPulseTemplate.load(template)
    delays = np.arange(delay_range_ns[0], delay_range_ns[1], delay_step_ns)
    n_delays = len(delays)
    templates_ampl = np.zeros([n_delays, n_sample_template])
    templates_std = np.zeros([n_delays, n_sample_template])
    index_max_template = np.zeros(n_delays, dtype=int)
    for i, delay in enumerate(delays):
        ampl_templ = template(sample_template + delay)
        std_templ = template.std(sample_template + delay)
        index_max_template[i] = np.argmax(ampl_templ)
        range_integ = index_max_template[i] + normalize_slice
        norm_templ = np.sum(ampl_templ[range_integ])
        templates_ampl[i, :] = ampl_templ / norm_templ
        templates_std[i, :] = std_templ / norm_templ
    max_ampl_one_pe = np.max(templates_ampl)
    events = calibration_event_stream(files,
                                      max_events=max_events,
                                      disable_bar=True)
    events = fill_digicam_baseline(events)
    events = subtract_baseline(events)
    rows_norm = np.tile(
        np.arange(1296, dtype=int)[:, None], [1, len(normalize_slice)])
    samples_events = None
    t_fit = []
    charge = []
    for event in events:
        if samples_events is None:
            n_sample = event.data.adc_samples.shape[1]
            samples_events = np.arange(n_sample) * sampling_ns
        adc_samples = event.data.adc_samples
        idx_sample_max = np.argmax(adc_samples, axis=1)
        column_norm = normalize_slice[None, :] + idx_sample_max[:, None]
        # we skip pixels with max too close to the limit of the sampling window
        # to be sure to be able to integrate and get normalization
        good_pix = np.logical_and(np.all(column_norm < n_sample, axis=1),
                                  np.all(column_norm >= 0, axis=1))
        # we skip pixels with max too close to the limit of the sampling window
        # to be sure to be able to compare with full template
        mean_index_max_template = int(np.round(np.mean(index_max_template)))
        index_template_rel = idx_sample_max - mean_index_max_template
        good_pix = np.logical_and(good_pix, index_template_rel >= 0)
        good_pix = np.logical_and(
            good_pix, index_template_rel + n_sample_template - 1 < n_sample)
        # we discard pixels with less than a few p.e.
        good_pix = np.logical_and(
            good_pix,
            np.max(adc_samples, axis=1) > 3.5 / max_ampl_one_pe)
        # discard pixels with max pulse not around the right position
        good_pix = np.logical_and(good_pix, idx_sample_max >= 15)
        good_pix = np.logical_and(good_pix, idx_sample_max <= 16)
        sample_norm = adc_samples[rows_norm[good_pix, :],
                                  column_norm[good_pix, :]]
        norm_pixels = np.sum(sample_norm, axis=1)
        # discard pixels where charge is <= 2.5 LSB (0.5 pe), as normalization
        # is then meaningless
        norm_all = np.zeros(1296)
        norm_all[good_pix] = norm_pixels
        good_pix = np.logical_and(good_pix, norm_all > 2.5)
        norm_pixels = norm_pixels[norm_pixels > 2.5]

        charge_good_pix = norm_pixels / gain_pixels[good_pix]
        adc_samples_norm = adc_samples[good_pix, :] / norm_pixels[:, None]
        n_good_pix = int(np.sum(good_pix))
        samples = np.arange(n_sample_template, dtype=int)[None, None, :]
        column_chi2 = index_template_rel[good_pix, None, None] + samples
        row_chi2 = np.tile(
            np.arange(n_good_pix)[:, None, None],
            [1, n_delays, n_sample_template])
        adc_samples_compared = adc_samples_norm[row_chi2, column_chi2]
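        # residual broadcasts to (n_good_pix, n_delays, n_sample_template):
        # each pixel's normalized waveform is compared to the template
        # shifted by every trial delay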
        residual = adc_samples_compared - templates_ampl[None, :, :]
        error_squared = templates_std[None, :, :] ** 2 \
            + (adc_noise/norm_pixels[:, None, None]) ** 2
        chi2 = np.sum(residual**2 / error_squared, axis=2) \
            / (n_sample_template - 1)

        t_fit_all = np.ones(1296) * np.nan
        # estimate offset from min chi2
        idx_delay_min = np.argmin(chi2, axis=1)
        delays_min = delays[idx_delay_min]
        delays_min[chi2[np.arange(n_good_pix), idx_delay_min] > 20] = np.nan
        t_fit_all[good_pix] = delays_min

        t_fit.append(-t_fit_all + idx_sample_max * sampling_ns)
        charge_all = np.ones(1296) * np.nan
        charge_all[good_pix] = charge_good_pix
        charge.append(charge_all)
    t_fit = np.array(t_fit)
    charge = np.array(charge)
    return charge, t_fit
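
A minimal call sketch of analyse_acdc_level; the input file name is a
placeholder and the remaining arguments keep their defaults (so
parameters_default and template_default must be available in the module).

charge, t_fit = analyse_acdc_level(['/data/raw/ac_scan_run.fits.fz'],
                                   max_events=1000)
# both returned arrays have shape (n_events, 1296)
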
Example #4
0
def entry():
    args = docopt(__doc__)
    files = args['<INPUT>']
    debug = args['--debug']

    max_events = convert_int(args['--max_events'])
    results_filename = args['--fit_output']
    dir_output = os.path.dirname(results_filename)

    if not os.path.exists(dir_output):
        raise IOError('Path {} for output '
                      'does not exist\n'.format(dir_output))

    pixel_ids = convert_pixel_args(args['--pixel'])
    integral_width = int(args['--integral_width'])
    shift = int(args['--shift'])
    bin_width = int(args['--bin_width'])
    ncall = int(args['--ncall'])
    ac_levels = convert_list_int(args['--ac_levels'])
    n_pixels = len(pixel_ids)
    n_ac_levels = len(ac_levels)
    adc_min = int(args['--adc_min'])
    adc_max = int(args['--adc_max'])

    timing_filename = args['--timing']
    timing = np.load(timing_filename)['time']

    charge_histo_filename = args['--compute_output']
    fmpe_results_filename = args['--gain']

    if args['--compute']:

        if n_ac_levels != len(files):
            raise ValueError('n_ac_levels = {} != '
                             'n_files = {}'.format(n_ac_levels, len(files)))

        time = np.zeros((n_ac_levels, n_pixels))

        charge_histo = Histogram1D(
            bin_edges=np.arange(adc_min * integral_width,
                                adc_max * integral_width,
                                bin_width),
            data_shape=(n_ac_levels, n_pixels))

        if os.path.exists(charge_histo_filename):
            raise IOError(
                'File {} already exists'.format(charge_histo_filename))

        for i, (file, ac_level) in tqdm(enumerate(zip(files, ac_levels)),
                                        total=n_ac_levels,
                                        desc='DAC level',
                                        leave=False):

            time[i] = timing[pixel_ids]
            pulse_indices = time[i] // 4

            events = calibration_event_stream(file,
                                              pixel_id=pixel_ids,
                                              max_events=max_events)
            # events = compute_baseline_with_min(events)
            events = fill_digicam_baseline(events)
            events = subtract_baseline(events)
            # events = find_pulse_with_max(events)
            events = fill_pulse_indices(events, pulse_indices)
            events = compute_charge(events, integral_width, shift)
            events = compute_amplitude(events)

            for event in events:
                charge_histo.fill(event.data.reconstructed_charge, indices=i)

        charge_histo.save(charge_histo_filename)

    if args['--fit']:

        input_parameters = Table.read(fmpe_results_filename, format='fits')
        input_parameters = input_parameters.to_pandas()

        gain = np.zeros((n_ac_levels, n_pixels)) * np.nan
        sigma_e = np.zeros((n_ac_levels, n_pixels)) * np.nan
        sigma_s = np.zeros((n_ac_levels, n_pixels)) * np.nan
        baseline = np.zeros((n_ac_levels, n_pixels)) * np.nan
        mu = np.zeros((n_ac_levels, n_pixels)) * np.nan
        mu_xt = np.zeros((n_ac_levels, n_pixels)) * np.nan
        amplitude = np.zeros((n_ac_levels, n_pixels)) * np.nan

        gain_error = np.zeros((n_ac_levels, n_pixels)) * np.nan
        sigma_e_error = np.zeros((n_ac_levels, n_pixels)) * np.nan
        sigma_s_error = np.zeros((n_ac_levels, n_pixels)) * np.nan
        baseline_error = np.zeros((n_ac_levels, n_pixels)) * np.nan
        mu_error = np.zeros((n_ac_levels, n_pixels)) * np.nan
        mu_xt_error = np.zeros((n_ac_levels, n_pixels)) * np.nan
        amplitude_error = np.zeros((n_ac_levels, n_pixels)) * np.nan

        mean = np.zeros((n_ac_levels, n_pixels)) * np.nan
        std = np.zeros((n_ac_levels, n_pixels)) * np.nan

        chi_2 = np.zeros((n_ac_levels, n_pixels)) * np.nan
        ndf = np.zeros((n_ac_levels, n_pixels)) * np.nan

        ac_limit = [np.inf] * n_pixels

        charge_histo = Histogram1D.load(charge_histo_filename)

        for i, ac_level in tqdm(enumerate(ac_levels),
                                total=n_ac_levels,
                                desc='DAC level',
                                leave=False):

            for j, pixel_id in tqdm(enumerate(pixel_ids),
                                    total=n_pixels,
                                    desc='Pixel',
                                    leave=False):

                histo = charge_histo[i, pixel_id]

                mean[i, j] = histo.mean()
                std[i, j] = histo.std()

                if histo.overflow > 0 or histo.data.sum() == 0:
                    continue

                fit_params_names = describe(mpe_distribution_general)
                options = {'fix_n_peaks': True}
                fixed_params = {}

                for param in fit_params_names:

                    if param in input_parameters.keys():
                        name = 'fix_' + param

                        options[name] = True
                        fixed_params[param] = input_parameters[param][pixel_id]

                if i > 0:
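                    # once the previous AC level yielded mu > 5 p.e., mu_xt is
                    # fixed to a weighted average of the lower-level fits
                    # (weighted by chi2 / ndf)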

                    if mu[i - 1, j] > 5:
                        ac_limit[j] = min(i, ac_limit[j])
                        ac_limit[j] = int(ac_limit[j])

                        weights_fit = chi_2[:ac_limit[j], j]
                        weights_fit = weights_fit / ndf[:ac_limit[j], j]

                        options['fix_mu_xt'] = True

                        temp = mu_xt[:ac_limit[j], j] * weights_fit
                        temp = np.nansum(temp)
                        temp = temp / np.nansum(weights_fit)
                        fixed_params['mu_xt'] = temp

                try:

                    fitter = MPEFitter(histogram=histo,
                                       cost='MLE',
                                       pedantic=0,
                                       print_level=0,
                                       throw_nan=True,
                                       fixed_params=fixed_params,
                                       **options)

                    fitter.fit(ncall=ncall)

                    if debug:
                        x_label = '[LSB]'
                        label = 'Pixel {}'.format(pixel_id)
                        fitter.draw(legend=False, x_label=x_label, label=label)
                        fitter.draw_init(legend=False,
                                         x_label=x_label,
                                         label=label)
                        fitter.draw_fit(legend=False,
                                        x_label=x_label,
                                        label=label)
                        plt.show()

                    param = fitter.parameters
                    param_err = fitter.errors
                    gain[i, j] = param['gain']
                    sigma_e[i, j] = param['sigma_e']
                    sigma_s[i, j] = param['sigma_s']
                    baseline[i, j] = param['baseline']
                    mu[i, j] = param['mu']
                    mu_xt[i, j] = param['mu_xt']
                    amplitude[i, j] = param['amplitude']

                    gain_error[i, j] = param_err['gain']
                    sigma_e_error[i, j] = param_err['sigma_e']
                    sigma_s_error[i, j] = param_err['sigma_s']
                    baseline_error[i, j] = param_err['baseline']
                    mu_error[i, j] = param_err['mu']
                    mu_xt_error[i, j] = param_err['mu_xt']
                    amplitude_error[i, j] = param_err['amplitude']

                    chi_2[i, j] = fitter.fit_test() * fitter.ndf
                    ndf[i, j] = fitter.ndf

                except Exception as e:

                    print(e)
                    print('Could not fit pixel {} for DAC level {}'.format(
                        pixel_id, ac_level))

        np.savez(
            results_filename,
            gain=gain,
            sigma_e=sigma_e,
            sigma_s=sigma_s,
            baseline=baseline,
            mu=mu,
            mu_xt=mu_xt,
            gain_error=gain_error,
            sigma_e_error=sigma_e_error,
            sigma_s_error=sigma_s_error,
            baseline_error=baseline_error,
            mu_error=mu_error,
            mu_xt_error=mu_xt_error,
            chi_2=chi_2,
            ndf=ndf,
            pixel_ids=pixel_ids,
            ac_levels=ac_levels,
            amplitude=amplitude,
            amplitude_error=amplitude_error,
            mean=mean,
            std=std,
        )

    if args['--save_figures']:

        pass

    if args['--display']:

        charge_histo = Histogram1D.load(charge_histo_filename)
        charge_histo.draw(index=(0, 0), log=False, legend=False)

    return
Example #5
0
def compute(files, ac_levels, dc_levels, output_filename, max_events, pixels,
            integral_width, timing, saturation_threshold, pulse_tail, debug):

    n_pixels = len(pixels)
    n_files = len(files)
    n_ac_level = len(ac_levels)
    n_dc_level = len(dc_levels)

    assert n_files == (n_ac_level * n_dc_level)

    for file in files:
        assert os.path.exists(file)

    assert not os.path.exists(output_filename)

    shape = (len(dc_levels), len(ac_levels), n_pixels)
    amplitude_mean = np.zeros(shape)
    amplitude_std = np.zeros(shape)
    baseline_mean = np.zeros(shape)
    baseline_std = np.zeros(shape)
    charge_mean = np.zeros(shape)
    charge_std = np.zeros(shape)

    for i, dc_level in tqdm(enumerate(dc_levels), total=n_dc_level):

        for j, ac_level in tqdm(enumerate(ac_levels), total=n_ac_level):

            index_file = i * n_ac_level + j
            file = files[index_file]

            events = calibration_event_stream(file, max_events=max_events)
            events = fill_digicam_baseline(events)
            events = subtract_baseline(events)
            # events = compute_charge_with_saturation(events, integral_width=7)

            events = compute_charge_with_saturation_and_threshold(
                events,
                integral_width=integral_width,
                debug=debug,
                trigger_bin=timing,
                saturation_threshold=saturation_threshold,
                pulse_tail=pulse_tail)
            # events = compute_maximal_charge(events)

            for n, event in enumerate(events):

                charge_mean[i, j] += event.data.reconstructed_charge
                amplitude_mean[i, j] += event.data.reconstructed_amplitude

                charge_std[i, j] += event.data.reconstructed_charge**2
                amplitude_std[i, j] += event.data.reconstructed_amplitude**2

                baseline_mean[i, j] += event.data.baseline
                baseline_std[i, j] += event.data.baseline**2

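            # turn the accumulated sums into per-pixel mean and standard
            # deviation: std = sqrt(E[x^2] - E[x]^2)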
            charge_mean[i, j] = charge_mean[i, j] / (n + 1)
            charge_std[i, j] = charge_std[i, j] / (n + 1)
            charge_std[i, j] = np.sqrt(charge_std[i, j] - charge_mean[i, j]**2)
            amplitude_mean[i, j] = amplitude_mean[i, j] / (n + 1)
            amplitude_std[i, j] = amplitude_std[i, j] / (n + 1)
            amplitude_std[i, j] = np.sqrt(amplitude_std[i, j] -
                                          amplitude_mean[i, j]**2)
            baseline_mean[i, j] = baseline_mean[i, j] / (n + 1)
            baseline_std[i, j] = baseline_std[i, j] / (n + 1)
            baseline_std[i, j] = np.sqrt(baseline_std[i, j] -
                                         baseline_mean[i, j]**2)

    np.savez(output_filename,
             charge_mean=charge_mean,
             charge_std=charge_std,
             amplitude_mean=amplitude_mean,
             amplitude_std=amplitude_std,
             ac_levels=ac_levels,
             dc_levels=dc_levels,
             baseline_mean=baseline_mean,
             baseline_std=baseline_std)
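
The saved arrays can be read back with numpy.load; a short sketch, assuming a
hypothetical output_filename of 'ac_dc_scan.npz':

results = np.load('ac_dc_scan.npz')
charge_mean = results['charge_mean']  # shape (n_dc_level, n_ac_level, n_pixels)
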
Example #6
0
def compute(files,
            pixel_id,
            max_events,
            pulse_indices,
            integral_width,
            shift,
            bin_width,
            charge_histo_filename='charge_histo.pk',
            amplitude_histo_filename='amplitude_histo.pk',
            save=True):
    if os.path.exists(charge_histo_filename) and save:

        raise IOError('File {} already exists'.format(charge_histo_filename))

    elif os.path.exists(charge_histo_filename):

        charge_histo = Histogram1D.load(charge_histo_filename)

    if os.path.exists(amplitude_histo_filename) and save:

        raise IOError(
            'File {} already exists'.format(amplitude_histo_filename))

    elif os.path.exists(amplitude_histo_filename):

        amplitude_histo = Histogram1D.load(amplitude_histo_filename)

    if (not os.path.exists(amplitude_histo_filename)) or \
            (not os.path.exists(charge_histo_filename)):

        n_pixels = len(pixel_id)

        events = calibration_event_stream(files,
                                          pixel_id=pixel_id,
                                          max_events=max_events)
        # events = compute_baseline_with_min(events)
        events = fill_digicam_baseline(events)
        events = subtract_baseline(events)
        # events = find_pulse_with_max(events)
        events = fill_pulse_indices(events, pulse_indices)
        events = compute_charge(events, integral_width, shift)
        events = compute_amplitude(events)

        charge_histo = Histogram1D(data_shape=(n_pixels, ),
                                   bin_edges=np.arange(-40 * integral_width,
                                                       4096 * integral_width,
                                                       bin_width))

        amplitude_histo = Histogram1D(data_shape=(n_pixels, ),
                                      bin_edges=np.arange(-40, 4096, 1))

        for event in events:

            charge_histo.fill(event.data.reconstructed_charge)
            amplitude_histo.fill(event.data.reconstructed_amplitude)

        if save:
            charge_histo.save(charge_histo_filename)
            amplitude_histo.save(amplitude_histo_filename)

    return amplitude_histo, charge_histo
def compute(files, ac_levels, dc_levels, output_filename, dark_charge,
            dark_baseline, max_events, pixels, integral_width, timing,
            saturation_threshold, pulse_tail, debug):

    directory = '/sst1m/analyzed/calib/mpe/'
    file_calib = os.path.join(directory, 'mpe_fit_results_combined.npz')
    data_calib = dict(np.load(file_calib))

    pe = data_calib['mu']
    pe_err = data_calib['mu_error']
    ac = data_calib['ac_levels'][:, 0]
    ac_led = ACLED(ac, pe, pe_err)
    pde = 0.9  # window filter
    true_pe = ac_led(ac_levels).T * pde
    # mask = true_pe < 5
    # true_pe[mask] = pe[mask]

    n_pixels = len(pixels)
    n_ac_level = len(ac_levels)
    n_dc_level = len(dc_levels)
    n_files = len(files)

    assert n_files == (n_ac_level * n_dc_level)

    debug = False
    pulse_tail = False
    shape = (n_dc_level, n_ac_level, n_pixels)
    nsb_mean = np.zeros(shape)
    nsb_std = np.zeros(shape)
    pe_mean = np.zeros(shape)
    pe_std = np.zeros(shape)


    print(dark_baseline, dark_charge)
    pe_interpolator = lambda x: charge_to_pe(x, dark_charge, true_pe)

    for i, dc_level in tqdm(enumerate(dc_levels), total=n_dc_level):

        for j, ac_level in tqdm(enumerate(ac_levels), total=n_ac_level):

            index_file = i * n_ac_level + j
            file = files[index_file]
            events = calibration_event_stream(file, max_events=max_events)
            events = fill_dark_baseline(events, dark_baseline)
            events = fill_digicam_baseline(events)
            events = compute_baseline_shift(events)
            events = subtract_baseline(events)
            # events = compute_nsb_rate(events, gain, pulse_area, crosstalk,
            #                           bias_resistance, cell_capacitance)
            # events = compute_charge_with_saturation(events, integral_width=7)
            events = compute_charge_with_saturation_and_threshold(
                events,
                integral_width=integral_width,
                debug=debug,
                trigger_bin=timing,
                saturation_threshold=saturation_threshold,
                pulse_tail=pulse_tail)

            events = compute_number_of_pe_from_table(events, pe_interpolator)
            events = rescale_pulse(
                events,
                gain_func=_gain_drop_from_baseline_shift,
                xt_func=_crosstalk_drop_from_baseline_shift,
                pde_func=_pde_drop_from_baseline_shift)
            # events = compute_maximal_charge(events)

            for n, event in enumerate(events):

                pe_mean[i, j] += event.data.reconstructed_number_of_pe
                pe_std[i, j] += event.data.reconstructed_number_of_pe**2
                # nsb_mean[i] += event.data.nsb_rate
                # nsb_std[i] += event.data.nsb_rate**2
                # print(event.data.baseline_shift)

            pe_mean[i, j] = pe_mean[i, j] / (n + 1)
            # nsb_mean[i] = nsb_mean[i] / (n + 1)
            pe_std[i, j] = pe_std[i, j] / (n + 1)
            pe_std[i, j] = np.sqrt(pe_std[i, j] - pe_mean[i, j]**2)
            # nsb_std[i] = nsb_std[i] / (n + 1)
            # nsb_std[i] = np.sqrt(nsb_std[i] - nsb_mean[i]**2)

    np.savez(output_filename, pe_reco_mean=pe_mean, pe_reco_std=pe_std,
             ac_levels=ac_levels, pe=pe, pe_err=pe_err, true_pe=true_pe,
             nsb_mean=nsb_mean, nsb_std=nsb_std)
    timing = np.load('/sst1m/analyzed/calib/timing/timing.npz')
    timing = timing['time'] // 4

    data_1 = dict(np.load(filename_1_dark))
    dark_baseline = data_1['baseline_mean'][0]
    charge_mean = data_1['charge_mean']

    print(dark_baseline)
    pe_interpolator = lambda x: charge_to_pe(x, charge_mean, true_pe)

    for i, file in tqdm(enumerate(files), total=n_files):

        events = calibration_event_stream(file, max_events=max_events)
        events = fill_dark_baseline(events, dark_baseline)
        events = fill_digicam_baseline(events)
        events = compute_baseline_shift(events)
        events = subtract_baseline(events)
        # events = compute_nsb_rate(events, gain, pulse_area, crosstalk,
        #                           bias_resistance, cell_capacitance)
        # events = compute_charge_with_saturation(events, integral_width=7)
        events = compute_charge_with_saturation_and_threshold(
            events,
            integral_width=integral_width,
            debug=debug,
            trigger_bin=timing,
            saturation_threshold=saturation_threshold,
            pulse_tail=pulse_tail)

        events = compute_number_of_pe_from_table(events, pe_interpolator)
        events = rescale_pulse(events, gain_func=_gain_drop_from_baseline_shift,
                               xt_func=_crosstalk_drop_from_baseline_shift,
Example #9
0
def main(
        input_files,
        output_hist,
        delays_ns=None,
        time_range_ns=(-10., 40.),
        amplitude_range=(-0.1, 0.4),
        integration_range=None,
        # charge < 50 pe (noisy) or > 400 pe (saturation) => bad_charge
        # 1 pe <=> 20 LSB integral
        charge_range=(1000., 8000.),
        n_bin=100,
        disable_bar=False):
    if delays_ns is not None:
        assert len(delays_ns) == len(input_files)
    charge_min = np.min(charge_range)
    charge_max = np.max(charge_range)
    if integration_range is not None:
        integration_min = np.min(integration_range)
        integration_max = np.max(integration_range)
    histo = None
    n_sample = 0
    n_pixel = 0
    for file_idx, input_file in enumerate(input_files):
        if not os.path.isfile(input_file):
            continue
        events = calibration_event_stream([input_file],
                                          disable_bar=disable_bar)
        events = fill_digicam_baseline(events)
        if "SST1M_01_201805" in input_files[0]:  # fix data in May
            print("WARNING: correction of the baselines applied.")
            events = correct_wrong_baseline(events)
        events = subtract_baseline(events)
        for e in events:
            adc = e.data.adc_samples
            if integration_range is not None:
                adc_interp = adc[:, slice(integration_min, integration_max)]
            else:
                adc_interp = adc
            integral = adc_interp.sum(axis=1)
            adc_norm = adc / integral[:, None]
            if delays_ns is None:
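                # the leading-edge estimate is in sample units; one sample
                # corresponds to 4 ns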
                arrival_time_in_ns = estimate_time_from_leading_edge(adc) * 4
            else:
                arrival_time_in_ns = delays_ns[file_idx] * np.ones(1296)
            if histo is None:
                n_pixel, n_sample = adc_norm.shape
                histo = Histogram2dChunked(
                    shape=(n_pixel, n_bin, n_bin),
                    range=[time_range_ns, amplitude_range])
            else:
                assert adc_norm.shape[0] == n_pixel
                assert adc_norm.shape[1] == n_sample
            time_in_ns = np.arange(n_sample) * 4
            bad_charge = np.logical_or(integral < charge_min,
                                       integral > charge_max)
            arrival_time_in_ns[bad_charge] = -np.inf  # ignored by histo
            histo.fill(x=time_in_ns[None, :] - arrival_time_in_ns[:, None],
                       y=adc_norm)
    if os.path.exists(output_hist):
        os.remove(output_hist)
    histo.save(output_hist)
    print('2D histogram of pulse shape for all pixel saved as', output_hist)
Example #10
0
def get_burst(files,
              plot_baseline="show",
              n_previous_events=100,
              threshold_lsb=2.,
              output=None,
              expand=10,
              merge_sec=5.,
              video_prefix=None,
              disable_bar=False):
    # get events info
    events = calibration_event_stream(files, disable_bar=disable_bar)
    events = fill_digicam_baseline(events)
    events = tag_burst_from_moving_average_baseline(
        events,
        n_previous_events=n_previous_events,
        threshold_lsb=threshold_lsb)
    n_event = 0
    timestamps = []
    event_ids = []
    are_burst = []
    baselines = []
    for event in events:
        n_event += 1
        timestamps.append(event.data.local_time)
        event_ids.append(event.event_id)
        are_burst.append(event.data.burst)
        baselines.append(np.mean(event.data.digicam_baseline))
    timestamps = np.array(timestamps)
    event_ids = np.array(event_ids)
    are_burst = np.array(are_burst)
    baselines = np.array(baselines)

    # plot history of the baselines
    if plot_baseline is not None:
        fig1 = plt.figure(figsize=(8, 6))
        ax = plt.gca()
        plt.xticks(rotation=70)
        plt.plot_date(to_datetime(timestamps), baselines, '.')
        plt.ylabel('mean baseline [LSB]')
        ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
        plt.tight_layout()
        if plot_baseline.lower() == "show":
            plt.show()
        else:
            plt.savefig(plot_baseline)
        plt.close(fig1)

    # identify the bursts
    if np.all(~are_burst):
        raise SystemExit('no burst detected')
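    # widen the burst mask with expand_mask so that events adjacent to a
    # detected burst are also flagged (`expand` controls how far)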
    are_burst = expand_mask(are_burst, iters=expand)
    previous_is_burst = False
    bursts = []
    for event in range(n_event):
        if are_burst[event]:
            if not previous_is_burst:
                bursts.append([event])
            if event == n_event - 1 or (not are_burst[event + 1]):
                bursts[-1].append(event)
            previous_is_burst = True
        else:
            previous_is_burst = False
    if np.all(~are_burst):
        raise SystemExit('no burst identified')

    # merge bursts which are closer than merge_sec seconds
    last_burst_begin = bursts[0][0]
    last_burst_end = bursts[0][1]
    merged_bursts = []
    for burst_idxs in bursts[1:]:
        begin_idx, end_idx = burst_idxs
        interval = (timestamps[begin_idx] - timestamps[last_burst_end])
        if interval < merge_sec * 1e9:
            last_burst_end = end_idx
        else:
            merged_bursts.append([last_burst_begin, last_burst_end])
            last_burst_begin = begin_idx
            last_burst_end = end_idx
    if len(merged_bursts) == 0 or merged_bursts[-1][0] != last_burst_begin:
        merged_bursts.append([last_burst_begin, last_burst_end])
    bursts = merged_bursts

    # output result
    if output is None:
        run_file = sys.stdout
    else:
        run_file = open(output, 'w')
    run_file.write("#burst ts_start ts_end id_start id_end\n")  # write header
    date_format = '%Y-%m-%dT%H:%M:%S'
    for i, burst_idxs in enumerate(bursts):
        begin_idx, end_idx = burst_idxs
        ts_begin = pd.to_datetime(timestamps[begin_idx]).strftime(date_format)
        ts_end = pd.to_datetime(timestamps[end_idx]).strftime(date_format)
        run_file.write(str(i) + " " + ts_begin + " " + ts_end)
        run_file.write(" " + str(event_ids[begin_idx]) + " ")
        run_file.write(str(event_ids[end_idx]) + "\n")
    if output is not None:
        run_file.close()

    if video_prefix is not None:
        for i, burst_idxs in enumerate(bursts):
            begin_idx, end_idx = burst_idxs
            events = calibration_event_stream(files, disable_bar=disable_bar)
            events = fill_digicam_baseline(events)
            if video_prefix != "show":
                video = video_prefix + "_" + str(i) + ".mp4"
            else:
                video = "show"
            animate_baseline(events,
                             video,
                             event_id_min=event_ids[begin_idx],
                             event_id_max=event_ids[end_idx])