Example #1
def sensitivity_gamma_efficiency(dl2_file_g,
                                 dl2_file_p,
                                 ntelescopes_gammas,
                                 ntelescopes_protons,
                                 n_bins_energy,
                                 gamma_eff_gammaness,
                                 gamma_eff_theta2,
                                 noff,
                                 obstime=50 * 3600 * u.s):
    """
    Main function to calculate the sensitivity for cuts based
    on gamma efficiency

    Parameters
    ----------
    dl2_file_g: `string` path to h5 file of reconstructed gammas
    dl2_file_p: `string` path to h5 file of reconstructed protons
    ntelescopes_gammas: `int` number of telescopes used in the gamma simulation
    ntelescopes_protons: `int` number of telescopes used in the proton simulation
    n_bins_energy: `int` number of bins in energy
    gamma_eff_gammaness: `float` between 0 and 1, fraction of gammas
        to be kept after the gammaness cut
    gamma_eff_theta2: `float` between 0 and 1, fraction of gammas
        to be kept after the theta2 cut
    noff: `float` ratio between the background and the signal region
    obstime: `Quantity` Observation time in seconds

    Returns
    -------
    energy: `array` edges of the energy bins
    sensitivity: `array` sensitivity per energy bin
    result: `pd.DataFrame` table with per-bin cuts, rates, efficiencies and sensitivity
    gammalike_events: `pd.DataFrame` events surviving the cuts
    gcut: `array` gammaness cut per energy bin
    tcut: `array` theta2 cut per energy bin

    """

    # Read simulated and reconstructed values

    gammaness_g, theta2_g, e_reco_g, e_true_g, mc_par_g, events_g = process_mc(
        dl2_file_g, 'gamma')
    gammaness_p, angdist2_p, e_reco_p, e_true_p, mc_par_p, events_p = process_mc(
        dl2_file_p, 'proton')

    # Account for the number of telescopes simulated
    mc_par_g['sim_ev'] = mc_par_g['sim_ev'] * ntelescopes_gammas
    mc_par_p['sim_ev'] = mc_par_p['sim_ev'] * ntelescopes_protons

    # Set binning for sensitivity calculation
    emin_sensitivity = mc_par_p['emin']
    emax_sensitivity = mc_par_p['emax']

    # Energy bins
    energy = np.logspace(np.log10(emin_sensitivity.to_value(u.TeV)),
                         np.log10(emax_sensitivity.to_value(u.TeV)),
                         n_bins_energy + 1) * u.TeV

    # Extract spectral parameters
    dFdE, crab_par = crab_hegra(energy)
    dFdEd0, proton_par = proton_bess(energy)

    # Rates and weights

    w_g = get_weights(mc_par_g, crab_par)
    w_p = get_weights(mc_par_p, proton_par)

    if (w_g.unit == u.Unit("sr / s")):
        print(
            "You are using diffuse gammas to estimate point-like sensitivity")
        print("These results will make no sense")
        w_g = w_g / u.sr  # Fix to make tests pass

    rate_weighted_g = ((e_true_g / crab_par['e0']) ** (crab_par['alpha'] - mc_par_g['sp_idx'])) \
                      * w_g
    rate_weighted_p = ((e_true_p / proton_par['e0']) ** (proton_par['alpha'] - mc_par_p['sp_idx'])) \
                      * w_p

    # For background, select protons contained in a ring overlapping with the ON region
    p_contained, ang_area_p = ring_containment(angdist2_p, 1.0 * u.deg,
                                               0.9 * u.deg)
    # FIX: ring_radius and ring_halfwidth should have units of deg
    # FIX: hardcoded at the moment, but ring_radius should be read from
    # the gamma file (point-like) or given as input (diffuse).
    # FIX: ring_halfwidth should be given as input

    # Initialize arrays

    final_gammas = np.ndarray(shape=(n_bins_energy))
    final_protons = np.ndarray(shape=(n_bins_energy))
    pre_gammas = np.ndarray(shape=(n_bins_energy))
    pre_protons = np.ndarray(shape=(n_bins_energy))
    weighted_gamma_per_ebin = np.ndarray(n_bins_energy)
    weighted_proton_per_ebin = np.ndarray(n_bins_energy)
    sensitivity = np.ndarray(shape=n_bins_energy)
    n_excesses_min = np.ndarray(shape=n_bins_energy)
    eff_g = np.ndarray(shape=n_bins_energy)
    eff_p = np.ndarray(shape=n_bins_energy)
    gcut = np.ndarray(shape=n_bins_energy)
    tcut = np.ndarray(shape=n_bins_energy)
    gamma_rate = np.ndarray(shape=n_bins_energy)
    proton_rate = np.ndarray(shape=n_bins_energy)

    # Total rate of gammas and protons
    total_rate_proton = np.sum(rate_weighted_p)
    total_rate_gamma = np.sum(rate_weighted_g)

    print("Total rate triggered proton {:.3f} Hz".format(total_rate_proton))
    print("Total rate triggered gamma  {:.3f} Hz".format(total_rate_gamma))

    # Dataframe to store the events which survive the cuts
    gammalike_events = pd.DataFrame(columns=events_g.keys())

    # Weight events and count number of events per bin:
    for i in range(0, n_bins_energy):  # binning in energy

        print("\n******** Energy bin: {:.3f} - {:.3f} TeV ********".format(
            energy[i].value, energy[i + 1].value))
        total_rate_proton_ebin = np.sum(
            rate_weighted_p[(e_reco_p < energy[i + 1])
                            & (e_reco_p > energy[i])])
        total_rate_gamma_ebin = np.sum(
            rate_weighted_g[(e_reco_g < energy[i + 1])
                            & (e_reco_g > energy[i])])

        # print("**************")
        print("Total rate triggered proton in this bin {:.5f} Hz".format(
            total_rate_proton_ebin.value))
        print("Total rate triggered gamma in this bin {:.5f} Hz".format(
            total_rate_gamma_ebin.value))

        # Calculate the cuts in gammaness and theta2 based on efficiency of weighted gammas

        rates_g = rate_weighted_g[(e_reco_g < energy[i + 1])
                                  & (e_reco_g > energy[i])]
        events_bin_g = events_g[(e_reco_g < energy[i + 1])
                                & (e_reco_g > energy[i])]
        events_bin_p = events_p[(e_reco_p < energy[i + 1])
                                & (e_reco_p > energy[i])]

        best_g_cut = find_cut(events_bin_g, rates_g, obstime, "gammaness", 0.1,
                              1.0, gamma_eff_gammaness)
        best_theta2_cut = find_cut(events_bin_g, rates_g, obstime, "theta2",
                                   0.0, 10.0, gamma_eff_theta2) * u.deg**2

        events_bin_after_cuts_g = events_bin_g[
            (events_bin_g.gammaness > best_g_cut)
            & (events_bin_g.theta2 < best_theta2_cut)]
        events_bin_after_cuts_p = events_bin_p[
            (events_bin_p.gammaness > best_g_cut)
            & (events_bin_p.theta2 < best_theta2_cut)]

        # Save the survived events in the dataframe
        gammalike_events = pd.concat(
            (gammalike_events, events_bin_after_cuts_g))
        gammalike_events = pd.concat(
            (gammalike_events, events_bin_after_cuts_p))

        # Ratio between the area where we search for gammas (np.pi * best_theta2_cut)
        # and the area where we search for protons (ang_area_p)
        area_ratio_p = np.pi * best_theta2_cut / ang_area_p

        rate_g_ebin = np.sum(rate_weighted_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i]) \
                                             & (gammaness_g > best_g_cut) & (theta2_g < best_theta2_cut)])

        rate_p_ebin = np.sum(rate_weighted_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i]) \
                                             & (gammaness_p > best_g_cut) & p_contained])

        gamma_rate[i] = rate_g_ebin.to(1 / u.min).to_value()
        proton_rate[i] = rate_p_ebin.to(1 / u.min).to_value()

        final_gammas[i] = rate_g_ebin * obstime
        final_protons[i] = rate_p_ebin * obstime * area_ratio_p

        pre_gammas[i] = e_reco_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i]) \
                                 & (gammaness_g > best_g_cut) & (theta2_g < best_theta2_cut)].shape[0]
        pre_protons[i] = e_reco_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i]) \
                                  & (gammaness_p > best_g_cut) & p_contained].shape[0]

        weighted_gamma_per_ebin[i] = np.sum(rate_weighted_g[(e_reco_g < energy[i + 1]) & \
                                                            (e_reco_g > energy[i])]) * obstime
        weighted_proton_per_ebin[i] = np.sum(rate_weighted_p[(e_reco_p < energy[i + 1]) & \
                                                             (e_reco_p > energy[i])]) * obstime

        gcut[i] = best_g_cut
        tcut[i] = best_theta2_cut.to_value()

        eff_g[i] = final_gammas[i] / weighted_gamma_per_ebin[i]
        eff_p[i] = final_protons[i] / weighted_proton_per_ebin[i]

    n_excesses_min, sensitivity = calculate_sensitivity_lima(
        final_gammas, final_protons * noff,
        1 / noff * np.ones_like(final_gammas))

    # Avoid bins which are empty or have too few events:
    min_num_events = 10
    min_pre_events = 5

    # Set conditions for calculating sensitivity

    conditions = ((sensitivity <= 0)
                  | (pre_gammas < min_pre_events)
                  | (pre_protons < min_pre_events)
                  | (final_gammas < min_num_events))

    sensitivity[conditions] = np.inf

    # Compute sensitivity in flux units
    egeom = np.sqrt(energy[1:] * energy[:-1])
    dFdE, par = crab_hegra(egeom)
    sensitivity_flux = sensitivity / 100 * (dFdE * egeom * egeom).to(
        u.TeV / (u.cm**2 * u.s))

    print("\n******** Energy [TeV] *********\n")
    print(egeom)
    print("\nsensitivity flux:\n", sensitivity_flux)
    print("\nsensitivity[%]:\n", sensitivity)
    print("\n**************\n")

    list_of_tuples = list(
        zip(energy[:energy.shape[0] - 1].to_value(), energy[1:].to_value(),
            gcut, tcut, final_gammas, final_protons, gamma_rate,
            proton_rate, n_excesses_min, sensitivity,
            sensitivity_flux.to_value(), eff_g, eff_p, pre_gammas,
            pre_protons))

    result = pd.DataFrame(
        list_of_tuples,
        columns=[
            'ebin_low', 'ebin_up', 'gammaness_cut', 'theta2_cut',
            'gammas_reweighted', 'protons_reweighted', 'gamma_rate',
            'proton_rate', 'n_excesses_min', 'relative_sensitivity',
            'sensitivity_flux', 'eff_gamma', 'eff_proton', 'mc_gammas',
            'mc_protons'
        ])

    return energy, sensitivity, result, gammalike_events, gcut, tcut
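
# Usage sketch (not part of the original function): one way
# sensitivity_gamma_efficiency might be called. The DL2 file paths, telescope
# numbers and efficiencies below are placeholders/assumptions, not values taken
# from the module.
import astropy.units as u

energy_edges, sens_rel, sens_table, gammalike, gcuts, tcuts = \
    sensitivity_gamma_efficiency(
        "dl2_gammas.h5",            # placeholder path to reconstructed gammas
        "dl2_protons.h5",           # placeholder path to reconstructed protons
        ntelescopes_gammas=1,
        ntelescopes_protons=1,
        n_bins_energy=20,
        gamma_eff_gammaness=0.8,    # keep 80% of gammas after the gammaness cut
        gamma_eff_theta2=0.68,      # keep 68% of gammas after the theta2 cut
        noff=5,
        obstime=50 * 3600 * u.s)
print(sens_table)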
Example #2
def sensitivity(simtelfile_gammas,
                simtelfile_protons,
                dl2_file_g,
                dl2_file_p,
                nfiles_gammas,
                nfiles_protons,
                n_bins_energy,
                gcut,
                tcut,
                noff,
                obstime=50 * 3600 * u.s):
    """
    Main function to calculate the sensitivity given a MC dataset

    Parameters
    ----------
    simtelfile_gammas: `string` path to simtelfile of gammas with mc info
    simtelfile_protons: `string` path to simtelfile of protons with mc info
    dl2_file_g: `string` path to h5 file of reconstructed gammas
    dl2_file_p: `string` path to h5 file of reconstructed protons
    nfiles_gammas: `int` number of simtel gamma files reconstructed
    nfiles_protons: `int` number of simtel proton files reconstructed
    n_bins_energy: `int` number of bins in energy
    gcut: `array` gammaness cut per energy bin
    tcut: `array` theta2 cut per energy bin
    noff: `float` ratio between the background and the signal region
    obstime: `Quantity` Observation time in seconds

    Returns
    -------
    energy: `array` edges of the energy bins
    sensitivity: `array` sensitivity per energy bin
    result: `pd.DataFrame` table with per-bin quantities
    units: `list` units of the columns of result
    dl2: `pd.DataFrame` events surviving the cuts

    """

    # Read simulated and reconstructed values
    gammaness_g, theta2_g, e_reco_g, e_true_g, mc_par_g, events_g = process_mc(
        simtelfile_gammas, dl2_file_g, 'gamma')
    gammaness_p, angdist2_p, e_reco_p, e_true_p, mc_par_p, events_p = process_mc(
        simtelfile_protons, dl2_file_p, 'proton')

    mc_par_g['sim_ev'] = mc_par_g['sim_ev'] * nfiles_gammas
    mc_par_p['sim_ev'] = mc_par_p['sim_ev'] * nfiles_protons

    # Pass units to TeV and cm2
    mc_par_g['emin'] = mc_par_g['emin'].to(u.TeV)
    mc_par_g['emax'] = mc_par_g['emax'].to(u.TeV)

    mc_par_p['emin'] = mc_par_p['emin'].to(u.TeV)
    mc_par_p['emax'] = mc_par_p['emax'].to(u.TeV)

    mc_par_g['area_sim'] = mc_par_g['area_sim'].to(u.cm**2)
    mc_par_p['area_sim'] = mc_par_p['area_sim'].to(u.cm**2)

    # Set binning for sensitivity calculation
    emin_sensitivity = 0.01 * u.TeV  # mc_par_g['emin']
    emax_sensitivity = 100 * u.TeV  # mc_par_g['emax']

    energy = np.logspace(np.log10(emin_sensitivity.to_value()),
                         np.log10(emax_sensitivity.to_value()),
                         n_bins_energy + 1) * u.TeV

    # Extract spectral parameters
    dFdE, crab_par = crab_hegra(energy)
    dFdEd0, proton_par = proton_bess(energy)

    bins = np.logspace(np.log10(emin_sensitivity.to_value()),
                       np.log10(emax_sensitivity.to_value()),
                       n_bins_energy + 1)
    y0 = mc_par_g['sim_ev'] / (mc_par_g['emax'].to_value() ** (mc_par_g['sp_idx'] + 1) \
                               - mc_par_g['emin'].to_value() ** (mc_par_g['sp_idx'] + 1)) \
         * (mc_par_g['sp_idx'] + 1)
    y = y0 * (bins[1:]**(crab_par['alpha'] + 1) -
              bins[:-1]**(crab_par['alpha'] + 1)) / (crab_par['alpha'] + 1)

    n_sim_bin = y

    # Rates and weights
    rate_g = rate("PowerLaw", mc_par_g['emin'], mc_par_g['emax'], crab_par,
                  mc_par_g['cone'], mc_par_g['area_sim'])

    rate_p = rate("PowerLaw", mc_par_p['emin'], mc_par_p['emax'], proton_par,
                  mc_par_p['cone'], mc_par_p['area_sim'])

    w_g = weight("PowerLaw", mc_par_g['emin'], mc_par_g['emax'],
                 mc_par_g['sp_idx'], rate_g, mc_par_g['sim_ev'], crab_par)

    w_p = weight("PowerLaw", mc_par_p['emin'], mc_par_p['emax'],
                 mc_par_p['sp_idx'], rate_p, mc_par_p['sim_ev'], proton_par)

    if (w_g.unit == u.Unit("sr / s")):
        print(
            "You are using diffuse gammas to estimate point-like sensitivity")
        print("These results will make no sense")
        w_g = w_g / u.sr  # Fix to make tests pass

    rate_weighted_g = ((e_true_g / crab_par['e0']) ** (crab_par['alpha'] - mc_par_g['sp_idx'])) \
                      * w_g
    rate_weighted_p = ((e_true_p / proton_par['e0']) ** (proton_par['alpha'] - mc_par_p['sp_idx'])) \
                      * w_p

    p_contained, ang_area_p = ring_containment(angdist2_p, 0.4 * u.deg,
                                               0.3 * u.deg)

    # FIX: ring_radius and ring_halfwidth should have units of deg
    # FIX: hardcoded at the moment, but ring_radius should be read from
    # the gamma file (point-like) or given as input (diffuse).
    # FIX: ring_halfwidth should be given as input
    area_ratio_p = np.pi * tcut / ang_area_p
    # Ratio between the area where we search for gammas (np.pi * tcut)
    # and the area where we search for protons (ang_area_p)

    # Arrays to contain the number of gammas and hadrons for different cuts
    final_gamma = np.ndarray(shape=(n_bins_energy))
    final_hadrons = np.ndarray(shape=(n_bins_energy))
    pre_gamma = np.ndarray(shape=(n_bins_energy))
    pre_hadrons = np.ndarray(shape=(n_bins_energy))

    ngamma_per_ebin = np.ndarray(n_bins_energy)
    nhadron_per_ebin = np.ndarray(n_bins_energy)

    # Weight events and count number of events per bin:
    for i in range(n_bins_energy):  # binning in energy
        rate_g_ebin = np.sum(rate_weighted_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i]) \
                                             & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])])


        rate_p_ebin = np.sum(rate_weighted_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i]) \
                                             & (gammaness_p > gcut[i]) & p_contained])
        final_gamma[i] = (rate_g_ebin * obstime).value
        final_hadrons[i] = (rate_p_ebin * obstime).value * area_ratio_p[i]

        pre_gamma[i] = e_reco_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i]) \
                                & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])].shape[0]
        pre_hadrons[i] = e_reco_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i]) \
                                  & (gammaness_p > gcut[i]) & p_contained].shape[0]

        ngamma_per_ebin[i] = np.sum(
            rate_weighted_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i])].to(1 / u.s).value) \
                             * obstime.to(u.s).value
        nhadron_per_ebin[i] = np.sum(
            rate_weighted_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i])].to(1 / u.s).value) \
                              * obstime.to(u.s).value

    n_excesses_5sigma, sensitivity_3Darray = calculate_sensitivity_lima_ebin(
        final_gamma, final_hadrons * noff,
        1 / noff * np.ones(len(final_gamma)), n_bins_energy)
    # Avoid bins which are empty or have too few events:
    min_num_events = 5
    min_pre_events = 5
    # Minimum number of gamma and proton events in a bin to be taken into account for minimization
    for i in range(0, n_bins_energy):
        conditions = (not np.isfinite(sensitivity_3Darray[i])) or (sensitivity_3Darray[i] <= 0) \
                     or (final_hadrons[i] < min_num_events) \
                     or (pre_gamma[i] < min_pre_events) \
                     or (pre_hadrons[i] < min_pre_events)
        if conditions:
            sensitivity_3Darray[i] = np.inf

    # Quantities to show in the results
    sensitivity = np.ndarray(shape=n_bins_energy)
    n_excesses_min = np.ndarray(shape=n_bins_energy)
    eff_g = np.ndarray(shape=n_bins_energy)
    eff_p = np.ndarray(shape=n_bins_energy)
    ngammas = np.ndarray(shape=n_bins_energy)
    nhadrons = np.ndarray(shape=n_bins_energy)
    gammarate = np.ndarray(shape=n_bins_energy)
    hadronrate = np.ndarray(shape=n_bins_energy)
    eff_area = np.ndarray(shape=n_bins_energy)
    nevents_gamma = np.ndarray(shape=n_bins_energy)
    nevents_proton = np.ndarray(shape=n_bins_energy)

    # Calculate the minimum sensitivity per energy bin
    for i in range(0, n_bins_energy):
        ngammas[i] = final_gamma[i]
        nhadrons[i] = final_hadrons[i]
        gammarate[i] = final_gamma[i] / (obstime.to(u.min)).to_value()
        hadronrate[i] = final_hadrons[i] / (obstime.to(u.min)).to_value()
        n_excesses_min[i] = n_excesses_5sigma[i]
        sensitivity[i] = sensitivity_3Darray[i]
        eff_g[i] = final_gamma[i] / ngamma_per_ebin[i]
        eff_p[i] = final_hadrons[i] / nhadron_per_ebin[i]

        e_aftercuts = e_true_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i]) \
                               & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])]

        e_aftercuts_p = e_true_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i]) \
                                 & (gammaness_p > gcut[i]) & p_contained]

        e_aftercuts_w = np.sum(
            np.power(e_aftercuts, crab_par['alpha'] - mc_par_g['sp_idx']))

        e_w = np.sum(
            np.power(
                e_true_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i])],
                crab_par['alpha'] - mc_par_g['sp_idx']))

        eff_area[i] = e_aftercuts_w.to_value(
        ) / n_sim_bin[i] * mc_par_g['area_sim'].to(u.m**2).to_value()

        nevents_gamma[i] = e_aftercuts.shape[0]
        nevents_proton[i] = e_aftercuts_p.shape[0]

    # Compute sensitivity  in flux units

    egeom = np.sqrt(energy[1:] * energy[:-1])
    dFdE, par = crab_hegra(egeom)
    sensitivity_flux = sensitivity / 100 * (dFdE * egeom * egeom).to(
        u.erg / (u.cm**2 * u.s))

    print("\n******** Energy [TeV] *********\n")
    print(egeom)
    print("\nsensitivity flux:\n", sensitivity_flux)
    print("\nsensitivity[%]:\n", sensitivity)
    print("\n**************\n")

    list_of_tuples = list(
        zip(energy[:-1].to_value(), energy[1:].to_value(),
            gcut, tcut, ngammas,
            nhadrons, gammarate, hadronrate, n_excesses_min,
            sensitivity_flux.to_value(), eff_area, eff_g, eff_p, nevents_gamma,
            nevents_proton))
    result = pd.DataFrame(list_of_tuples,
                          columns=[
                              'ebin_low', 'ebin_up', 'gammaness_cut',
                              'theta2_cut', 'n_gammas', 'n_hadrons',
                              'gamma_rate', 'hadron_rate', 'n_excesses_min',
                              'sensitivity', 'eff_area', 'eff_gamma',
                              'eff_hadron', 'nevents_g', 'nevents_p'
                          ])

    units = [
        energy.unit, energy.unit, "", tcut.unit, "", "", u.min**-1, u.min**-1,
        "", sensitivity_flux.unit, mc_par_g['area_sim'].to(u.cm**2).unit, "",
        "", "", ""
    ]

    # sensitivity_minimization_plot(n_bins_energy, n_bins_gammaness, n_bins_theta2, energy, sensitivity)
    # plot_positions_survived_events(events_g,
    #                                events_p,
    #                                gammaness_g, gammaness_p,
    #                                theta2_g, p_contained, sensitivity, energy, n_bins_energy, gcut, tcut)

    # Build dataframe of events that survive the cuts:
    events = pd.concat((events_g, events_p))
    dl2 = pd.DataFrame(columns=events.keys())

    for i in range(0, n_bins_energy):
        df_bin = events[(events.mc_energy < energy[i+1]) & (events.mc_energy > energy[i]) \
                               & (events.gammaness > gcut[i]) & (events.theta2 < tcut[i])]

        dl2 = pd.concat((dl2, df_bin))

    return energy, sensitivity, result, units, dl2
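
# Reference sketch (assumption, not the module's own implementation): the
# calculate_sensitivity_lima* helpers used above are presumably based on the
# Li & Ma (1983, Eq. 17) significance. A minimal standalone version, useful to
# sanity-check individual bins:
import numpy as np


def lima_significance(n_on, n_off, alpha):
    # Li & Ma Eq. 17: significance of n_on ON counts given n_off OFF counts
    # and ON/OFF exposure ratio alpha.
    term_on = n_on * np.log((1 + alpha) / alpha * n_on / (n_on + n_off))
    term_off = n_off * np.log((1 + alpha) * n_off / (n_on + n_off))
    return np.sqrt(2) * np.sqrt(term_on + term_off)


# Example: 100 excess gammas over 400 background events in the ON region,
# with noff = 5 equivalent OFF regions.
print(lima_significance(n_on=500, n_off=2000, alpha=1 / 5))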
Example #3
def sens(simtelfile_gammas, simtelfile_protons,
         dl2_file_g, dl2_file_p,
         nfiles_gammas, nfiles_protons,
         eb, gcut, tcut, noff,
         obstime = 50 * 3600 * u.s):
    """
    Main function to calculate the sensitivity given a MC dataset

    Parameters
    ----------
    simtelfile_gammas: `string` path to simtelfile of gammas with mc info
    simtelfile_protons: `string` path to simtelfile of protons with mc info
    dl2_file_g: `string` path to h5 file of reconstructed gammas
    dl2_file_p: `string` path to h5 file of reconstructed protons
    nfiles_gammas: `int` number of simtel gamma files reconstructed
    nfiles_protons: `int` number of simtel proton files reconstructed
    eb: `int` number of bins in energy
    gcut: `array` gammaness cut per energy bin
    tcut: `array` theta2 cut per energy bin
    noff: `float` ratio between the background and the signal region
    obstime: `Quantity` Observation time in seconds

    TODO: Give files as input in a configuration file!

    Returns
    -------
    E: `array` edges of the energy bins
    sensitivity: `array` sensitivity per energy bin
    result: `pd.DataFrame` table with per-bin quantities
    units: `list` units of the columns of result
    dl2: `pd.DataFrame` events surviving the cuts
    """

    # Read simulated and reconstructed values
    gammaness_g, theta2_g, e_reco_g, e_true_g, mc_par_g, events_g = process_mc(simtelfile_gammas,
                                                           dl2_file_g, 'gamma')
    gammaness_p, angdist2_p, e_reco_p, e_true_p, mc_par_p, events_p = process_mc(simtelfile_protons,
                                                             dl2_file_p, 'proton')

    mc_par_g['sim_ev'] = mc_par_g['sim_ev']*nfiles_gammas
    mc_par_p['sim_ev'] = mc_par_p['sim_ev']*nfiles_protons

    #Pass units to GeV and cm2
    mc_par_g['emin'] = mc_par_g['emin'].to(u.GeV)
    mc_par_g['emax'] = mc_par_g['emax'].to(u.GeV)

    mc_par_p['emin'] = mc_par_p['emin'].to(u.GeV)
    mc_par_p['emax'] = mc_par_p['emax'].to(u.GeV)

    mc_par_g['area_sim'] = mc_par_g['area_sim'].to(u.cm**2)
    mc_par_p['area_sim'] = mc_par_p['area_sim'].to(u.cm**2)

    #Set binning for sensitivity calculation
    emin_sens = 10**1 * u.GeV #mc_par_g['emin']
    emax_sens = 10**5 * u.GeV #mc_par_g['emax']

    E = np.logspace(np.log10(emin_sens.to_value()),
                np.log10(emax_sens.to_value()), eb + 1) * u.GeV

    #Number of simulated events per energy bin
    """
    bins, n_sim_bin = power_law_integrated_distribution(emin_sens.to_value(),
                                                        emax_sens.to_value(),
                                                        mc_par_g['sim_ev'],
                                                        mc_par_g['sp_idx'], eb+1)


    """
    # Extract spectral parameters
    dFdE, crab_par = crab_hegra(E)
    dFdEd0, proton_par = proton_bess(E)

    bins = np.logspace(np.log10(emin_sens.to_value()), np.log10(emax_sens.to_value()), eb+1)
    y0 = mc_par_g['sim_ev'] / (mc_par_g['emax'].to_value()**(mc_par_g['sp_idx'] + 1) \
                               - mc_par_g['emin'].to_value()**(mc_par_g['sp_idx'] + 1)) \
        * (mc_par_g['sp_idx'] + 1)
    y = y0 * (bins[1:]**(crab_par['alpha'] + 1) - bins[:-1]**(crab_par['alpha'] + 1)) / (crab_par['alpha'] + 1)

    n_sim_bin = y


    # Rates and weights
    rate_g = rate(mc_par_g['emin'], mc_par_g['emax'], crab_par['alpha'],
                     mc_par_g['cone'], mc_par_g['area_sim'],
                     crab_par['f0'], crab_par['e0'])

    rate_p = rate(mc_par_p['emin'], mc_par_p['emax'], proton_par['alpha'],
                     mc_par_p['cone'], mc_par_p['area_sim'],
                     proton_par['f0'], proton_par['e0'])

    w_g = weight(mc_par_g['emin'], mc_par_g['emax'], mc_par_g['sp_idx'],
                    crab_par['alpha'], rate_g,
                    mc_par_g['sim_ev'], crab_par['e0'])

    w_p = weight(mc_par_p['emin'], mc_par_p['emax'], mc_par_p['sp_idx'],
                    proton_par['alpha'], rate_p,
                    mc_par_p['sim_ev'], proton_par['e0'])


    e_reco_gw = ((e_reco_g / crab_par['e0'])**(crab_par['alpha'] - mc_par_g['sp_idx'])) \
                * w_g
    e_reco_pw = ((e_reco_p / proton_par['e0'])**(proton_par['alpha'] - mc_par_p['sp_idx'])) \
                * w_p

    p_contained, ang_area_p = ring_containment(angdist2_p, 0.4 * u.deg, 0.2 * u.deg)
    # FIX: ring_radius and ring_halfwidth should have units of deg
    # FIX: hardcoded at the moment, but ring_radius should be read from
    # the gamma file (point-like) or given as input (diffuse).
    # FIX: ring_halfwidth should be given as input
    area_ratio_p = np.pi * tcut / ang_area_p
    # Ratio between the area where we search for gammas (np.pi * tcut)
    # and the area where we search for protons (ang_area_p)

    # Arrays to contain the number of gammas and hadrons for different cuts
    final_gamma = np.ndarray(shape=(eb))
    final_hadrons = np.ndarray(shape=(eb))
    pre_gamma = np.ndarray(shape=(eb))
    pre_hadrons = np.ndarray(shape=(eb))

    ngamma_per_ebin = np.ndarray(eb)
    nhadron_per_ebin = np.ndarray(eb)

    # Weight events and count number of events per bin:
    for i in range(0,eb):  # binning in energy
        eg_w_sum = np.sum(e_reco_gw[(e_reco_g < E[i+1]) & (e_reco_g > E[i]) \
                                    & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])])

        ep_w_sum = np.sum(e_reco_pw[(e_reco_p < E[i+1]) & (e_reco_p > E[i]) \
                                    & (gammaness_p > gcut[i]) & p_contained])
        final_gamma[i] = eg_w_sum * obstime
        final_hadrons[i] = ep_w_sum * obstime * area_ratio_p[i]

        pre_gamma[i] = e_reco_g[(e_reco_g < E[i+1]) & (e_reco_g > E[i]) \
                                & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])].shape[0]
        pre_hadrons[i] = e_reco_p[(e_reco_p < E[i+1]) & (e_reco_p > E[i]) \
                                  & (gammaness_p > gcut[i]) & p_contained].shape[0]

        ngamma_per_ebin[i] = np.sum(e_reco_gw[(e_reco_g < E[i+1]) & (e_reco_g > E[i])]) * obstime
        nhadron_per_ebin[i] = np.sum(e_reco_pw[(e_reco_p < E[i+1]) & (e_reco_p > E[i])]) * obstime

    nex_5sigma, sens = calculate_sensitivity_lima_1d(final_gamma, final_hadrons * noff, 1/noff,
                                                  eb)
    # Avoid bins which are empty or have too few events:
    min_num_events = 10
    min_pre_events = 10
    # Minimum number of gamma and proton events in a bin to be taken into account for minimization
    for i in range(0, eb):
        conditions = (not np.isfinite(sens[i])) or (sens[i]<=0) \
                     or (final_hadrons[i] < min_num_events) \
                     or (pre_gamma[i] < min_pre_events) \
                     or (pre_hadrons[i] < min_pre_events)
        if conditions:
            sens[i] = np.inf

    #Quantities to show in the results
    sensitivity = np.ndarray(shape=eb)
    nex_min = np.ndarray(shape=eb)
    eff_g = np.ndarray(shape=eb)
    eff_p = np.ndarray(shape=eb)
    ngammas = np.ndarray(shape=eb)
    nhadrons = np.ndarray(shape=eb)
    gammarate = np.ndarray(shape=eb)
    hadronrate = np.ndarray(shape=eb)
    eff_area = np.ndarray(shape=eb)
    nevents_gamma = np.ndarray(shape=eb)
    nevents_proton = np.ndarray(shape=eb)

    # Calculate the minimum sensitivity per energy bin
    for i in range(0,eb):
        ngammas[i] = final_gamma[i]
        nhadrons[i] = final_hadrons[i]
        gammarate[i] = final_gamma[i]/(obstime.to(u.min)).to_value()
        hadronrate[i] = final_hadrons[i]/(obstime.to(u.min)).to_value()
        nex_min[i] =  nex_5sigma[i]
        sensitivity[i] = sens[i]
        eff_g[i] = final_gamma[i]/ngamma_per_ebin[i]
        eff_p[i] = final_hadrons[i]/nhadron_per_ebin[i]

        e_aftercuts = e_true_g[(e_true_g < E[i+1]) & (e_true_g > E[i]) \
                               & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])]

        e_aftercuts_p = e_true_p[(e_true_p < E[i+1]) & (e_true_p > E[i]) \
                                 & (gammaness_p > gcut[i]) & p_contained]

        e_aftercuts_w = np.sum(np.power(e_aftercuts, crab_par['alpha']-mc_par_g['sp_idx']))

        e_w = np.sum(np.power(e_true_g[(e_true_g < E[i+1]) & (e_true_g > E[i])],
                              crab_par['alpha']-mc_par_g['sp_idx']))

        eff_area[i] = e_aftercuts_w.to_value() / n_sim_bin[i] * mc_par_g['area_sim'].to(u.m**2).to_value()

        nevents_gamma[i] = e_aftercuts.shape[0]
        nevents_proton[i] = e_aftercuts_p.shape[0]

    #Compute sensitivity  in flux units

    emed = np.sqrt(E[1:] * E[:-1])
    dFdE, par = crab_magic(emed)
    sens_flux = sensitivity / 100 * (dFdE * emed * emed).to(u.erg / (u.cm**2 * u.s))

    list_of_tuples = list(zip(E[:-1].to_value(), E[1:].to_value(), gcut, tcut,
                            ngammas, nhadrons,
                            gammarate, hadronrate,
                            nex_min, sens_flux.to_value(), eff_area,
                              eff_g, eff_p, nevents_gamma, nevents_proton))
    result = pd.DataFrame(list_of_tuples,
                           columns=['ebin_low', 'ebin_up', 'gammaness_cut', 'theta2_cut',
                                    'n_gammas', 'n_hadrons',
                                    'gamma_rate', 'hadron_rate',
                                    'nex_min', 'sensitivity','eff_area',
                                    'eff_gamma', 'eff_hadron',
                                    'nevents_g', 'nevents_p'])

    units = [E.unit, E.unit,"", tcut.unit,"", "",
             u.min**-1, u.min**-1, "",
             sens_flux.unit, mc_par_g['area_sim'].to(u.m**2).unit, "", "", "", ""]

    """
    sens_minimization_plot(eb, gb, tb, E, sens)
    
    plot_positions_survived_events(events_g,
                                   events_p,
                                   gammaness_g, gammaness_p,
                                   theta2_g, p_contained, sens, E, eb, gcut, tcut)
    """
    # Build dataframe of events that survive the cuts:
    events = pd.concat((events_g, events_p))
    dl2 = pd.DataFrame(columns=events.keys())
    for i in range(0,eb):
        df_bin = events[(10**events.mc_energy < E[i+1]) & (10**events.mc_energy > E[i]) \
                               & (events.gammaness > gcut[i]) & (events.theta2 < tcut[i])]

        dl2 = pd.concat((dl2, df_bin))

    return E, sensitivity, result, units, dl2
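
# Illustrative sketch (an assumption about what ring_containment does, not the
# module's implementation): select background events whose squared angular
# distance falls inside a ring of given radius and halfwidth, and return the
# selection mask together with the angular area of the ring.
import numpy as np
import astropy.units as u


def ring_containment_sketch(angdist2, ring_radius, ring_halfwidth):
    r_in = ring_radius - ring_halfwidth
    r_out = ring_radius + ring_halfwidth
    contained = (angdist2 > r_in**2) & (angdist2 < r_out**2)
    ring_area = np.pi * (r_out**2 - r_in**2)
    return contained, ring_area


# Example with squared angular distances in deg2:
angdist2_demo = np.array([0.01, 0.15, 0.36, 1.0]) * u.deg**2
mask, ring_area = ring_containment_sketch(angdist2_demo, 0.4 * u.deg, 0.2 * u.deg)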
Example #4
def sens(simtelfile_gammas, simtelfile_protons,
         dl2_file_g, dl2_file_p,
         nfiles_gammas, nfiles_protons,
         eb, gb, tb, noff,
         obstime = 50 * 3600 * u.s):
    """
    Main function to calculate the sensitivity given a MC dataset

    Parameters
    ----------
    simtelfile_gammas: `string` path to simtelfile of gammas with mc info
    simtelfile_protons: `string` path to simtelfile of protons with mc info
    dl2_file_g: `string` path to h5 file of reconstructed gammas
    dl2_file_p: `string` path to h5 file of reconstructed protons
    nfiles_gammas: `int` number of simtel gamma files reconstructed
    nfiles_protons: `int` number of simtel proton files reconstructed
    eb: `int` number of bins in energy
    gb: `int` number of bins in gammaness
    tb: `int` number of bins in theta2
    noff: `float` ratio between the background and the signal region
    obstime: `Quantity` Observation time in seconds

    TODO: Give files as input in a configuration file!

    Returns
    -------
    E: `array` edges of the energy bins
    sensitivity: `array` sensitivity per energy bin
    """

    # Read simulated and reconstructed values
    gammaness_g, theta2_g, e_reco_g, mc_par_g = process_mc(simtelfile_gammas,
                                                           dl2_file_g, 'gamma')
    gammaness_p, angdist2_p, e_reco_p, mc_par_p = process_mc(simtelfile_protons,
                                                             dl2_file_p, 'proton')

    mc_par_g['sim_ev'] = mc_par_g['sim_ev']*nfiles_gammas
    mc_par_p['sim_ev'] = mc_par_p['sim_ev']*nfiles_protons

    #Pass units to GeV and cm2
    mc_par_g['emin'] = mc_par_g['emin'].to(u.GeV)
    mc_par_g['emax'] = mc_par_g['emax'].to(u.GeV)

    mc_par_p['emin'] = mc_par_p['emin'].to(u.GeV)
    mc_par_p['emax'] = mc_par_p['emax'].to(u.GeV)

    mc_par_g['area_sim'] = mc_par_g['area_sim'].to(u.cm**2)
    mc_par_p['area_sim'] = mc_par_p['area_sim'].to(u.cm**2)

    #Set binning for sensitivity calculation
    emin_sens = 10**1 * u.GeV #mc_par_g['emin']
    emax_sens = 10**5 * u.GeV #mc_par_g['emax']

    E = np.logspace(np.log10(emin_sens.to_value()),
                np.log10(emax_sens.to_value()), eb + 1) * u.GeV

    g, t = bin_definition(gb, tb)

    # Extract spectral parameters
    dFdE, crab_par = crab_hegra(E)
    dFdEd0, proton_par = proton_bess(E)

    # Rates and weights
    rate_g = rate(mc_par_g['emin'], mc_par_g['emax'], mc_par_g['sp_idx'],
                     mc_par_g['cone'], mc_par_g['area_sim'],
                     crab_par['f0'], crab_par['e0'])

    rate_p = rate(mc_par_p['emin'], mc_par_p['emax'], mc_par_p['sp_idx'],
                     mc_par_p['cone'], mc_par_p['area_sim'],
                     proton_par['f0'], proton_par['e0'])


    w_g = weight(mc_par_g['emin'], mc_par_g['emax'], mc_par_g['sp_idx'],
                    crab_par['alpha'], rate_g,
                    mc_par_g['sim_ev'], crab_par['e0'])

    w_p = weight(mc_par_p['emin'], mc_par_p['emax'], mc_par_p['sp_idx'],
                    proton_par['alpha'], rate_p,
                    mc_par_p['sim_ev'], proton_par['e0'])


    e_reco_gw = ((e_reco_g / crab_par['e0'])**(crab_par['alpha'] - mc_par_g['sp_idx'])) \
                * w_g
    e_reco_pw = ((e_reco_p / proton_par['e0'])**(proton_par['alpha'] - mc_par_p['sp_idx'])) \
                * w_p

    p_contained, ang_area_p = ring_containment(angdist2_p, 0.4 * u.deg, 0.1 * u.deg)
    # FIX: ring_radius and ring_halfwidth should have units of deg
    # FIX: hardcoded at the moment, but ring_radius should be read from
    # the gamma file (point-like) or given as input (diffuse).
    # FIX: ring_halfwidth should be given as input
    area_ratio_p = np.pi * t / ang_area_p
    # Ratio between the area where we search for gammas (np.pi * t)
    # and the area where we search for protons (ang_area_p)

    # Arrays to contain the number of gammas and hadrons for different cuts
    final_gamma = np.ndarray(shape=(eb, gb, tb))
    final_hadrons = np.ndarray(shape=(eb, gb, tb))


    # Weight events and count number of events per bin:
    for i in range(0,eb):  # binning in energy
        for j in range(0,gb):  # cut in gammaness
            for k in range(0,tb):  # cut in theta2
                eg_w_sum = np.sum(e_reco_gw[(e_reco_g < E[i+1]) & (e_reco_g > E[i]) \
                                            & (gammaness_g > g[j]) & (theta2_g < t[k])])

                ep_w_sum = np.sum(e_reco_pw[(e_reco_p < E[i+1]) & (e_reco_p > E[i]) \
                                            & (gammaness_p > g[j]) & p_contained])

                final_gamma[i][j][k] = eg_w_sum * obstime
                final_hadrons[i][j][k] = ep_w_sum * obstime * area_ratio_p[k]

    sens = calculate_sensitivity_lima(final_gamma, final_hadrons * noff, 1/noff)

    # Avoid bins which are empty or have too few events:
    min_num_events = 10
    # Minimum number of gamma and proton events in a bin to be taken into account for minimization

    for i in range(0, eb):
        for j in range(0, gb):
            for k in range(0, tb):
                conditions = (not np.isfinite(sens[i,j,k])) or (sens[i,j,k]<=0) \
                             or (final_gamma[i,j,k] < min_num_events) \
                             or (final_hadrons[i,j,k] < min_num_events) \
                             or (not final_gamma[i,j,k] > final_hadrons[i,j,k] * 0.05)
                if conditions:
                    sens[i][j][k] = np.inf

    # Calculate the minimum sensitivity per energy bin
    sensitivity = np.ndarray(shape=eb)

    print("BEST CUTS: ")
    print("Energy bin(GeV) Gammaness Theta2(deg2) Ngamma Nbkg Ngamma/min Nbkg/min")
    for i in range(0,eb):
        ind = np.unravel_index(np.nanargmin(sens[i], axis=None), sens[i].shape)
        print("%.2f" % E[i].to_value(),"-","%.2f" % E[i+1].to_value(),"%.2f" % g[ind[0]],
              "%.2f" % t[ind[1]].to_value(), "%.2f" % final_gamma[i][ind],
              "%.2f" % final_hadrons[i][ind], "%.2f" % (final_gamma[i][ind]/(60*50)),
              "%.2f" % (final_hadrons[i][ind]/(60*50)))
        sensitivity[i] = sens[i][ind]

    return E, sensitivity
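
# Usage sketch (not from the module): calling the grid-scan version of sens
# defined just above. The simtel/DL2 file paths and the bin counts are
# placeholders; the gammaness/theta2 cut grids are built internally via
# bin_definition(gb, tb).
import astropy.units as u

E_edges, best_sensitivity = sens(
    "gamma.simtel.gz", "proton.simtel.gz",   # placeholder simtel files
    "reco_gammas.h5", "reco_protons.h5",     # placeholder DL2 files
    nfiles_gammas=10, nfiles_protons=50,     # assumed numbers of reconstructed files
    eb=10, gb=11, tb=10,                     # energy bins and cut-grid sizes
    noff=2,
    obstime=50 * 3600 * u.s)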
Example #5
def sensitivity(emin_sens, emax_sens, eb, gb, tb, noff, obstime = 50 * 3600 * u.s):
    """
    Main function to calculate the sensitivity given a MC dataset

    Parameters
    ----------
    emin_sens: `Quantity` minimum energy of the sensitivity binning
    emax_sens: `Quantity` maximum energy of the sensitivity binning
    eb: `int` number of bins in energy
    gb: `int` number of bins in gammaness
    tb: `int` number of bins in theta2
    noff: `float` ratio between the background and the signal region
    obstime: `Quantity` Observation time in seconds

    TODO: Give files as input in a configuration file!

    Returns
    -------
    None; the sensitivity per energy bin is only plotted.
    """
   
    # Read files
    simtelfile_gammas = "/home/queenmab/DATA/LST1/Gamma/gamma_20deg_0deg_run8___cta-prod3-lapalma-2147m-LaPalma-FlashCam.simtel.gz"
    simtelfile_protons = "/home/queenmab/DATA/LST1/Proton/proton_20deg_0deg_run194___cta-prod3-lapalma-2147m-LaPalma-FlashCam.simtel.gz"
    PATH_EVENTS = "../../cta-lstchain-extra/reco/sample_data/dl2/"
    dl2_file_g = PATH_EVENTS+"/reco_gammas.h5" 
    dl2_file_p = PATH_EVENTS+"/reco_protons.h5"

    # Extract spectral parameters
    E = np.logspace(np.log10(emin_sens.to_value(u.GeV)),
                    np.log10(emax_sens.to_value(u.GeV)), eb + 1) * u.GeV

    dFdE, crab_par = crab_hegra(E)
    dFdEd0, proton_par = proton_bess(E)

    # Read simulated and reconstructed values
    gammaness_g, theta2_g, e_reco_g, mc_par_g = process_mc(simtelfile_gammas, dl2_file_g)
    gammaness_p, theta2_p, e_reco_p, mc_par_p = process_mc(simtelfile_protons, dl2_file_p)

    # Rates and weights
    rate_g = rate(mc_par_g['emin'], mc_par_g['emax'], mc_par_g['sp_idx'], \
                  mc_par_g['cone'], mc_par_g['area_sim'], crab_par['f0'], crab_par['e0'])

    rate_p = rate(mc_par_p['emin'], mc_par_p['emax'], mc_par_p['sp_idx'], \
                  mc_par_p['cone'], mc_par_p['area_sim'], proton_par['f0'], proton_par['e0'])


    w_g = weight(mc_par_g['emin'], mc_par_g['emax'], mc_par_g['sp_idx'],
                 crab_par['alpha'], rate_g, mc_par_g['sim_ev'], crab_par['e0'])

    w_p = weight(mc_par_p['emin'], mc_par_p['emax'], mc_par_p['sp_idx'],
                 proton_par['alpha'], rate_p, mc_par_p['sim_ev'], proton_par['e0'])


    e_reco_gw = ((e_reco_g / crab_par['e0'])**(crab_par['alpha'] - mc_par_g['sp_idx'])) \
        * w_g
    e_reco_pw = ((e_reco_p / proton_par['e0'])**(proton_par['alpha'] - mc_par_p['sp_idx'])) \
        * w_p

    # Arrays to contain the number of gammas and hadrons for different cuts
    final_gamma = np.ndarray(shape=(eb, gb, tb))
    final_hadrons = np.ndarray(shape=(eb, gb, tb))

    g, t = bin_definition(gb, tb)

    for i in range(0,eb):  # binning in energy
        for j in range(0,gb):  # cut in gammaness
            for k in range(0,tb):  # cut in theta2
                eg_w_sum = np.sum(e_reco_gw[(e_reco_g < E[i+1].to_value()) & (e_reco_g > E[i].to_value()) \
                                         & (gammaness_g > g[j]) & (theta2_g < t[k])])

                ep_w_sum = np.sum(e_reco_pw[(e_reco_p < E[i+1].to_value()) & (e_reco_p > E[i].to_value()) \
                                         & (gammaness_p > g[j]) & (theta2_p < t[k])])
            
                final_gamma[i][j][k] = eg_w_sum * obstime
                final_hadrons[i][j][k] = ep_w_sum * obstime
                    
    sens = calculate_sensitivity(final_gamma, final_hadrons, 1/noff)
    
    # Calculate the minimum sensitivity per energy bin
    sensitivity = np.ndarray(shape=eb)
    for i in range(0,eb):
        ind = np.unravel_index(np.argmin(sens[i], axis=None), sens[i].shape)
        sensitivity[i] = sens[i][ind]

    sens_minimization_plot(eb, gb, tb, E, sens)
    sens_plot(eb, E, sensitivity)
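
# Reference sketch (illustration, not the module's code): the per-event
# spectral re-weighting used throughout the examples above. Events simulated
# with power-law index sp_idx are re-weighted to a target spectrum of index
# alpha by multiplying a global weight w by (E / e0)**(alpha - sp_idx).
import numpy as np
import astropy.units as u

e_true = np.array([0.05, 0.2, 1.0, 5.0]) * u.TeV  # assumed event energies
e0 = 1.0 * u.TeV                                   # reference energy
sp_idx = -2.0                                      # simulated spectral index
alpha = -2.62                                      # target (Crab-like) index
w = 1e-3 / u.s                                     # assumed per-event weight

rate_weighted = ((e_true / e0) ** (alpha - sp_idx)) * w
print(rate_weighted.to(1 / u.s))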