Code example #1
def plot_sensitivity(energy, sensitivity, ax=None, **kwargs):
    """
    Plot the achieved sensitivity

    Parameters
    ----------
    ax:          `matplotlib.pyplot.axis`
    energy:      `astropy.units.quantity.Quantity`  energy array
    sensitivity: `numpy.ndarray`  sensitivity array (bins of energy)

    Returns
    -------
    ax:    `matplotlib.pyplot.axis`

    """
    ax = plt.gca() if ax is None else ax

    mask = sensitivity < 1e100 * sensitivity.unit
    egeom = np.sqrt(energy[1:] * energy[:-1])
    binsize = (energy[1:] - energy[:-1]) / 2

    dFdE = crab_hegra(egeom[mask])

    ax.set_yscale("log")
    ax.set_xscale("log")

    ax.errorbar(egeom[mask].to_value(),
                (sensitivity[mask] / 100 * (dFdE[0] * egeom[mask] \
                                            * egeom[mask]).to(u.TeV / (u.cm * u.cm * u.s))).to_value(),
                xerr=binsize[mask].to_value(), marker='o', color='C3', label='Sensitivity')

    return ax
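
For illustration, a minimal usage sketch of the function above follows. The energy edges and the flat 5%-of-Crab sensitivity are invented, and the import path is an assumption (plot_sensitivity and its crab_hegra dependency live in the lstchain plotting/spectra utilities, whose exact module layout may differ between versions).

import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt

# from lstchain.mc.plot_utils import plot_sensitivity   # assumed import path

# 20 logarithmic energy bins (edges) and an invented, flat 5%-of-Crab sensitivity
energy_edges = np.logspace(-2, 2, 21) * u.TeV
relative_sens = np.full(20, 5.0) * u.percent

ax = plot_sensitivity(energy_edges, relative_sens)
ax.legend()
plt.show()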
Code example #2
File: plot_utils.py  Project: dkerszberg/cta-lstchain
def plot_sensitivity(ax, e, sensitivity):
    """

    Parameters
    --------

    Returns
    --------
    """
    emed = np.sqrt(e[1:] * e[:-1])
    dFdE = crab_hegra(emed)
    ax.loglog(emed, sensitivity / 100 * dFdE * emed * emed, \
              label = 'Sensitivity')
Code example #3
File: plot_utils.py  Project: dkerszberg/cta-lstchain
def plot_Crab_SED(ax, percentage, emin, emax, **kwargs):
    """

    Parameters
    --------

    Returns
    --------
    """
    En = np.logspace(np.log10(emin.to_value()), np.log10(emax.to_value()),
                     40) * u.GeV

    dFdE = percentage / 100. * crab_hegra(En)
    ax.loglog(En, dFdE[0] * En * En, color='gray', **kwargs)

    return ax
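
The two helpers above (code examples #2 and #3) are naturally combined on one axis, overlaying the achieved sensitivity with a Crab reference line. A hypothetical sketch, with invented bin edges and sensitivity values, assuming both functions and their crab_hegra dependency are importable from the same plot_utils module:

import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt

fig, ax = plt.subplots()

e_edges = np.logspace(1, 5, 21) * u.GeV   # invented energy bin edges
rel_sens = np.full(20, 5.0)               # invented relative sensitivity (% of Crab)

plot_sensitivity(ax, e_edges, rel_sens)                             # code example #2
plot_Crab_SED(ax, 100, 10 * u.GeV, 1e5 * u.GeV, label='100% Crab')  # code example #3

ax.set_xlabel(f'Energy / {e_edges.unit}')
ax.legend()
plt.show()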
Code example #4
File: plot_utils.py  Project: Hckjs/cta-lstchain
def plot_sensitivity(energy, sensitivity, ax=None, **kwargs):
    """
    Plot the achieved sensitivity

    Parameters
    ----------
    ax:          `matplotlib.pyplot.axis`
    energy:      `astropy.units.quantity.Quantity`  energy array
    sensitivity: `numpy.ndarray`  sensitivity array (bins of energy)

    Returns
    -------
    ax:    `matplotlib.pyplot.axis`

    """
    ax = plt.gca() if ax is None else ax

    mask = sensitivity < 1e100 * sensitivity.unit
    egeom = np.sqrt(energy[1:] * energy[:-1])
    binsize = (energy[1:] - energy[:-1]) / 2

    dFdE = crab_hegra(egeom[mask])

    ax.set_yscale("log")
    ax.set_xscale("log")

    kwargs.setdefault('marker', 'o')
    kwargs.setdefault('color', 'C3')
    kwargs.setdefault('label', 'sensitivity')

    sens_unit = u.TeV / (u.cm * u.cm * u.s)
    with quantity_support():
        ax.errorbar(egeom[mask],
                    sensitivity[mask] / (100 * sens_unit) *
                    (dFdE[0] * egeom[mask] * egeom[mask]).to(sens_unit),
                    xerr=binsize[mask],
                    **kwargs)

    ax.set_xlabel(f'Energy / {energy.unit}')
    ax.set_ylabel(f'Sensitivity / ({sensitivity.unit})')
    plt.tight_layout()

    return ax
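
The version above wraps the plotting call in astropy's quantity_support context manager, which lets matplotlib consume Quantity arrays directly and keep track of their units. A self-contained illustration of that mechanism (independent of lstchain, with an invented power-law flux):

import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
from astropy.visualization import quantity_support

energy = np.logspace(-1, 2, 20) * u.TeV
flux = 1e-11 * (energy / (1 * u.TeV)) ** -2.6 / (u.cm ** 2 * u.s * u.TeV)

with quantity_support():
    # Quantities can be passed to matplotlib without calling .to_value();
    # the axes pick up the units automatically.
    plt.loglog(energy, (flux * energy ** 2).to(u.TeV / (u.cm ** 2 * u.s)))
    plt.show()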
Code example #5
File: sensitivity.py  Project: Hckjs/cta-lstchain
def sensitivity_gamma_efficiency_real_data(dl2_file_on,
                                           dl2_file_off,
                                           gcut,
                                           tcut,
                                           n_bins_energy,
                                           energy,
                                           gamma_eff_gammaness,
                                           gamma_eff_theta2,
                                           noff,
                                           obstime=50 * 3600 * u.s):
    """
    Main function to calculate the sensitivity for cuts based
    on gamma efficiency using real data as ON and OFF events

    Parameters
    ----------
    dl2_file_on: `string` path to h5 file of ON events
    dl2_file_off: `string` path to h5 file of OFF events
    gcut: `array` gammaness cut to apply in each energy bin
    tcut: `array` theta2 cut to apply in each energy bin
    n_bins_energy: `int` number of bins in energy
    energy: `array` energy bin edges
    gamma_eff_gammaness: `float` between 0 and 1, fraction of gammas
    to be kept after the cut in gammaness
    gamma_eff_theta2: `float` between 0 and 1, fraction of gammas
    to be kept after the cut in theta2
    noff: `float` ratio between the background and the signal region
    obstime: `Quantity` Observation time in seconds

    Returns
    -------
    energy: `array` energy bin edges used for the calculation
    sensitivity: `array` relative sensitivity per energy bin
    result: `pandas.DataFrame` table of per-bin quantities
    gammalike_events: `pandas.DataFrame` events surviving the cuts
    gcut, tcut: `array` applied gammaness and theta2 cuts per energy bin

    """

    gammaness_on, theta2_on, e_reco_on, events_on, obstime_on = process_real(
        dl2_file_on)
    gammaness_off, angdist2_off, e_reco_off, events_off, obstime_off = process_real(
        dl2_file_off)

    # obstime_on = 6846.0 *u.s
    # obstime_off = 4188.0 *u.s

    # Extract spectral parameters
    print(energy)
    dFdE, crab_par = crab_hegra(energy)

    # For background, select protons contained in a ring overlapping with the ON region
    # p_contained, ang_area_p = ring_containment(angdist2_off, 0.6 * u.deg, 0.6 * u.deg)

    # Initialize arrays

    final_on = np.ndarray(shape=(n_bins_energy))
    final_off = np.ndarray(shape=(n_bins_energy))
    pre_on = np.ndarray(shape=(n_bins_energy))
    pre_off = np.ndarray(shape=(n_bins_energy))
    sensitivity = np.ndarray(shape=n_bins_energy)
    n_excesses_min = np.ndarray(shape=n_bins_energy)
    eff_on = np.ndarray(shape=n_bins_energy)
    eff_off = np.ndarray(shape=n_bins_energy)
    on_rate = np.ndarray(shape=n_bins_energy)
    off_rate = np.ndarray(shape=n_bins_energy)

    # Total rate of on and off data
    total_rate_off = events_off.shape[0] / obstime_off
    total_rate_on = events_on.shape[0] / obstime_on
    print("Total rate triggered OFF events {:.3f} Hz".format(total_rate_off))
    print("Total rate triggered ON events  {:.3f} Hz".format(total_rate_on))

    # Dataframe to store the events which survive the cuts
    gammalike_events = pd.DataFrame(columns=events_on.keys())

    for i in range(0, n_bins_energy):  # binning in energy

        print("\n******** Energy bin: {:.3f} - {:.3f} TeV ********".format(
            energy[i].value, energy[i + 1].value))
        total_rate_off_ebin = e_reco_off[(e_reco_off < energy[i + 1]) & (
            e_reco_off > energy[i])].shape[0] / obstime_off
        total_rate_on_ebin = e_reco_on[(e_reco_on < energy[i + 1]) & (
            e_reco_on > energy[i])].shape[0] / obstime_on

        # print("**************")
        print("Total rate triggered off events in this bin {:.5f} Hz".format(
            total_rate_off_ebin.value))
        print("Total rate triggered on events in this bin {:.5f} Hz".format(
            total_rate_on_ebin.value))

        # Calculate the cuts in gammaness and theta2 based on efficiency of weighted gammas

        events_bin_on = events_on[(e_reco_on < energy[i + 1])
                                  & (e_reco_on > energy[i])]

        events_bin_off = events_off[(e_reco_off < energy[i + 1])
                                    & (e_reco_off > energy[i])]

        best_g_cut = gcut[
            i]  # find_cut(events_bin_on, 1, obstime,  "gammaness", 0, 1.0, gamma_eff_gammaness, True)

        best_theta2_cut = tcut[
            i]  # find_cut_real(events_on_after_g_cut, events_off_after_g_cut, obstime_on, obstime_off, "theta2", 0.0, 1.0, gamma_eff_theta2) * u.deg**2
        # tcut[i]=best_theta2_cut.to_value()
        best_theta2_cut_off = 0.5  # * u.deg**2

        events_bin_after_cuts_on = events_bin_on[(events_bin_on.gammaness > best_g_cut) & \
                                                 (events_bin_on.theta2 < best_theta2_cut)]

        events_bin_after_cuts_off = events_bin_off[(events_bin_off.gammaness > best_g_cut) & \
                                                   (events_bin_off.theta2 < best_theta2_cut_off)]

        # Save the survived events in the dataframe
        gammalike_events = pd.concat(
            (gammalike_events, events_bin_after_cuts_on))
        gammalike_events = pd.concat(
            (gammalike_events, events_bin_after_cuts_off))

        ang_area_p = np.pi * best_theta2_cut_off
        area_ratio_p = np.pi * best_theta2_cut / ang_area_p

        rate_off_ebin = events_off[(e_reco_off < energy[i + 1]) & (e_reco_off > energy[i]) \
                                   & (gammaness_off > best_g_cut) & \
                                   (events_bin_off.theta2 < best_theta2_cut_off)].shape[0] / obstime_off

        rate_on_ebin = events_on[(e_reco_on < energy[i + 1]) & (e_reco_on > energy[i]) \
                                 & (gammaness_on > best_g_cut) & \
                                 (events_bin_on.theta2 < best_theta2_cut)].shape[0] / obstime_on

        on_rate[i] = rate_on_ebin.to(1 / u.min).to_value()
        off_rate[i] = rate_off_ebin.to(1 / u.min).to_value() * area_ratio_p

        final_on[i] = rate_on_ebin * obstime
        final_off[i] = rate_off_ebin * obstime * area_ratio_p

        pre_off[i] = e_reco_off[(e_reco_off < energy[i + 1]) & (e_reco_off > energy[i]) \
                                & (gammaness_off > best_g_cut) & (events_bin_off.theta2 < best_theta2_cut_off)].shape[0]
        pre_on[i] = e_reco_on[(e_reco_on < energy[i + 1]) & (e_reco_on > energy[i]) \
                              & (gammaness_on > best_g_cut) & \
                              (events_bin_on.theta2 < best_theta2_cut)].shape[0]

        print(on_rate[i], off_rate[i])
        print(final_on[i], final_off[i])
        print(pre_on[i], pre_off[i])

        eff_on[i] = pre_on[i] / events_bin_on.shape[0]
        eff_off[i] = pre_off[i] / events_bin_off.shape[0]

    signal = final_on - final_off

    rate_gammas = (signal / obstime).to(1 / u.min).to_value()

    n_excesses_min, sensitivity = calculate_sensitivity_lima(
        signal, final_off * noff, 1 / noff * np.ones_like(final_on))

    # Avoid bins which are empty or have too few events:
    min_num_events = 10
    min_pre_events = 5

    # Set conditions for calculating sensitivity

    conditions = ((sensitivity <= 0)
                  | (pre_on < min_pre_events)
                  | (pre_on == 0)
                  | (final_on < min_num_events))

    sensitivity[conditions] = np.inf

    # Compute sensitivity in flux units
    egeom = np.sqrt(energy[1:] * energy[:-1])
    dFdE, par = crab_hegra(egeom)
    sensitivity_flux = sensitivity / 100 * (dFdE * egeom * egeom).to(
        u.TeV / (u.cm**2 * u.s))

    print("\n******** Energy [TeV] *********\n")
    print(egeom)
    print("\nsensitivity flux:\n", sensitivity_flux)
    print("\nsensitivity[%]:\n", sensitivity)
    print("\n**************\n")

    list_of_tuples = list(
        zip(energy[:energy.shape[0] - 1].to_value(), energy[1:].to_value(),
            gcut, tcut, final_on, final_off,
            rate_gammas, off_rate, n_excesses_min, sensitivity,
            sensitivity_flux.to_value(), eff_on, eff_off, pre_on, pre_off))

    result = pd.DataFrame(
        list_of_tuples,
        columns=[
            'ebin_low', 'ebin_up', 'gammaness_cut', 'theta2_cut',
            'gammas_reweighted', 'protons_reweighted', 'gamma_rate',
            'proton_rate', 'n_excesses_min', 'relative_sensitivity',
            'sensitivity_flux', 'eff_gamma', 'eff_proton', 'mc_gammas',
            'mc_protons'
        ])

    return energy, sensitivity, result, gammalike_events, gcut, tcut
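
The implementation of calculate_sensitivity_lima is not part of these excerpts. As a point of reference only, a minimal sketch of the Li & Ma (1983, Eq. 17) significance that such a helper is typically built on is given below; the relative sensitivity is then commonly defined as the fraction of the assumed source flux that yields a 5 sigma excess in the given observation time. This is an assumption about the helper, not its actual code.

import numpy as np

def lima_significance(n_on, n_off, alpha):
    # Li & Ma (1983), Eq. 17: significance of the excess in an ON/OFF measurement.
    # alpha is the ON/OFF exposure ratio (1 / noff in the functions above).
    n_on = np.asarray(n_on, dtype=float)
    n_off = np.asarray(n_off, dtype=float)
    term_on = n_on * np.log((1 + alpha) / alpha * n_on / (n_on + n_off))
    term_off = n_off * np.log((1 + alpha) * n_off / (n_on + n_off))
    return np.sqrt(2.0) * np.sqrt(term_on + term_off)

# e.g. lima_significance(n_on=130, n_off=500, alpha=0.2) gives roughly 2.6 sigma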
Code example #6
File: sensitivity.py  Project: Hckjs/cta-lstchain
def sensitivity_gamma_efficiency_real_protons(dl2_file_g,
                                              dl2_file_p,
                                              ntelescopes_gammas,
                                              n_bins_energy,
                                              gamma_eff_gammaness,
                                              gamma_eff_theta2,
                                              noff,
                                              obstime=50 * 3600 * u.s):
    """
    Main function to calculate the sensitivity for cuts based
    on gamma efficiency using real protons as background events

    Parameters
    ----------
    dl2_file_g: `string` path to h5 file of reconstructed gammas
    dl2_file_p: `string` path to h5 file of reconstructed real protons
    ntelescopes_gammas: `int` number of telescopes used
    n_bins_energy: `int` number of bins in energy
    gamma_eff_gammaness: `float` between 0 and 1, fraction of gammas
    to be kept after the cut in gammaness
    gamma_eff_theta2: `float` between 0 and 1, fraction of gammas
    to be kept after the cut in theta2
    noff: `float` ratio between the background and the signal region
    obstime: `Quantity` Observation time in seconds

    Returns
    -------
    energy: `array` energy bin edges used for the calculation
    sensitivity: `array` relative sensitivity per energy bin
    result: `pandas.DataFrame` table of per-bin quantities
    gammalike_events: `pandas.DataFrame` events surviving the cuts
    gcut, tcut: `array` optimised gammaness and theta2 cuts per energy bin

    """

    # Read simulated and reconstructed values

    gammaness_g, theta2_g, e_reco_g, e_true_g, mc_par_g, events_g = process_mc(
        dl2_file_g, 'gamma')
    gammaness_p, angdist2_p, e_reco_p, events_p, obstime_real = process_real(
        dl2_file_p)
    e_reco_p = events_p["reco_energy"]
    gammaness_p = events_p["gammaness"]

    # Account for the number of telescopes simulated
    mc_par_g['sim_ev'] = mc_par_g['sim_ev'] * ntelescopes_gammas

    # Set binning for sensitivity calculation
    emin_sensitivity = mc_par_g['emin']
    emax_sensitivity = mc_par_g['emax']

    # Energy bins
    energy = np.logspace(np.log10(emin_sensitivity.to_value()),
                         np.log10(emax_sensitivity.to_value()),
                         n_bins_energy + 1) * u.TeV

    # Extract spectral parameters
    dFdE, crab_par = crab_hegra(energy)

    # Rates and weights
    w_g = get_weights(mc_par_g, crab_par)

    if (w_g.unit == u.Unit("sr / s")):
        print(
            "You are using diffuse gammas to estimate point-like sensitivity")
        print("These results will make no sense")
        w_g = w_g / u.sr  # Fix to make tests pass

    rate_weighted_g = ((e_true_g / crab_par['e0']) ** (crab_par['alpha'] - mc_par_g['sp_idx'])) \
                      * w_g

    # For background, select protons contained in a ring overlapping with the ON region
    p_contained, ang_area_p = ring_containment(angdist2_p, 0.5 * u.deg,
                                               0.5 * u.deg)
    # p_contained, ang_area_p = ring_containment(angdist2_p, 0.4 * u.deg, 0.3 * u.deg)
    # FIX: ring_radius and ring_halfwidth should have units of deg
    # FIX: hardcoded at the moment, but ring_radius should be read from
    # the gamma file (point-like) or given as input (diffuse).
    # FIX: ring_halfwidth should be given as input

    # Initialize arrays

    final_gammas = np.ndarray(shape=(n_bins_energy))
    final_protons = np.ndarray(shape=(n_bins_energy))
    pre_gammas = np.ndarray(shape=(n_bins_energy))
    pre_protons = np.ndarray(shape=(n_bins_energy))
    weighted_gamma_per_ebin = np.ndarray(n_bins_energy)
    weighted_proton_per_ebin = np.ndarray(n_bins_energy)
    sensitivity = np.ndarray(shape=n_bins_energy)
    n_excesses_min = np.ndarray(shape=n_bins_energy)
    eff_g = np.ndarray(shape=n_bins_energy)
    eff_p = np.ndarray(shape=n_bins_energy)
    gcut = np.ndarray(shape=n_bins_energy)
    tcut = np.ndarray(shape=n_bins_energy)
    gamma_rate = np.ndarray(shape=n_bins_energy)
    proton_rate = np.ndarray(shape=n_bins_energy)

    # Total rate of gammas and protons
    total_rate_proton = events_p.shape[0] / obstime_real
    total_rate_gamma = np.sum(rate_weighted_g)

    print("Total rate triggered proton {:.3f} Hz".format(total_rate_proton))
    print("Total rate triggered gamma  {:.3f} Hz".format(total_rate_gamma))

    # Dataframe to store the events which survive the cuts
    gammalike_events = pd.DataFrame(columns=events_g.keys())

    # Weight events and count number of events per bin:
    for i in range(0, n_bins_energy):  # binning in energy

        print("\n******** Energy bin: {:.3f} - {:.3f} TeV ********".format(
            energy[i].value, energy[i + 1].value))
        total_rate_proton_ebin = e_reco_p[(e_reco_p < energy[i + 1]) & (
            e_reco_p > energy[i])].shape[0] / obstime_real
        total_rate_gamma_ebin = np.sum(
            rate_weighted_g[(e_reco_g < energy[i + 1])
                            & (e_reco_g > energy[i])])

        # print("**************")
        print("Total rate triggered proton in this bin {:.5f} Hz".format(
            total_rate_proton_ebin.value))
        print("Total rate triggered gamma in this bin {:.5f} Hz".format(
            total_rate_gamma_ebin.value))

        # Calculate the cuts in gammaness and theta2 based on efficiency of weighted gammas

        rates_g = rate_weighted_g[(e_reco_g < energy[i + 1])
                                  & (e_reco_g > energy[i])]
        events_bin_g = events_g[(e_reco_g < energy[i + 1])
                                & (e_reco_g > energy[i])]
        events_bin_p = events_p[(e_reco_p < energy[i + 1])
                                & (e_reco_p > energy[i])]

        best_g_cut = find_cut(events_bin_g, rates_g, obstime, "gammaness", 0.0,
                              1.0, gamma_eff_gammaness)

        events_g_after_g_cut = events_bin_g[
            events_bin_g.gammaness > best_g_cut]
        rates_g_after_g_cut = rates_g[events_bin_g.gammaness > best_g_cut]

        best_theta2_cut = find_cut(events_g_after_g_cut, rates_g_after_g_cut,
                                   obstime, "theta2", 0.0, .5,
                                   gamma_eff_theta2) * u.deg**2

        events_bin_after_cuts_g = events_bin_g[
            (events_bin_g.gammaness > best_g_cut)
            & (events_bin_g.theta2 < best_theta2_cut)]

        events_bin_after_cuts_p = events_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i]) & \
                                           (gammaness_p > best_g_cut) & p_contained]

        # Save the survived events in the dataframe
        gammalike_events = pd.concat(
            (gammalike_events, events_bin_after_cuts_g))
        gammalike_events = pd.concat(
            (gammalike_events, events_bin_after_cuts_p))

        # ratio between the area where we search for protons ang_area_p
        # and the area where we search for gammas math.pi * t
        area_ratio_p = np.pi * best_theta2_cut / ang_area_p

        rate_g_ebin = np.sum(rate_weighted_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i]) \
                                             & (gammaness_g > best_g_cut) & (theta2_g < best_theta2_cut)])

        rate_p_ebin = events_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i]) \
                               & (gammaness_p > best_g_cut) & p_contained].shape[0] / obstime_real

        gamma_rate[i] = rate_g_ebin.to(1 / u.min).to_value()
        proton_rate[i] = rate_p_ebin.to(1 / u.min).to_value() * area_ratio_p

        final_gammas[i] = rate_g_ebin * obstime
        final_protons[i] = rate_p_ebin * obstime * area_ratio_p
        # print(area_ratio_p)

        pre_gammas[i] = e_reco_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i]) \
                                 & (gammaness_g > best_g_cut) & (theta2_g < best_theta2_cut)].shape[0]
        pre_protons[i] = e_reco_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i]) \
                                  & (gammaness_p > best_g_cut) & p_contained].shape[0]

        weighted_gamma_per_ebin[i] = np.sum(rate_weighted_g[(e_reco_g < energy[i + 1]) & \
                                                            (e_reco_g > energy[i])]) * obstime
        weighted_proton_per_ebin[i] = events_bin_p.shape[0]

        gcut[i] = best_g_cut
        tcut[i] = best_theta2_cut.to_value()

        eff_g[i] = final_gammas[i] / weighted_gamma_per_ebin[i]
        eff_p[i] = final_protons[i] / weighted_proton_per_ebin[i]

    # n_excesses_min, sensitivity = calculate_sensitivity_lima(final_gammas, final_protons*noff,
    #                                                        1/noff * np.ones_like(final_gammas))
    n_excesses_min, sensitivity = calculate_sensitivity_lima(
        final_gammas, final_protons * noff,
        1 / noff * np.ones_like(final_gammas))

    # Avoid bins which are empty or have too few events:
    min_num_events = 10
    min_pre_events = 5

    # Set conditions for calculating sensitivity

    conditions = ((sensitivity <= 0)
                  | (pre_gammas < min_pre_events)
                  | (pre_protons == 0)
                  | (final_gammas < min_num_events))

    sensitivity[conditions] = np.inf

    # Compute sensitivity in flux units
    egeom = np.sqrt(energy[1:] * energy[:-1])
    dFdE, par = crab_hegra(egeom)

    sensitivity_flux = sensitivity / 100 * (dFdE * egeom * egeom).to(
        u.TeV / (u.cm**2 * u.s))

    print("\n******** Energy [TeV] *********\n")
    print(egeom)
    print("\nsensitivity flux:\n", sensitivity_flux)
    print("\nsensitivity[%]:\n", sensitivity)
    print("\n**************\n")

    list_of_tuples = list(
        zip(energy[:energy.shape[0] - 1].to_value(), energy[1:].to_value(),
            gcut, tcut, final_gammas, final_protons, gamma_rate,
            proton_rate, n_excesses_min, sensitivity,
            sensitivity_flux.to_value(), eff_g, eff_p, pre_gammas,
            pre_protons))

    result = pd.DataFrame(
        list_of_tuples,
        columns=[
            'ebin_low', 'ebin_up', 'gammaness_cut', 'theta2_cut',
            'gammas_reweighted', 'protons_reweighted', 'gamma_rate',
            'proton_rate', 'n_excesses_min', 'relative_sensitivity',
            'sensitivity_flux', 'eff_gamma', 'eff_proton', 'mc_gammas',
            'mc_protons'
        ])

    return energy, sensitivity, result, gammalike_events, gcut, tcut
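
The OFF region in the function above is built with ring_containment, whose code is also not included here. Its outputs are used as a boolean event mask plus the ring area in squared angle (later compared with pi * theta2_cut), so a consistent, purely illustrative sketch could look like the following; this is an assumption, not the lstchain implementation.

import numpy as np
import astropy.units as u

def ring_containment_sketch(angdist2, ring_radius, ring_halfwidth):
    # Keep events whose squared angular distance lies inside a ring of mean
    # radius `ring_radius` and half-width `ring_halfwidth`; also return the
    # ring area in units of angle**2.
    r_in = ring_radius - ring_halfwidth
    r_out = ring_radius + ring_halfwidth
    contained = (angdist2 > r_in ** 2) & (angdist2 < r_out ** 2)
    ring_area = np.pi * (r_out ** 2 - r_in ** 2)
    return contained, ring_area

# contained, area = ring_containment_sketch(angdist2_p, 0.5 * u.deg, 0.5 * u.deg)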
Code example #7
def sensitivity(simtelfile_gammas,
                simtelfile_protons,
                dl2_file_g,
                dl2_file_p,
                nfiles_gammas,
                nfiles_protons,
                n_bins_energy,
                gcut,
                tcut,
                noff,
                obstime=50 * 3600 * u.s):
    """
    Main function to calculate the sensitivity given a MC dataset

    Parameters
    ----------
    simtelfile_gammas: `string` path to simtelfile of gammas with mc info
    simtelfile_protons: `string` path to simtelfile of protons with mc info
    dl2_file_g: `string` path to h5 file of reconstructed gammas
    dl2_file_p: `string` path to h5 file of reconstructed protons
    nfiles_gammas: `int` number of simtel gamma files reconstructed
    nfiles_protons: `int` number of simtel proton files reconstructed
    n_bins_energy: `int` number of bins in energy
    gcut: `array` gammaness cut to apply in each energy bin
    tcut: `array` theta2 cut to apply in each energy bin (deg2)
    noff: `float` ratio between the background and the signal region
    obstime: `Quantity` Observation time in seconds

    Returns
    -------
    energy: `array` energy bin edges used for the calculation
    sensitivity: `array` relative sensitivity per energy bin
    result: `pandas.DataFrame` table of per-bin quantities
    units: `list` units of the columns of `result`
    dl2: `pandas.DataFrame` events surviving the cuts

    """

    # Read simulated and reconstructed values
    gammaness_g, theta2_g, e_reco_g, e_true_g, mc_par_g, events_g = process_mc(
        simtelfile_gammas, dl2_file_g, 'gamma')
    gammaness_p, angdist2_p, e_reco_p, e_true_p, mc_par_p, events_p = process_mc(
        simtelfile_protons, dl2_file_p, 'proton')

    mc_par_g['sim_ev'] = mc_par_g['sim_ev'] * nfiles_gammas
    mc_par_p['sim_ev'] = mc_par_p['sim_ev'] * nfiles_protons

    # Pass units to TeV and cm2
    mc_par_g['emin'] = mc_par_g['emin'].to(u.TeV)
    mc_par_g['emax'] = mc_par_g['emax'].to(u.TeV)

    mc_par_p['emin'] = mc_par_p['emin'].to(u.TeV)
    mc_par_p['emax'] = mc_par_p['emax'].to(u.TeV)

    mc_par_g['area_sim'] = mc_par_g['area_sim'].to(u.cm**2)
    mc_par_p['area_sim'] = mc_par_p['area_sim'].to(u.cm**2)

    # Set binning for sensitivity calculation
    emin_sensitivity = 0.01 * u.TeV  # mc_par_g['emin']
    emax_sensitivity = 100 * u.TeV  # mc_par_g['emax']

    energy = np.logspace(np.log10(emin_sensitivity.to_value()),
                         np.log10(emax_sensitivity.to_value()),
                         n_bins_energy + 1) * u.TeV

    # Extract spectral parameters
    dFdE, crab_par = crab_hegra(energy)
    dFdEd0, proton_par = proton_bess(energy)

    bins = np.logspace(np.log10(emin_sensitivity.to_value()),
                       np.log10(emax_sensitivity.to_value()),
                       n_bins_energy + 1)
    y0 = mc_par_g['sim_ev'] / (mc_par_g['emax'].to_value() ** (mc_par_g['sp_idx'] + 1) \
                               - mc_par_g['emin'].to_value() ** (mc_par_g['sp_idx'] + 1)) \
         * (mc_par_g['sp_idx'] + 1)
    y = y0 * (bins[1:]**(crab_par['alpha'] + 1) -
              bins[:-1]**(crab_par['alpha'] + 1)) / (crab_par['alpha'] + 1)

    n_sim_bin = y

    # Rates and weights
    rate_g = rate("PowerLaw", mc_par_g['emin'], mc_par_g['emax'], crab_par,
                  mc_par_g['cone'], mc_par_g['area_sim'])

    rate_p = rate("PowerLaw", mc_par_p['emin'], mc_par_p['emax'], proton_par,
                  mc_par_p['cone'], mc_par_p['area_sim'])

    w_g = weight("PowerLaw", mc_par_g['emin'], mc_par_g['emax'],
                 mc_par_g['sp_idx'], rate_g, mc_par_g['sim_ev'], crab_par)

    w_p = weight("PowerLaw", mc_par_p['emin'], mc_par_p['emax'],
                 mc_par_p['sp_idx'], rate_p, mc_par_p['sim_ev'], proton_par)

    if (w_g.unit == u.Unit("sr / s")):
        print(
            "You are using diffuse gammas to estimate point-like sensitivity")
        print("These results will make no sense")
        w_g = w_g / u.sr  # Fix to make tests pass

    rate_weighted_g = ((e_true_g / crab_par['e0']) ** (crab_par['alpha'] - mc_par_g['sp_idx'])) \
                      * w_g
    rate_weighted_p = ((e_true_p / proton_par['e0']) ** (proton_par['alpha'] - mc_par_p['sp_idx'])) \
                      * w_p

    p_contained, ang_area_p = ring_containment(angdist2_p, 0.4 * u.deg,
                                               0.3 * u.deg)

    # FIX: ring_radius and ring_halfwidth should have units of deg
    # FIX: hardcoded at the moment, but ring_radius should be read from
    # the gamma file (point-like) or given as input (diffuse).
    # FIX: ring_halfwidth should be given as input
    area_ratio_p = np.pi * tcut / ang_area_p
    # ratio between the area where we search for protons ang_area_p
    # and the area where we search for gammas math.pi * t

    # Arrays to contain the number of gammas and hadrons for different cuts
    final_gamma = np.ndarray(shape=(n_bins_energy))
    final_hadrons = np.ndarray(shape=(n_bins_energy))
    pre_gamma = np.ndarray(shape=(n_bins_energy))
    pre_hadrons = np.ndarray(shape=(n_bins_energy))

    ngamma_per_ebin = np.ndarray(n_bins_energy)
    nhadron_per_ebin = np.ndarray(n_bins_energy)

    # Weight events and count number of events per bin:
    for i in range(n_bins_energy):  # binning in energy
        rate_g_ebin = np.sum(rate_weighted_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i]) \
                                             & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])])


        rate_p_ebin = np.sum(rate_weighted_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i]) \
                                             & (gammaness_p > gcut[i]) & p_contained])
        final_gamma[i] = (rate_g_ebin * obstime).value
        final_hadrons[i] = (rate_p_ebin * obstime).value * area_ratio_p[i]

        pre_gamma[i] = e_reco_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i]) \
                                & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])].shape[0]
        pre_hadrons[i] = e_reco_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i]) \
                                  & (gammaness_p > gcut[i]) & p_contained].shape[0]

        ngamma_per_ebin[i] = np.sum(
            rate_weighted_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i])].to(1 / u.s).value) \
                             * obstime.to(u.s).value
        nhadron_per_ebin[i] = np.sum(
            rate_weighted_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i])].to(1 / u.s).value) \
                              * obstime.to(u.s).value

    n_excesses_5sigma, sensitivity_3Darray = calculate_sensitivity_lima_ebin(
        final_gamma, final_hadrons * noff,
        1 / noff * np.ones(len(final_gamma)), n_bins_energy)
    # Avoid bins which are empty or have too few events:
    min_num_events = 5
    min_pre_events = 5
    # Minimum number of gamma and proton events in a bin to be taken into account for minimization
    for i in range(0, n_bins_energy):
        conditions = (not np.isfinite(sensitivity_3Darray[i])) or (sensitivity_3Darray[i] <= 0) \
                     or (final_hadrons[i] < min_num_events) \
                     or (pre_gamma[i] < min_pre_events) \
                     or (pre_hadrons[i] < min_pre_events)
        if conditions:
            sensitivity_3Darray[i] = np.inf

    # Quantities to show in the results
    sensitivity = np.ndarray(shape=n_bins_energy)
    n_excesses_min = np.ndarray(shape=n_bins_energy)
    eff_g = np.ndarray(shape=n_bins_energy)
    eff_p = np.ndarray(shape=n_bins_energy)
    ngammas = np.ndarray(shape=n_bins_energy)
    nhadrons = np.ndarray(shape=n_bins_energy)
    gammarate = np.ndarray(shape=n_bins_energy)
    hadronrate = np.ndarray(shape=n_bins_energy)
    eff_area = np.ndarray(shape=n_bins_energy)
    nevents_gamma = np.ndarray(shape=n_bins_energy)
    nevents_proton = np.ndarray(shape=n_bins_energy)

    # Calculate the minimum sensitivity per energy bin
    for i in range(0, n_bins_energy):
        ngammas[i] = final_gamma[i]
        nhadrons[i] = final_hadrons[i]
        gammarate[i] = final_gamma[i] / (obstime.to(u.min)).to_value()
        hadronrate[i] = final_hadrons[i] / (obstime.to(u.min)).to_value()
        n_excesses_min[i] = n_excesses_5sigma[i]
        sensitivity[i] = sensitivity_3Darray[i]
        eff_g[i] = final_gamma[i] / ngamma_per_ebin[i]
        eff_p[i] = final_hadrons[i] / nhadron_per_ebin[i]

        e_aftercuts = e_true_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i]) \
                               & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])]

        e_aftercuts_p = e_true_p[(e_reco_p < energy[i + 1]) & (e_reco_p > energy[i]) \
                                 & (gammaness_p > gcut[i]) & p_contained]

        e_aftercuts_w = np.sum(
            np.power(e_aftercuts, crab_par['alpha'] - mc_par_g['sp_idx']))

        e_w = np.sum(
            np.power(
                e_true_g[(e_reco_g < energy[i + 1]) & (e_reco_g > energy[i])],
                crab_par['alpha'] - mc_par_g['sp_idx']))

        eff_area[i] = e_aftercuts_w.to_value(
        ) / n_sim_bin[i] * mc_par_g['area_sim'].to(u.m**2).to_value()

        nevents_gamma[i] = e_aftercuts.shape[0]
        nevents_proton[i] = e_aftercuts_p.shape[0]

    # Compute sensitivity  in flux units

    egeom = np.sqrt(energy[1:] * energy[:-1])
    dFdE, par = crab_hegra(egeom)
    sensitivity_flux = sensitivity / 100 * (dFdE * egeom * egeom).to(
        u.erg / (u.cm**2 * u.s))

    print("\n******** Energy [TeV] *********\n")
    print(egeom)
    print("\nsensitivity flux:\n", sensitivity_flux)
    print("\nsensitivity[%]:\n", sensitivity)
    print("\n**************\n")

    list_of_tuples = list(
        zip(energy[:energy.shape[0] - 1].to_value(), energy[1:].to_value(),  # one lower edge per bin
            gcut, tcut, ngammas,
            nhadrons, gammarate, hadronrate, n_excesses_min,
            sensitivity_flux.to_value(), eff_area, eff_g, eff_p, nevents_gamma,
            nevents_proton))
    result = pd.DataFrame(list_of_tuples,
                          columns=[
                              'ebin_low', 'ebin_up', 'gammaness_cut',
                              'theta2_cut', 'n_gammas', 'n_hadrons',
                              'gamma_rate', 'hadron_rate', 'n_excesses_min',
                              'sensitivity', 'eff_area', 'eff_gamma',
                              'eff_hadron', 'nevents_g', 'nevents_p'
                          ])

    units = [
        energy.unit, energy.unit, "", tcut.unit, "", "", u.min**-1, u.min**-1,
        "", sensitivity_flux.unit, mc_par_g['area_sim'].to(u.cm**2).unit, "",
        "", "", ""
    ]

    # sensitivity_minimization_plot(n_bins_energy, n_bins_gammaness, n_bins_theta2, energy, sensitivity)
    # plot_positions_survived_events(events_g,
    #                                events_p,
    #                                gammaness_g, gammaness_p,
    #                                theta2_g, p_contained, sensitivity, energy, n_bins_energy, gcut, tcut)

    # Build dataframe of events that survive the cuts:
    events = pd.concat((events_g, events_p))
    dl2 = pd.DataFrame(columns=events.keys())

    for i in range(0, n_bins_energy):
        df_bin = events[(events.mc_energy < energy[i+1]) & (events.mc_energy > energy[i]) \
                               & (events.gammaness > gcut[i]) & (events.theta2 < tcut[i])]

        dl2 = pd.concat((dl2, df_bin))

    return energy, sensitivity, result, units, dl2
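
The rate()/weight() helpers and the rate_weighted_* arrays used above convert events simulated with spectral index sp_idx into an event rate that follows the target Crab or proton spectrum. Those helpers are not shown in these excerpts; the sketch below illustrates the underlying reweighting idea under simplifying assumptions (pure power laws, plain floats, no units) and is not the lstchain implementation.

import numpy as np

def power_law_reweight(e_true, emin, emax, sim_index, target_index,
                       target_rate, n_sim):
    # Events drawn from a spectrum proportional to E**sim_index on [emin, emax]
    # get per-event weights such that the weighted sample follows E**target_index
    # and the weights sum, on average, to target_rate.
    def integral(idx):
        # integral of E**idx over [emin, emax]
        return (emax ** (idx + 1) - emin ** (idx + 1)) / (idx + 1)

    norm = (target_rate / n_sim) * integral(sim_index) / integral(target_index)
    return norm * e_true ** (target_index - sim_index)

In the code above this factorisation appears as a constant weight w_g (or w_p) multiplied by (e_true / e0) ** (alpha - sp_idx).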
Code example #8
def sens(simtelfile_gammas, simtelfile_protons,
         dl2_file_g, dl2_file_p,
         nfiles_gammas, nfiles_protons,
         eb, gb, tb, noff,
         obstime = 50 * 3600 * u.s):
    """
    Main function to calculate the sensitivity given a MC dataset

    Parameters
    ----------
    simtelfile_gammas: `string` path to simtelfile of gammas with mc info
    simtelfile_protons: `string` path to simtelfile of protons with mc info
    dl2_file_g: `string` path to h5 file of reconstructed gammas
    dl2_file_p: `string` path to h5 file of reconstructed protons
    nfiles_gammas: `int` number of simtel gamma files reconstructed
    nfiles_protons: `int` number of simtel proton files reconstructed
    eb: `int` number of bins in energy
    gb: `int` number of bins in gammaness
    tb: `int` number of bins in theta2
    noff: `float` ratio between the background and the signal region
    obstime: `Quantity` Observation time in seconds

    TODO: Give files as input in a configuration file!
    Returns
    -------
    E: `array` energy bin edges
    sensitivity: `array` sensitivity per energy bin
    """

    # Read simulated and reconstructed values
    gammaness_g, theta2_g, e_reco_g, mc_par_g = process_mc(simtelfile_gammas,
                                                           dl2_file_g, 'gamma')
    gammaness_p, angdist2_p, e_reco_p, mc_par_p = process_mc(simtelfile_protons,
                                                             dl2_file_p, 'proton')

    mc_par_g['sim_ev'] = mc_par_g['sim_ev']*nfiles_gammas
    mc_par_p['sim_ev'] = mc_par_p['sim_ev']*nfiles_protons

    #Pass units to GeV and cm2
    mc_par_g['emin'] = mc_par_g['emin'].to(u.GeV)
    mc_par_g['emax'] = mc_par_g['emax'].to(u.GeV)

    mc_par_p['emin'] = mc_par_p['emin'].to(u.GeV)
    mc_par_p['emax'] = mc_par_p['emax'].to(u.GeV)

    mc_par_g['area_sim'] = mc_par_g['area_sim'].to( u.cm * u.cm)
    mc_par_p['area_sim'] = mc_par_p['area_sim'].to( u.cm * u.cm)

    #Set binning for sensitivity calculation
    emin_sens = 10**1 * u.GeV #mc_par_g['emin']
    emax_sens = 10**5 * u.GeV #mc_par_g['emax']

    E = np.logspace(np.log10(emin_sens.to_value()),
                np.log10(emax_sens.to_value()), eb + 1) * u.GeV

    g, t = bin_definition(gb, tb)

    # Extract spectral parameters
    dFdE, crab_par = crab_hegra(E)
    dFdEd0, proton_par = proton_bess(E)

    # Rates and weights
    rate_g = rate(mc_par_g['emin'], mc_par_g['emax'], mc_par_g['sp_idx'],
                     mc_par_g['cone'], mc_par_g['area_sim'],
                     crab_par['f0'], crab_par['e0'])

    rate_p = rate(mc_par_p['emin'], mc_par_p['emax'], mc_par_p['sp_idx'],
                     mc_par_p['cone'], mc_par_p['area_sim'],
                     proton_par['f0'], proton_par['e0'])


    w_g = weight(mc_par_g['emin'], mc_par_g['emax'], mc_par_g['sp_idx'],
                    crab_par['alpha'], rate_g,
                    mc_par_g['sim_ev'], crab_par['e0'])

    w_p = weight(mc_par_p['emin'], mc_par_p['emax'], mc_par_p['sp_idx'],
                    proton_par['alpha'], rate_p,
                    mc_par_p['sim_ev'], proton_par['e0'])


    e_reco_gw = ((e_reco_g / crab_par['e0'])**(crab_par['alpha'] - mc_par_g['sp_idx'])) \
                * w_g
    e_reco_pw = ((e_reco_p / proton_par['e0'])**(proton_par['alpha'] - mc_par_g['sp_idx'])) \
                * w_p

    p_contained, ang_area_p = ring_containment(angdist2_p, 0.4 * u.deg, 0.1 * u.deg)
    # FIX: ring_radius and ring_halfwidth should have units of deg
    # FIX: hardcoded at the moment, but ring_radius should be read from
    # the gamma file (point-like) or given as input (diffuse).
    # FIX: ring_halfwidth should be given as input
    area_ratio_p = np.pi * t / ang_area_p
    # ratio between the area where we search for protons ang_area_p
    # and the area where we search for gammas math.pi * t

    # Arrays to contain the number of gammas and hadrons for different cuts
    final_gamma = np.ndarray(shape=(eb, gb, tb))
    final_hadrons = np.ndarray(shape=(eb, gb, tb))


    # Weight events and count number of events per bin:
    for i in range(0,eb):  # binning in energy
        for j in range(0,gb):  # cut in gammaness
            for k in range(0,tb):  # cut in theta2
                eg_w_sum = np.sum(e_reco_gw[(e_reco_g < E[i+1]) & (e_reco_g > E[i]) \
                                            & (gammaness_g > g[j]) & (theta2_g < t[k])])

                ep_w_sum = np.sum(e_reco_pw[(e_reco_p < E[i+1]) & (e_reco_p > E[i]) \
                                            & (gammaness_p > g[j]) & p_contained])

                final_gamma[i][j][k] = eg_w_sum * obstime
                final_hadrons[i][j][k] = ep_w_sum * obstime * area_ratio_p[k]

    sens = calculate_sensitivity_lima(final_gamma, final_hadrons * noff, 1/noff)

    # Avoid bins which are empty or have too few events:
    min_num_events = 10
    # Minimum number of gamma and proton events in a bin to be taken into account for minimization

    for i in range(0, eb):
        for j in range(0, gb):
            for k in range(0, tb):
                conditions = (not np.isfinite(sens[i,j,k])) or (sens[i,j,k]<=0) \
                             or (final_gamma[i,j,k] < min_num_events) \
                             or (final_hadrons[i,j,k] < min_num_events) \
                             or (not final_gamma[i,j,k] > final_hadrons[i,j,k] * 0.05)
                if conditions:
                    sens[i][j][k] = np.inf

    # Calculate the minimum sensitivity per energy bin
    sensitivity = np.ndarray(shape=eb)

    print("BEST CUTS: ")
    print("Energy bin(GeV) Gammaness Theta2(deg2) Ngamma Nbkg Ngamma/min Nbkg/min")
    for i in range(0,eb):
        ind = np.unravel_index(np.nanargmin(sens[i], axis=None), sens[i].shape)
        print("%.2f" % E[i].to_value(),"-","%.2f" % E[i+1].to_value(),"%.2f" % g[ind[0]],
              "%.2f" % t[ind[1]].to_value(), "%.2f" % final_gamma[i][ind],
              "%.2f" % final_hadrons[i][ind], "%.2f" % (final_gamma[i][ind]/(60*50)),
              "%.2f" % (final_hadrons[i][ind]/(60*50)))
        sensitivity[i] = sens[i][ind]

    return E, sensitivity
Code example #9
File: sensitivity.py  Project: pillera/cta-lstchain
def sens(simtelfile_gammas, simtelfile_protons,
         dl2_file_g, dl2_file_p,
         nfiles_gammas, nfiles_protons,
         eb, gcut, tcut, noff,
         obstime = 50 * 3600 * u.s):
    """
    Main function to calculate the sensitivity given a MC dataset

    Parameters
    ----------
    simtelfile_gammas: `string` path to simtelfile of gammas with mc info
    simtelfile_protons: `string` path to simtelfile of protons with mc info
    dl2_file_g: `string` path to h5 file of reconstructed gammas
    dl2_file_p: `string` path to h5 file of reconstructed protons
    nfiles_gammas: `int` number of simtel gamma files reconstructed
    nfiles_protons: `int` number of simtel proton files reconstructed
    eb: `int` number of bins in energy
    gcut: `array` gammaness cut to apply in each energy bin
    tcut: `array` theta2 cut to apply in each energy bin (deg2)
    noff: `float` ratio between the background and the signal region
    obstime: `Quantity` Observation time in seconds

    TODO: Give files as input in a configuration file!

    Returns
    -------
    E: `array` energy bin edges
    sensitivity: `array` relative sensitivity per energy bin
    result: `pandas.DataFrame` table of per-bin quantities
    units: `list` units of the columns of `result`
    dl2: `pandas.DataFrame` events surviving the cuts
    """

    # Read simulated and reconstructed values
    gammaness_g, theta2_g, e_reco_g, e_true_g, mc_par_g, events_g = process_mc(simtelfile_gammas,
                                                           dl2_file_g, 'gamma')
    gammaness_p, angdist2_p, e_reco_p, e_true_p, mc_par_p, events_p = process_mc(simtelfile_protons,
                                                             dl2_file_p, 'proton')

    mc_par_g['sim_ev'] = mc_par_g['sim_ev']*nfiles_gammas
    mc_par_p['sim_ev'] = mc_par_p['sim_ev']*nfiles_protons

    #Pass units to GeV and cm2
    mc_par_g['emin'] = mc_par_g['emin'].to(u.GeV)
    mc_par_g['emax'] = mc_par_g['emax'].to(u.GeV)

    mc_par_p['emin'] = mc_par_p['emin'].to(u.GeV)
    mc_par_p['emax'] = mc_par_p['emax'].to(u.GeV)

    mc_par_g['area_sim'] = mc_par_g['area_sim'].to(u.cm**2)
    mc_par_p['area_sim'] = mc_par_p['area_sim'].to(u.cm**2)

    #Set binning for sensitivity calculation
    emin_sens = 10**1 * u.GeV #mc_par_g['emin']
    emax_sens = 10**5 * u.GeV #mc_par_g['emax']

    E = np.logspace(np.log10(emin_sens.to_value()),
                np.log10(emax_sens.to_value()), eb + 1) * u.GeV

    #Number of simulated events per energy bin
    """
    bins, n_sim_bin = power_law_integrated_distribution(emin_sens.to_value(),
                                                        emax_sens.to_value(),
                                                        mc_par_g['sim_ev'],
                                                        mc_par_g['sp_idx'], eb+1)


    """
    # Extract spectral parameters
    dFdE, crab_par = crab_hegra(E)
    dFdEd0, proton_par = proton_bess(E)

    bins = np.logspace(np.log10(emin_sens.to_value()), np.log10(emax_sens.to_value()), eb+1)
    y0 = mc_par_g['sim_ev'] / (mc_par_g['emax'].to_value()**(mc_par_g['sp_idx'] + 1) \
                               - mc_par_g['emin'].to_value()**(mc_par_g['sp_idx'] + 1)) \
        * (mc_par_g['sp_idx'] + 1)
    y = y0 * (bins[1:]**(crab_par['alpha'] + 1) - bins[:-1]**(crab_par['alpha'] + 1)) / (crab_par['alpha'] + 1)

    n_sim_bin = y


    # Rates and weights
    rate_g = rate(mc_par_g['emin'], mc_par_g['emax'], crab_par['alpha'],
                     mc_par_g['cone'], mc_par_g['area_sim'],
                     crab_par['f0'], crab_par['e0'])

    rate_p = rate(mc_par_p['emin'], mc_par_p['emax'], proton_par['alpha'],
                     mc_par_p['cone'], mc_par_p['area_sim'],
                     proton_par['f0'], proton_par['e0'])

    w_g = weight(mc_par_g['emin'], mc_par_g['emax'], mc_par_g['sp_idx'],
                    crab_par['alpha'], rate_g,
                    mc_par_g['sim_ev'], crab_par['e0'])

    w_p = weight(mc_par_p['emin'], mc_par_p['emax'], mc_par_p['sp_idx'],
                    proton_par['alpha'], rate_p,
                    mc_par_p['sim_ev'], proton_par['e0'])


    e_reco_gw = ((e_reco_g / crab_par['e0'])**(crab_par['alpha'] - mc_par_g['sp_idx'])) \
                * w_g
    e_reco_pw = ((e_reco_p / proton_par['e0'])**(proton_par['alpha'] - mc_par_p['sp_idx'])) \
                * w_p

    p_contained, ang_area_p = ring_containment(angdist2_p, 0.4 * u.deg, 0.2 * u.deg)
    # FIX: ring_radius and ring_halfwidth should have units of deg
    # FIX: hardcoded at the moment, but ring_radius should be read from
    # the gamma file (point-like) or given as input (diffuse).
    # FIX: ring_halfwidth should be given as input
    area_ratio_p = np.pi * tcut / ang_area_p
    # ratio between the area where we search for protons ang_area_p
    # and the area where we search for gammas math.pi * t

    # Arrays to contain the number of gammas and hadrons for different cuts
    final_gamma = np.ndarray(shape=(eb))
    final_hadrons = np.ndarray(shape=(eb))
    pre_gamma = np.ndarray(shape=(eb))
    pre_hadrons = np.ndarray(shape=(eb))

    ngamma_per_ebin = np.ndarray(eb)
    nhadron_per_ebin = np.ndarray(eb)

    # Weight events and count number of events per bin:
    for i in range(0,eb):  # binning in energy
        eg_w_sum = np.sum(e_reco_gw[(e_reco_g < E[i+1]) & (e_reco_g > E[i]) \
                                    & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])])

        ep_w_sum = np.sum(e_reco_pw[(e_reco_p < E[i+1]) & (e_reco_p > E[i]) \
                                    & (gammaness_p > gcut[i]) & p_contained])
        final_gamma[i] = eg_w_sum * obstime
        final_hadrons[i] = ep_w_sum * obstime * area_ratio_p[i]

        pre_gamma[i] = e_reco_g[(e_reco_g < E[i+1]) & (e_reco_g > E[i]) \
                                & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])].shape[0]
        pre_hadrons[i] = e_reco_p[(e_reco_p < E[i+1]) & (e_reco_p > E[i]) \
                                  & (gammaness_p > gcut[i]) & p_contained].shape[0]

        ngamma_per_ebin[i] = np.sum(e_reco_gw[(e_reco_g < E[i+1]) & (e_reco_g > E[i])]) * obstime
        nhadron_per_ebin[i] = np.sum(e_reco_pw[(e_reco_p < E[i+1]) & (e_reco_p > E[i])]) * obstime

    nex_5sigma, sens = calculate_sensitivity_lima_1d(final_gamma, final_hadrons * noff, 1/noff,
                                                  eb)
    # Avoid bins which are empty or have too few events:
    min_num_events = 10
    min_pre_events = 10
    # Minimum number of gamma and proton events in a bin to be taken into account for minimization
    for i in range(0, eb):
        conditions = (not np.isfinite(sens[i])) or (sens[i]<=0) \
                     or (final_hadrons[i] < min_num_events) \
                     or (pre_gamma[i] < min_pre_events) \
                     or (pre_hadrons[i] < min_pre_events)
        if conditions:
            sens[i] = np.inf

    #Quantities to show in the results
    sensitivity = np.ndarray(shape=eb)
    nex_min = np.ndarray(shape=eb)
    eff_g = np.ndarray(shape=eb)
    eff_p = np.ndarray(shape=eb)
    ngammas = np.ndarray(shape=eb)
    nhadrons = np.ndarray(shape=eb)
    gammarate = np.ndarray(shape=eb)
    hadronrate = np.ndarray(shape=eb)
    eff_area = np.ndarray(shape=eb)
    nevents_gamma = np.ndarray(shape=eb)
    nevents_proton = np.ndarray(shape=eb)

    # Calculate the minimum sensitivity per energy bin
    for i in range(0,eb):
        ngammas[i] = final_gamma[i]
        nhadrons[i] = final_hadrons[i]
        gammarate[i] = final_gamma[i]/(obstime.to(u.min)).to_value()
        hadronrate[i] = final_hadrons[i]/(obstime.to(u.min)).to_value()
        nex_min[i] =  nex_5sigma[i]
        sensitivity[i] = sens[i]
        eff_g[i] = final_gamma[i]/ngamma_per_ebin[i]
        eff_p[i] = final_hadrons[i]/nhadron_per_ebin[i]

        e_aftercuts = e_true_g[(e_true_g < E[i+1]) & (e_true_g > E[i]) \
                               & (gammaness_g > gcut[i]) & (theta2_g < tcut[i])]

        e_aftercuts_p = e_true_p[(e_true_p < E[i+1]) & (e_true_p > E[i]) \
                                 & (gammaness_p > gcut[i]) & p_contained]

        e_aftercuts_w = np.sum(np.power(e_aftercuts, crab_par['alpha']-mc_par_g['sp_idx']))

        e_w = np.sum(np.power(e_true_g[(e_true_g < E[i+1]) & (e_true_g > E[i])],
                              crab_par['alpha']-mc_par_g['sp_idx']))

        eff_area[i] = e_aftercuts_w.to_value() / n_sim_bin[i] * mc_par_g['area_sim'].to(u.m**2).to_value()

        nevents_gamma[i] = e_aftercuts.shape[0]
        nevents_proton[i] = e_aftercuts_p.shape[0]

    #Compute sensitivity  in flux units

    emed = np.sqrt(E[1:] * E[:-1])
    dFdE, par = crab_magic(emed)
    sens_flux = sensitivity / 100 * (dFdE * emed * emed).to(u.erg / (u.cm**2 * u.s))

    list_of_tuples = list(zip(E[:E.shape[0]-1].to_value(), E[1:].to_value(), gcut, tcut,  # one lower edge per bin
                            ngammas, nhadrons,
                            gammarate, hadronrate,
                            nex_min, sens_flux.to_value(), eff_area,
                              eff_g, eff_p, nevents_gamma, nevents_proton))
    result = pd.DataFrame(list_of_tuples,
                           columns=['ebin_low', 'ebin_up', 'gammaness_cut', 'theta2_cut',
                                    'n_gammas', 'n_hadrons',
                                    'gamma_rate', 'hadron_rate',
                                    'nex_min', 'sensitivity','eff_area',
                                    'eff_gamma', 'eff_hadron',
                                    'nevents_g', 'nevents_p'])

    units = [E.unit, E.unit,"", tcut.unit,"", "",
             u.min**-1, u.min**-1, "",
             sens_flux.unit, mc_par_g['area_sim'].to(u.m**2).unit, "", "", "", ""]

    """
    sens_minimization_plot(eb, gb, tb, E, sens)
    
    plot_positions_survived_events(events_g,
                                   events_p,
                                   gammaness_g, gammaness_p,
                                   theta2_g, p_contained, sens, E, eb, gcut, tcut)
    """
    # Build dataframe of events that survive the cuts:
    events = pd.concat((events_g, events_p))
    dl2 = pd.DataFrame(columns=events.keys())
    for i in range(0,eb):
        df_bin = events[(10**events.mc_energy < E[i+1]) & (10**events.mc_energy > E[i]) \
                               & (events.gammaness > gcut[i]) & (events.theta2 < tcut[i])]

        dl2 = pd.concat((dl2, df_bin))

    return E, sensitivity, result, units, dl2
Code example #10
def sensitivity(emin_sens, emax_sens, eb, gb, tb, noff, obstime = 50 * 3600 * u.s):
    """
    Main function to calculate the sensitivity given a MC dataset

    Parameters
    ----------
    emin_sens: `Quantity` minimum energy of the sensitivity calculation
    emax_sens: `Quantity` maximum energy of the sensitivity calculation
    eb: `int` number of bins in energy
    gb: `int` number of bins in gammaness
    tb: `int` number of bins in theta2
    noff: `float` ratio between the background and the signal region
    obstime: `Quantity` Observation time in seconds

    TODO: Give files as input in a configuration file!

    Returns
    -------
    None, the function only produces the sensitivity plots

    """
   
    # Read files
    simtelfile_gammas = "/home/queenmab/DATA/LST1/Gamma/gamma_20deg_0deg_run8___cta-prod3-lapalma-2147m-LaPalma-FlashCam.simtel.gz"
    simtelfile_protons = "/home/queenmab/DATA/LST1/Proton/proton_20deg_0deg_run194___cta-prod3-lapalma-2147m-LaPalma-FlashCam.simtel.gz"
    PATH_EVENTS = "../../cta-lstchain-extra/reco/sample_data/dl2/"
    dl2_file_g = PATH_EVENTS+"/reco_gammas.h5" 
    dl2_file_p = PATH_EVENTS+"/reco_protons.h5"

    # Extract spectral parameters
    E = np.logspace(np.log10(emin_sens.to_value()), 
                     np.log10(emax_sens.to_value()), eb + 1) * u.GeV

    dFdE, crab_par = crab_hegra(E)
    dFdEd0, proton_par = proton_bess(E)

    # Read simulated and reconstructed values
    gammaness_g, theta2_g, e_reco_g, mc_par_g = process_mc(simtelfile_gammas, dl2_file_g)
    gammaness_p, theta2_p, e_reco_p, mc_par_p = process_mc(simtelfile_protons, dl2_file_p)

    # Rates and weights
    rate_g = rate(mc_par_g['emin'], mc_par_g['emax'], mc_par_g['sp_idx'], \
                  mc_par_g['cone'], mc_par_g['area_sim'], crab_par['f0'], crab_par['e0'])

    rate_p = rate(mc_par_p['emin'], mc_par_p['emax'], mc_par_p['sp_idx'], \
                  mc_par_p['cone'], mc_par_p['area_sim'], proton_par['f0'], proton_par['e0'])


    w_g = weight(mc_par_g['emin'], mc_par_g['emax'], mc_par_g['sp_idx'],
                 crab_par['alpha'], rate_g, mc_par_g['sim_ev'], crab_par['e0'])

    w_p = weight(mc_par_p['emin'], mc_par_p['emax'], mc_par_p['sp_idx'],
                 proton_par['alpha'], rate_p, mc_par_p['sim_ev'], proton_par['e0'])


    e_reco_gw = ((e_reco_g / crab_par['e0'])**(crab_par['alpha'] - mc_par_g['sp_idx'])) \
        * w_g
    e_reco_pw = ((e_reco_p / proton_par['e0'])**(proton_par['alpha'] - mc_par_g['sp_idx'])) \
        * w_p

    # Arrays to contain the number of gammas and hadrons for different cuts
    final_gamma = np.ndarray(shape=(eb, gb, tb))
    final_hadrons = np.ndarray(shape=(eb, gb, tb))

    g, t = bin_definition(gb, tb)

    for i in range(0,eb):  # binning in energy
        for j in range(0,gb):  # cut in gammaness
            for k in range(0,tb):  # cut in theta2
                eg_w_sum = np.sum(e_reco_gw[(e_reco_g < E[i+1].to_value()) & (e_reco_g > E[i].to_value()) \
                                         & (gammaness_g > g[j]) & (theta2_g < t[k])])

                ep_w_sum = np.sum(e_reco_pw[(e_reco_p < E[i+1].to_value()) & (e_reco_p > E[i].to_value()) \
                                         & (gammaness_p > g[j]) & (theta2_p < t[k])])
            
                final_gamma[i][j][k] = eg_w_sum * obstime
                final_hadrons[i][j][k] = ep_w_sum * obstime
                    
    # final_gamma and final_hadrons are plain ndarrays, so no unit conversion is needed
    sens = calculate_sensitivity(final_gamma, final_hadrons, 1/noff)
    
    # Calculate the minimum sensitivity per energy bin
    sensitivity = np.ndarray(shape=eb)
    for i in range(0,eb):
        ind = np.unravel_index(np.argmin(sens[i], axis=None), sens[i].shape)
        sensitivity[i] = sens[i][ind]

    sens_minimization_plot(eb, gb, tb, E, sens)
    sens_plot(eb, E, sensitivity)
Code example #11
def main():
    ntelescopes_gamma = 4
    ntelescopes_protons = 1
    n_bins_energy = 20  #  Number of energy bins
    n_bins_gammaness = 10  #  Number of gammaness bins
    n_bins_theta2 = 10  #  Number of theta2 bins
    obstime = 50 * 3600 * u.s
    noff = 5

    # Finds the best cuts for the computation of the sensitivity
    '''energy, best_sens, result, units, gcut, tcut = find_best_cuts_sensitivity(args.dl1file_gammas,
                                                                              args.dl1file_protons,
                                                                              args.dl2_file_g_sens,
                                                                              args.dl2_file_p_sens,
                                                                              ntelescopes_gamma, ntelescopes_protons,
                                                                              n_bins_energy, n_bins_gammaness,
                                                                              n_bins_theta2, noff,
                                                                              obstime)
    '''
    #For testing using fixed cuts
    gcut = np.ones(n_bins_energy) * 0.8
    tcut = np.ones(n_bins_energy) * 0.01

    print("\nApplying optimal gammaness cuts:", gcut)
    print("Applying optimal theta2 cuts: {} \n".format(tcut))

    # Computes the sensitivity
    energy, best_sens, result, units, dl2 = sensitivity(
        args.dl1file_gammas, args.dl1file_protons,
        args.dl2_file_g_cuts, args.dl2_file_p_cuts, 1, 1, n_bins_energy, gcut,
        tcut * (u.deg**2), noff, obstime)

    egeom = np.sqrt(energy[1:] * energy[:-1])
    dFdE, par = crab_hegra(egeom)
    sensitivity_flux = best_sens / 100 * (dFdE * egeom * egeom).to(
        u.erg / (u.cm**2 * u.s))

    # Saves the results
    dl2.to_hdf('test_sens.h5', key='data')
    result.to_hdf('test_sens.h5', key='results')

    tab = Table.from_pandas(result)

    for i, key in enumerate(tab.columns.keys()):
        tab[key].unit = units[i]
        if key == 'sensitivity':
            continue
        tab[key].format = '8f'

    # Plots

    plt.figure(figsize=(12, 8))
    plt.plot(egeom[:-1], tab['hadron_rate'], label='Hadron rate', marker='o')
    plt.plot(egeom[:-1], tab['gamma_rate'], label='Gamma rate', marker='o')
    plt.legend()
    plt.xscale('log')
    plt.xlabel('Energy (TeV)')
    plt.ylabel('events / min')
    plt.show()
    plt.savefig("rates.png")

    plt.figure(figsize=(12, 8))
    gammas_mc = dl2[dl2.mc_type == 0]
    protons_mc = dl2[dl2.mc_type == 101]
    sns.distplot(gammas_mc.gammaness, label='gammas')
    sns.distplot(protons_mc.gammaness, label='protons')
    plt.legend()
    plt.tight_layout()
    plt.show()
    plt.savefig("distplot_gammaness.png")

    plt.figure(figsize=(12, 8))
    sns.distplot(gammas_mc.mc_energy, label='gammas')
    sns.distplot(protons_mc.mc_energy, label='protons')
    plt.legend()
    plt.tight_layout()
    plt.show()
    plt.savefig("distplot_mc_energy.png")

    plt.figure(figsize=(12, 8))
    sns.distplot(gammas_mc.reco_energy.apply(np.log10), label='gammas')
    sns.distplot(protons_mc.reco_energy.apply(np.log10), label='protons')
    plt.legend()
    plt.tight_layout()
    plt.show()
    plt.savefig("distplot_energy_apply.png")

    plt.figure(figsize=(12, 8))
    ctaplot.plot_theta2(gammas_mc.reco_alt,
                        gammas_mc.reco_az,
                        gammas_mc.mc_alt,
                        gammas_mc.mc_az,
                        range=(0, 1),
                        bins=100)
    plt.show()
    plt.savefig("theta2.png")

    plt.figure(figsize=(12, 8))
    ctaplot.plot_angular_resolution_per_energy(gammas_mc.reco_alt,
                                               gammas_mc.reco_az,
                                               gammas_mc.mc_alt,
                                               gammas_mc.mc_az,
                                               gammas_mc.reco_energy)
    ctaplot.plot_angular_resolution_cta_requirement('north', color='black')

    plt.legend()
    plt.tight_layout()
    plt.show()
    plt.savefig("angular_resolution.png")

    plt.figure(figsize=(12, 8))
    ctaplot.plot_energy_resolution(gammas_mc.mc_energy, gammas_mc.reco_energy)
    ctaplot.plot_energy_resolution_cta_requirement('north', color='black')
    plt.legend()
    plt.tight_layout()
    plt.show()
    plt.savefig("effective_area.png")

    plt.figure(figsize=(12, 8))
    ctaplot.plot_energy_bias(gammas_mc.mc_energy, gammas_mc.reco_energy)
    plt.show()
    plt.savefig("energy_bias.png")

    plt.figure(figsize=(12, 8))
    gamma_ps_simu_info = read_simu_info_merged_hdf5(args.dl1file_gammas)
    emin = gamma_ps_simu_info.energy_range_min.value
    emax = gamma_ps_simu_info.energy_range_max.value
    total_number_of_events = gamma_ps_simu_info.num_showers * gamma_ps_simu_info.shower_reuse
    spectral_index = gamma_ps_simu_info.spectral_index
    area = (gamma_ps_simu_info.max_scatter_range.value -
            gamma_ps_simu_info.min_scatter_range.value)**2 * np.pi
    ctaplot.plot_effective_area_per_energy_power_law(
        emin,
        emax,
        total_number_of_events,
        spectral_index,
        gammas_mc.reco_energy[gammas_mc.tel_id == 1],
        area,
        label='selected gammas',
        linestyle='--')

    ctaplot.plot_effective_area_cta_requirement('north', color='black')
    plt.ylim([2 * 10**3, 10**6])
    plt.legend()
    plt.tight_layout()
    plt.show()
    plt.savefig("effective_area.png")

    plt.figure(figsize=(12, 8))
    plt.plot(energy[0:len(sensitivity_flux)],
             sensitivity_flux,
             '-',
             color='red',
             markersize=0,
             label='LST mono')
    plt.xscale('log')
    plt.yscale('log')

    plt.ylabel(r'$\mathsf{E^2 F \; [erg \, cm^{-2} s^{-1}]}$', fontsize=16)
    plt.xlabel('E [TeV]')
    plt.xlim([10**-2, 100])
    plt.ylim([10**-14, 10**-9])
    plt.tight_layout()
    plt.savefig('sensitivity.png')

    plt.figure(figsize=(12, 8))
    ctaplot.plot_energy_resolution(gammas_mc.mc_energy,
                                   gammas_mc.reco_energy,
                                   percentile=68.27,
                                   confidence_level=0.95,
                                   bias_correction=False)
    ctaplot.plot_energy_resolution_cta_requirement('north', color='black')
    plt.xscale('log')
    plt.ylabel('\u0394 E/E 68%')
    plt.xlabel('E [TeV]')
    plt.xlim([10**-2, 100])
    plt.ylim([0.08, 0.48])
    plt.tight_layout()

    plt.savefig('energy_resolution.png', dpi=100)