Example #1
    def guess_discovery_potential(self, source_path):
        sources = load_catalogue(source_path)

        for inj in self.injectors.values():
            inj.update_sources(sources)

        return estimate_discovery_potential(self.injectors, sources, self.llh_dict)
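For orientation, a hypothetical call is sketched below. The method clearly belongs to a handler-style object that owns self.injectors and self.llh_dict; the instance name mh and the catalogue path are placeholders, not names taken from the source.

# Hypothetical usage sketch; "mh" and the path are placeholders.
cat_path = "/path/to/agn_catalogue.npy"
disc_guess = mh.guess_discovery_potential(cat_path)
print("Rough discovery potential estimate:", disc_guess)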
Example #2
def agn_subset_catalogue(agn_type, xray_cat, n_sources):
    """Return the path to a catalogue of the n_sources brightest AGN,
    creating it from the parent catalogue if it does not exist yet."""
    subset_path = agn_subset_catalogue_name(agn_type, xray_cat, n_sources)
    if not os.path.isfile(subset_path):
        print("Catalogue not found. Creating one at:", subset_path)
        parent_cat = load_catalogue(agn_catalogue_name(agn_type, xray_cat))
        # Sort by base_weight in descending order and keep the brightest sources
        parent_cat = np.sort(parent_cat, order="base_weight")[::-1]
        new_cat = parent_cat[:n_sources]
        np.save(subset_path, new_cat)
    return subset_path
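A minimal usage sketch, assuming the helpers above are importable: because the subset is only built when the file is missing, repeated calls with the same arguments reuse the cached .npy file. The argument values below are placeholders, not catalogue names from the source.

# Placeholder arguments; the function returns a path, not the catalogue.
path = agn_subset_catalogue("some_agn_type", "some_xray_cat", 100)
cat = load_catalogue(path)
print(len(cat))  # at most 100 sources, the brightest by base_weight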
Example #3
def agn_subset_catalogue_north(agn_type, xray_cat, n_sources):
    subset_path = agn_subset_catalogue_name_north(agn_type, xray_cat, n_sources)
    if not os.path.isfile(subset_path):
        print("Catalogue not found. Creating one at:", subset_path)
        parent_cat = load_catalogue(agn_catalogue_name(agn_type, xray_cat))
        # Keep only northern-sky sources (declination above -5 degrees)
        parent_cat = parent_cat[parent_cat["dec_rad"] > np.deg2rad(-5)]
        parent_cat = np.sort(parent_cat, order="base_weight")[::-1]
        new_cat = parent_cat[:n_sources]
        print(new_cat)
        np.save(subset_path, new_cat)
        print("Catalogue length is:", len(new_cat))
    return subset_path
Example #4
def agn_subset_catalogue_sindec_cut(agn_type, xray_cat, n_sources, sindec_cut):
    subset_path = agn_subset_catalogue_name_sindec_cut(agn_type, xray_cat, n_sources, sindec_cut)
    if not os.path.isfile(subset_path):
        print("Catalogue not found. Creating one at:", subset_path)
        parent_cat = load_catalogue(agn_catalogue_name(agn_type, xray_cat))
        print("Original catalogue (before sindec + nrsrc selection) is:", len(parent_cat))
        print(parent_cat["dec_rad"][:10])
        # Keep only sources below the chosen sin(declination) threshold
        parent_cat = parent_cat[np.sin(parent_cat["dec_rad"]) < sindec_cut]
        print("Original catalogue after sindec cut is:", len(parent_cat))
        parent_cat = np.sort(parent_cat, order="base_weight")[::-1]
        new_cat = parent_cat[:n_sources]
        np.save(subset_path, new_cat)
    return subset_path
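For reference, a self-contained toy version of the sin(dec) selection used above, on a structured array with the same field names; all values are invented.

import numpy as np

# Toy catalogue with the same dtype fields as the real one.
toy = np.array(
    [(np.deg2rad(-10.0), 1.0), (np.deg2rad(30.0), 2.0), (np.deg2rad(85.0), 0.5)],
    dtype=[("dec_rad", float), ("base_weight", float)],
)
# Keep sources below sin(dec) = sin(30 deg), as sindec_cut does above.
cut = toy[np.sin(toy["dec_rad"]) < np.sin(np.deg2rad(30.0))]
print(len(cut))  # -> 1: only the -10 deg source passes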
Example #5
def agn_subset_catalogue_no_pole(agn_type, xray_cat, n_sources):
    subset_path = agn_subset_catalogue_name_no_pole(agn_type, xray_cat, n_sources)
    if not os.path.isfile(subset_path):
        print("Catalogue not found. Creating one at:", subset_path)
        parent_cat = load_catalogue(agn_catalogue_name(agn_type, xray_cat))
        print("Original catalogue (before pole + nrsrc selection) is:", len(parent_cat))
        # parent_cat = parent_cat[parent_cat["dec_rad"] > np.deg2rad(-5)]
        # print("Original catalogue (after north selection) is: ", len(parent_cat))
        # Exclude sources near the celestial pole (declination above 80 degrees)
        parent_cat = parent_cat[parent_cat["dec_rad"] < np.deg2rad(80)]
        print("Original catalogue (after pole selection) is:", len(parent_cat))

        parent_cat = np.sort(parent_cat, order="base_weight")[::-1]
        new_cat = parent_cat[:n_sources]
        np.save(subset_path, new_cat)
    return subset_path
Example #6
        def check_unblind(self):
            print("\n")
            print("You are proposing to unblind data.")
            print("\n")
            confirm()
            print("\n")
            print("You are unblinding the following catalogue:")
            print("\n")
            print(self.unblind_dict["catalogue"])
            print("\n")
            confirm()
            print("\n")
            print("The catalogue has the following entries:")
            print("\n")

            cat = load_catalogue(self.unblind_dict["catalogue"])

            print(cat.dtype.names)
            print(len(cat), np.sum(cat['base_weight']))
            print(cat)
            print("\n")
            confirm()
            print("\n")
            print("The following datasets will be used:")
            print("\n")
            for x in self.unblind_dict["dataset"].values():
                print(x.sample_name, x.season_name)
                print("\n")
                print(x.exp_path)
                print(x.pseudo_mc_path)
                print("\n")
            confirm()
            print("\n")
            print("The following LLH will be used:")
            print("\n")
            for (key, val) in self.unblind_dict["llh_dict"].items():
                print(key, val)
            print("\n")
            confirm()
            print("\n")
            print("Are you really REALLY sure about this?")
            print("You will unblind. This is your final warning.")
            print("\n")
            confirm()
            print("\n")
            print("OK, you asked for it...")
            print("\n")
Example #7
running_time = []
for (cat_type, method) in complete_cats_north[:1]:

    unique_key = cat_type + "_" + method

    print(unique_key)

    gamma_dict = dict()

    for gamma_index in gammas:
        res = dict()
        for j, nr_srcs in enumerate(nr_brightest_sources):

            cat_path = agn_subset_catalogue(cat_type, method, nr_srcs)
            print("Loading catalogue", cat_path, " with ", nr_srcs, "sources")
            catalogue = load_catalogue(cat_path)
            cat = np.load(cat_path)
            print("Total flux is: ", cat["base_weight"].sum() * 1e-13)
            full_name = generate_name(unique_key, nr_srcs, gamma_index)

            res_e_min = dict()
            # scale factor of neutrino injection, tuned for each energy bin
            scale_factor_per_decade = [0.2, 0.5, 1, 0.57, 0.29]

            for i, (e_min, e_max) in enumerate(bins[:]):
                full_name_en = full_name + "Emin={0:.2f}".format(e_min) + "/"

                print("Full name for ", nr_srcs, " sources is", full_name_en)

                injection_time = llh_time
                injection_energy = dict(llh_energy)
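The five entries of scale_factor_per_decade suggest five per-decade energy bins. The actual bin edges are not shown in this excerpt; one plausible construction, under that assumption, would be:

import numpy as np

# Assumed edges: one bin per energy decade. The true values used in the
# analysis are not part of this excerpt.
e_edges = np.logspace(2, 7, 6)  # 10^2 ... 10^7 GeV
bins = list(zip(e_edges[:-1], e_edges[1:]))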
Example #8
nr_brightest_sources = [15887]

all_res = dict()

# Loop over the gammas for the LLAGN sample with 15887 sources
for (cat_type, method) in complete_cats_north[-1:]:

    unique_key = cat_type + "_" + method

    gamma_dict = dict()

    for gamma_index in gammas:
        res = dict()
        nr_srcs = int(nr_brightest_sources[0])
        cat_path = agn_subset_catalogue(cat_type, method, nr_srcs)
        catalogue = load_catalogue(cat_path)
        cat = np.load(cat_path)

        name = generate_name(unique_key, nr_srcs, gamma_index)
        bkg_ts = bkg_ts_base_name(unique_key, nr_srcs)

        injection_time = llh_time
        injection_energy = dict(llh_energy)
        injection_energy["gamma"] = gamma_index

        inj_kwargs = {
            "injection_energy_pdf": injection_energy,
            "injection_sig_time_pdf": injection_time,
        }

        unblind_dict = {
Example #9
                    # disc_convert = rh.disc_potential_25/rh.disc_potential
                    # #
                    # sens.append(astro_sens[key] * inj_time)
                    # disc.append(astro_disc[key] * inj_time)
                    # #
                    # # # print astro_disc[key], rh.disc_potential, guess_disc
                    # # # raw_input("prompt")
                    # #
                    # disc_25.append(astro_disc[key] * inj_time * disc_convert)
                    # #
                    # sens_e.append(astro_sens[e_key] * inj_time)
                    # disc_e.append(astro_disc[e_key] * inj_time)
                    # disc_25_e.append(astro_disc[e_key] * inj_time * disc_convert)

                    cat = load_catalogue(rh_dict["catalogue"])

                    try:

                        guess = k_to_flux(
                            rh_dict["scale"] / 1.5
                        )

                    except KeyError:
                        guess = np.nan

                    astro_guess = calculate_astronomy(
                        guess, rh_dict["inj_dict"]["injection_energy_pdf"], cat
                    )

                    guess_disc.append(astro_guess[key] * inj_time)
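The try/except KeyError guard above can be written more compactly with dict.get; a sketch of an equivalent form, keeping np.nan as the fallback:

# Compact alternative to the try/except guard; behaviour matches when
# "scale" is simply absent from rh_dict.
scale = rh_dict.get("scale")
guess = k_to_flux(scale / 1.5) if scale is not None else np.nan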
Example #10
    def __init__(self, rh_dict, do_sens=True, do_disc=True, bias_error="std"):

        self.sources = load_catalogue(rh_dict["catalogue"])

        self.name = rh_dict["name"]
        self.mh_name = rh_dict["mh_name"]
        self.scale = rh_dict["scale"]

        self.results = dict()
        self.pickle_output_dir = name_pickle_output_dir(self.name)
        self.plot_dir = plot_output_dir(self.name)
        self.merged_dir = os.path.join(self.pickle_output_dir, "merged")

        self.allow_extrapolation = rh_dict.get(
            "allow_extrapolated_sensitivity", True)

        # Checks if the code should search for flares. By default, this is
        # not done.
        # self.flare = self.mh_name == "flare"

        # if self.flare:
        #     self.make_plots = self.flare_plots
        # else:
        self.make_plots = self.noflare_plots
        self.bias_error = bias_error

        # Checks whether negative n_s is fit or not
        #
        # try:
        #     self.negative_n_s = llh_kwargs["Fit Negative n_s?"]
        # except KeyError:
        #     self.negative_n_s = False
        #
        # try:
        #     self.fit_weights = llh_kwargs["Fit Weights?"]
        # except KeyError:
        #     self.fit_weights = False

        # Sets default Chi2 distribution to fit to background trials
        #
        # if self.fit_weights:
        #     self.ts_type = "Fit Weights"
        # elif self.flare:
        #     self.ts_type = "Flare"
        # elif self.negative_n_s:
        #     self.ts_type = "Negative n_s"
        # else:
        self.ts_type = get_ts_fit_type(rh_dict)

        # print "negative_ns", self.negative_n_s

        p0, bounds, names = MinimisationHandler.find_parameter_info(rh_dict)

        # p0, bounds, names = fit_setup(llh_kwargs, self.sources, self.flare)
        self.param_names = names
        self.bounds = bounds
        self.p0 = p0

        # if cleanup:
        #     self.clean_merged_data()

        # this will have the TS threshold values as keys and a tuple containing
        # (injection scale, relative overfluctuations, error on overfluctuations)
        # as values
        self.overfluctuations = dict()

        self.sensitivity = np.nan
        self.sensitivity_err = np.nan
        self.bkg_median = np.nan
        self.frac_over = np.nan
        self.disc_potential = np.nan
        self.disc_err = np.nan
        self.disc_potential_25 = np.nan
        self.disc_ts_threshold = np.nan
        self.extrapolated_sens = False
        self.extrapolated_disc = False
        self.flux_to_ns = np.nan

        # if self.show_inj:
        self.inj = self.load_injection_values()
        self._inj_dict = rh_dict["inj_dict"]
        self._dataset = rh_dict["dataset"]
        # else:
        #     self.inj = None

        try:
            self.merge_pickle_data()
        except FileNotFoundError:
            logger.warning("No files found at {0}".format(
                self.pickle_output_dir))

        try:
            self.find_ns_scale()
        except ValueError as e:
            logger.warning(
                "ValueError for ns scale factor: \n {0}".format(e))
        except IndexError:
            logger.warning(
                "IndexError for ns scale factor. Only background trials?")

        self.plot_bias()

        if do_sens:
            try:
                self.find_sensitivity()
            except ValueError as e:
                logger.warning(
                    "ValueError for sensitivity: \n {0}".format(e))

        if do_disc:
            try:
                self.find_disc_potential()
            except RuntimeError as e:
                logger.warning(
                    "RuntimeError for discovery potential: \n {0}".format(e))
            except TypeError as e:
                logger.warning(
                    "TypeError for discovery potential: \n {0}".format(e))
            except ValueError as e:
                logger.warning(
                    "ValueError for discovery potential: \n {0}".format(e))