    def test_declination_sensitivity(self):

        logging.info("Testing 'fixed_weights' MinimisationHandler class")

        for i, e_pdf_dict in enumerate(energy_pdfs):

            llh_dict = {
                "llh_name": "fixed_energy",
                "llh_sig_time_pdf": {
                    "time_pdf_name": "steady"
                },
                "llh_bkg_time_pdf": {
                    "time_pdf_name": "steady",
                },
                "llh_energy_pdf": e_pdf_dict
            }

            unblind_dict = {
                "name": "tests/test_energy_pdfs/",
                "mh_name": "fixed_weights",
                "dataset":
                icecube_ps_3_year.get_seasons('IC79-2010', 'IC86-2011'),
                "catalogue": catalogue,
                "llh_dict": llh_dict,
            }

            ub = create_unblinder(unblind_dict)
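            # res_dict holds the minimiser output under a single key alongside
            # the test statistic ("TS"); pick out the non-"TS" entry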
            key = [x for x in ub.res_dict.keys() if x != "TS"][0]
            res = ub.res_dict[key]

            logging.info("Best fit values {0}".format(list(res["x"])))
            logging.info("Reference best fit {0}".format(true_parameters[i]))

            for j, x in enumerate(list(res["x"])):
                self.assertAlmostEqual(x, true_parameters[i][j], delta=0.1)

    def test_declination_sensitivity(self):

        logging.info("Testing 'large_catalogue' MinimisationHandler class "
                     "with {0} sources and IC79-2010 data".format(n_sources))

        # Test stacking

        unblind_dict = {
            "name": "test/test_large_catalogue/",
            "mh_name": "large_catalogue",
            "dataset": icecube_ps_3_year.get_seasons("IC79-2010"),
            "catalogue": catalogue,
            "llh_dict": llh_dict,
            "inj_dict": {}
        }

        ub = create_unblinder(unblind_dict)
        key = [x for x in ub.res_dict.keys() if x != "TS"][0]
        res = ub.res_dict[key]

        logging.info("Best fit values {0}".format(list(res["x"])))
        logging.info("Reference best fit {0}".format(true_parameters[0]))

        for i, x in enumerate(res["x"]):
            self.assertAlmostEqual(x, true_parameters[0][i], delta=0.1)
Example #3
    def test_declination_sensitivity(self):

        logging.info("Testing 'standard' LLH class")

        # Test three declinations

        for j, sindec in enumerate(sindecs):

            unblind_dict = {
                "name": "tests/test_llh_standard/",
                "mh_name": "fixed_weights",
                "dataset": icecube_ps_3_year.get_seasons("IC86-2011"),
                "catalogue": ps_catalogue_name(sindec),
                "llh_dict": llh_dict,
            }

            ub = create_unblinder(unblind_dict)
            key = [x for x in ub.res_dict.keys() if x != "TS"][0]
            res = ub.res_dict[key]

            logging.info("Best fit values {0}".format(list(res["x"])))
            logging.info("Reference best fit {0}".format(true_parameters[j]))

            for i, x in enumerate(res["x"]):
                self.assertAlmostEqual(x, true_parameters[j][i], delta=0.1)

    def test_spatial(self):
        logging.info("Testing 'spatial' LLH class")

        ub = create_unblinder(unblind_dict)
        key = [x for x in ub.res_dict.keys() if x != "TS"][0]
        res = ub.res_dict[key]

        logging.info("Best fit values {0}".format(list(res["x"])))
        logging.info("Reference best fit {0}".format(true_parameters))

        self.assertAlmostEqual(list(res["x"])[0], true_parameters, delta=0.1)
Example #5
    def test_custom_dataset(self):

        logging.info("Testing custom_dataset util function.")

        llh_dict = {
            "llh_name": "standard",
            "llh_sig_time_pdf": {
                "time_pdf_name": "box",
                "pre_window": 0.,
                "post_window": 100.
            },
            "llh_bkg_time_pdf": {
                "time_pdf_name": "steady",
            },
            "llh_energy_pdf": {
                "energy_pdf_name": "power_law"
            }
        }

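        # custom_dataset is expected to return only those seasons of the sample
        # that overlap the catalogue sources' signal time windows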
        dataset = custom_dataset(icecube_ps_3_year, load_catalogue(catalogue),
                                 llh_dict["llh_sig_time_pdf"])

        keys = sorted(list(dataset.keys()))

        self.assertEqual(keys, true_keys)

        # Test three declinations

        unblind_dict = {
            "name": "test_custom_dataset",
            "mh_name": "fixed_weights",
            "dataset": custom_dataset(icecube_ps_3_year, load_catalogue(catalogue),
                                      llh_dict["llh_sig_time_pdf"]),
            "catalogue": catalogue,
            "llh_dict": llh_dict,
        }

        ub = create_unblinder(unblind_dict)
        key = [x for x in ub.res_dict.keys() if x != "TS"][0]
        res = ub.res_dict[key]
        logging.info("Best fit values {0}".format(list(res["x"])))
        logging.info("Reference best fit {0}".format(true_parameters))

        for j, x in enumerate(res["x"]):
            self.assertAlmostEqual(x, true_parameters[j], delta=0.1)
Example #6
    def test_declination_sensitivity(self):

        logging.info("Testing 'fit_weight' MinimisationHandler class")

        mh_name = "fit_weights"

        # Test three declinations

        unblind_dict = {
            "name": "tests/test_mh_fit_weights",
            "mh_name": mh_name,
            "dataset": icecube_ps_3_year.get_seasons("IC86-2011"),
            "catalogue": catalogue,
            "llh_dict": llh_dict,
        }

        ub = create_unblinder(unblind_dict)
        key = [x for x in ub.res_dict.keys() if x != "TS"][0]
        res = ub.res_dict[key]

        logging.info("Best fit values {0}".format(list(res["x"])))
        logging.info("Reference best fit {0}".format(true_parameters))

        for i, x in enumerate(res["x"]):
            self.assertAlmostEqual(x, true_parameters[i], delta=0.1)

        inj_dict = {
            "injection_sig_time_pdf": {"time_pdf_name": "steady"},
            "injection_bkg_time_pdf": {
                "time_pdf_name": "steady",
            },
            "injection_energy_pdf": {"energy_pdf_name": "power_law", "gamma": 2.0},
        }

        mh_dict = dict(unblind_dict)
        mh_dict["inj_dict"] = inj_dict
        mh_dict["n_trials"] = 1.0
        mh_dict["n_steps"] = 3.0
        mh_dict["scale"] = 5.0

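        # Build a MinimisationHandler from the same settings, run one trial with
        # signal injected at scale 5, then run the configured set of trials
        # locally (cluster=False) and produce a likelihood corner scan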
        mh = MinimisationHandler.create(mh_dict)
        res = mh.simulate_and_run(5.0)
        analyse(mh_dict, cluster=False)

        mh.corner_likelihood_scan(
            save=True,
            res_dict=res,
        )
Example #7
    def test_flare(self):
        logging.info("Testing 'flare' MinimisationHandler class")

        ub = create_unblinder(unblind_dict, full_plots=True)
        res = list(ub.res_dict["Parameters"].values())

        logging.info("Best fit values {0}".format(res))
        logging.info("Reference best fit {0}".format(true_parameters))

        for i, x in enumerate(res):
            if i < 2:
                self.assertAlmostEqual(x / true_parameters[i], 1.0, places=1)
            else:
                self.assertEqual(x, true_parameters[i])

        inj_dict = {
            "injection_sig_time_pdf": {
                "time_pdf_name": "steady"
            },
            "injection_bkg_time_pdf": {
                "time_pdf_name": "steady",
            },
            "injection_energy_pdf": {
                "energy_pdf_name": "power_law",
                "gamma": 2.0
            },
        }

        mh_dict = dict(unblind_dict)
        mh_dict["inj_dict"] = inj_dict
        mh_dict["n_trials"] = 1.0
        mh_dict["n_steps"] = 3.0
        mh_dict["scale"] = 5.0

        mh = MinimisationHandler.create(mh_dict)
        res = mh.simulate_and_run(5.0)
        analyse(mh_dict, cluster=False)

    for gamma_index in gammas:
        res = dict()
        nr_srcs = int(nr_brightest_sources[0])
        cat_path = agn_subset_catalogue(cat_type, method, nr_srcs)
        catalogue = load_catalogue(cat_path)
        cat = np.load(cat_path)

        name = generate_name(unique_key, nr_srcs, gamma_index)
        bkg_ts = bkg_ts_base_name(unique_key, nr_srcs)

        injection_time = llh_time
        injection_energy = dict(llh_energy)
        injection_energy["gamma"] = gamma_index

        inj_kwargs = {
            "injection_energy_pdf": injection_energy,
            "injection_sig_time_pdf": injection_time,
        }

        unblind_dict = {
            "name": name,
            "mh_name": "large_catalogue",
            "dataset": diffuse_8_year.get_seasons(),
            "catalogue": cat_path,
            "llh_dict": llh_dict,
            "background_ts": bkg_ts,
        }

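        # mock_unblind=True applies the unblinding machinery to a scrambled
        # (background-like) dataset rather than to the real unblinded data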
        ub = create_unblinder(unblind_dict, mock_unblind=True, full_plots=False)
Example #9
    for pdf_type in sn_times[cat]:

        llh_times = sn_time_pdfs(cat, pdf_type=pdf_type)

        name = f"{name_root}/{pdf_type}/{cat}"
        bkg_ts = f"{bkg_ts_root}/{pdf_type}/{cat}"

        for llh_time in llh_times:

            time = (llh_time["decay_time"] if "decay" in pdf_type else
                    llh_time["pre_window"] + llh_time["post_window"])

            unblind_llh = {
                "llh_name": "standard",
                "llh_energy_pdf": llh_energy,
                "llh_sig_time_pdf": llh_time,
            }

            unblind_dict = {
                "name": name,
                "ts_type": "Fit Weights",
                "mh_name": "fit_weights",
                "dataset": custom_dataset(ps_v002_p01, catalogue, llh_time),
                "catalogue": cat_path,
                "llh_dict": unblind_llh,
                "background_ts": f"{bkg_ts}/{time}/",
            }

            ub = create_unblinder(unblind_dict, mock_unblind=True)
Example #10
    unblind_dict = {
        "name": name,
        "mh_name": "fit_weights",
        "dataset": custom_dataset(txs_sample_v1, catalogue,
                                  llh_dict["llh_sig_time_pdf"]),
        "catalogue": cat_path,
        "llh_dict": llh_dict,
        "background_ts": bkg_ts,
    }

    # ub = create_unblinder(unblind_dict, mock_unblind=False)
    ub = create_unblinder(unblind_dict,
                          mock_unblind=False,
                          disable_warning=True)

    r = ub.res_dict
    print(r)
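    # collect the fitted n_s for each catalogue source by matching the source
    # name against the corresponding fitted parameter key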
    ns = np.zeros(len(catalogue))

    for x, val in r["Parameters"].items():
        if "n_s" in x:
            mask = np.array(
                [str(y) in str(x) for y in catalogue["source_name"]])
            ns[mask] = float(val)
    print(f"nstot: {np.sum(ns)}")

    res.append((cat, ub.ts))
Example #11
}

res_dict = dict()

for cat in sn_cats:
    name = name_root + cat + "/"
    bkg_ts = bkg_ts_root + cat + "/"

    cat_path = sn_catalogue_name(cat)
    catalogue = np.load(cat_path)

    llh_times = sn_time_pdfs(cat)

    for llh_time in llh_times:
        unblind_llh = {
            "llh_name": "standard",
            "llh_energy_pdf": llh_energy,
            "llh_time_pdf": llh_time,
        }

        unblind_dict = {
            "name": name,
            "mh_name": "fit_weights",
            "dataset": custom_dataset(ps_v002_p01, catalogue, llh_time),
            "catalogue": cat_path,
            "llh_dict": unblind_llh,
            "background_ts": bkg_ts,
        }

        ub = create_unblinder(unblind_dict, mock_unblind=False)


def run_background_pvalue_trials(seed=None, ntrials=1):
    """
    Run background trials and calculate the corresponding p-values.

    :param seed: int, seed for dataset simulation
    :param ntrials: int, number of trials to run
    :return: nested dictionary of raw pre-trial p-values, keyed by PDF type,
        catalogue, time PDF and spectral index
    """

    # setting the random seed ensures that the Unblinder always produces the
    # same scrambled dataset
    rng = np.random.default_rng(seed=seed)

    # generating an array of seeds that will be used if more than one trial is run
    seeds = rng.integers(0, 2**30, ntrials)

    pdf_full_res = dict()

    for pdf_type in ["box", "decay"]:

        # base name
        raw = f"{base_raw}/{pdf_type}/"

        # set up empty dictionary to store the minimizer information in
        full_res = dict()

        # loop over SN catalogues
        use_cats = sn_cats if pdf_type == "box" else ["IIn", "IIP"]
        for cat in use_cats:

            name = raw + cat + "/"

            # set up empty results dictionary for this catalogue
            cat_res = dict()

            # get the time pdfs for this catalogue
            time_pdfs = sn_time_pdfs(cat, pdf_type=pdf_type)

            # Loop over time PDFs
            for llh_time in time_pdfs:

                # set up an empty results dictionary for this time pdf
                time_res = dict()

                logger.debug(f"time_pdf is {llh_time}")

                if pdf_type == "box":
                    time_key = str(llh_time["post_window"] +
                                   llh_time["pre_window"])
                    # pdf_time = float(time_key) if llh_time['pre_window'] == 0 else - float(time_key)

                else:
                    time_key = str(llh_time["decay_time"])

                time_name = name + time_key + "/"

                # Loop over spectral indices
                for gamma in gammas:

                    # load the background trials
                    if (pdf_type == "decay") and (gamma == 2.0):
                        gamma_str = "2"
                    else:
                        gamma_str = str(gamma)

                    full_name = time_name + gamma_str + "/"

                    mh_dict_path = analysis_pickle_path(name=full_name)
                    logger.debug(
                        f"loading analysis pickle from {mh_dict_path}")
                    with open(mh_dict_path, "rb") as f:
                        mh_dict = pickle.load(f)

                    unblind_dict = dict(mh_dict)
                    unblind_dict["name"] += "_create_background_pvalue_distr"
                    unblind_dict["background_ts"] = mh_dict["name"]

                    ub = create_unblinder(
                        unblind_dict,
                        mock_unblind=True,
                        disable_warning=True,
                        seed=seeds[0],
                    )
                    bkg_ts_array = ub.bkg_ts_array
                    p_values = [ub.raw_pre_trial_pvalue]
                    logger.info(f"Raw p-value is {ub.raw_pre_trial_pvalue}")

                    for s in seeds[1:]:
                        logger.debug(f"seed is {s}")
                        res_dict = ub.simulate_and_run(0, s)
                        ts = res_dict["TS"]

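                        # raw pre-trial p-value: fraction of background trials
                        # with a test statistic above the observed TS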
                        raw_p_value = len(bkg_ts_array[
                            bkg_ts_array > ts]) / len(bkg_ts_array)
                        logger.info(f"Raw p-value is {raw_p_value}")
                        p_values.append(raw_p_value)

                    time_res[gamma] = p_values
                cat_res[time_key] = time_res
            full_res[cat] = cat_res
        pdf_full_res[pdf_type] = full_res

    return pdf_full_res
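

# A minimal usage sketch (illustrative seed and trial count; assumes the
# module-level numpy import already used above). The returned dictionary is
# nested as pdf_type -> catalogue -> time PDF key -> spectral index, with a
# list of raw pre-trial p-values at the innermost level.
if __name__ == "__main__":
    results = run_background_pvalue_trials(seed=42, ntrials=5)
    for pdf_type, cat_res in results.items():
        for cat, time_res in cat_res.items():
            for time_key, gamma_res in time_res.items():
                for gamma, p_values in gamma_res.items():
                    print(
                        f"{pdf_type}/{cat}/{time_key}/gamma={gamma}: "
                        f"median raw p-value {np.median(p_values):.3g}"
                    )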