# Loop over the two Type-II supernova sub-categories considered here.
# NOTE(review): the body is indented 8 spaces relative to the `for` header,
# which suggests this loop was extracted from inside an enclosing function
# (where `raw`, `pdf_type`, `sn_time_pdfs`, `pdf_names` and
# `sn_catalogue_name` would be in scope) — confirm against the full file.
for cat in ["IIp", "IIn"]:

        # Output sub-directory name for this category.
        name = raw + cat + "/"

        # Per-category results, keyed later (presumably by time-PDF name).
        cat_res = dict()

        # Set of decay-time PDFs to evaluate for this category.
        time_pdfs = sn_time_pdfs(cat, pdf_type=pdf_type)

        # Loop over time PDFs

        for llh_time in time_pdfs:

            # Convert the decay time (presumably given in days) into years to
            # build a human-readable PDF name.
            # NOTE(review): 364.25 looks like a typo for 365.25 (days per
            # Julian year) — but the resulting `pdf_name` is baked into the
            # catalogue file path below, so changing the constant would break
            # lookups of files already named with 364.25; verify before fixing.
            pdf_time = llh_time["decay_time"] / 364.25
            pdf_name = pdf_names(pdf_type, pdf_time)
            cat_path = sn_catalogue_name(cat, pdf_name=pdf_name)
            logging.debug("catalogue path: " + str(cat_path))

            # Load the (structured) source catalogue for this category/PDF.
            catalogue = np.load(cat_path)

            logging.debug("catalogue dtype: " + str(catalogue.dtype))

            # Nearest source = entry with the smallest "distance_mpc" field.
            closest_src = np.sort(catalogue, order="distance_mpc")[0]
# ---- Beispiel #2 (Example #2) — separator from the example aggregator ----
# Root path (inside the analysis output tree) holding the pre-computed
# background test-statistic distributions per category.
bkg_ts_root = "analyses/ccsn/stasik_2017/calculate_sensitivity/"

# Energy PDF for the likelihood: a power law in neutrino energy.
llh_energy = dict(energy_pdf_name="power_law")

# Collects per-category results; populated by the loop that follows.
res_dict = {}

for cat in sn_cats:
    name = name_root + cat + "/"
    bkg_ts = bkg_ts_root + cat + "/"

    cat_path = sn_catalogue_name(cat)
    catalogue = np.load(cat_path)

    llh_times = sn_time_pdfs(cat)

    for llh_time in llh_times:
        unblind_llh = {
            "llh_name": "standard",
            "llh_energy_pdf": llh_energy,
            "llh_time_pdf": llh_time,
        }

        unblind_dict = {
            "name": name,
            "mh_name": "fit_weights",
            "dataset": custom_dataset(ps_v002_p01, catalogue, llh_time),
            "catalogue": cat_path,
            "llh_dict": unblind_llh,
            "background_ts": bkg_ts,