Example #1
import numpy as np
import matplotlib.pyplot as plt

# Assumed to be in scope from the surrounding analysis module:
# sn_cats, updated_sn_catalogue_name, autolabel.


def plot_difference_tot(filename):

    # Count the objects per SN type, with and without the flagged
    # (mis-classified) objects.
    N = {}
    for flagged in [True, False]:
        N["flagged" if flagged else "unflagged"] = []

        for cat in sn_cats:
            catarr = np.load(updated_sn_catalogue_name(cat, flagged=flagged))
            N["flagged" if flagged else "unflagged"].append(len(catarr))

    # Objects that were previously not included in the catalogue; this
    # does not depend on the flag, so it is loaded only once.
    Nnew = []
    for cat in sn_cats:
        newcat = np.load(
            updated_sn_catalogue_name(cat, pdf_name="missed_objects"))
        Nnew.append(len(newcat))

    fig, ax = plt.subplots()

    x = np.arange(len(sn_cats))
    width = 0.4

    r1 = ax.bar(x + width / 2, N["unflagged"], width, label="new catalogue")
    r2 = ax.bar(x - width / 2, N["flagged"], width, label="old catalogue")
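    # Overlay a hatched segment at the top of the "new catalogue" bars,
    # marking the objects that were previously not included.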
    r1new = ax.bar(
        x + width / 2,
        Nnew,
        width,
        bottom=np.array(N["unflagged"]) - np.array(Nnew),
        hatch="//",
        color=r1[0].get_facecolor(),
        label="previously not included",
    )

    ax.set_ylabel("number of objects")
    ax.set_xlabel("SN types")
    ax.set_xticks(x)
    ax.set_xticklabels(sn_cats)
    ax.set_ylim(np.array(ax.get_ylim()) * [1, 1.1])
    ax.set_title("number of objects in catalogues")
    ax.legend()

    autolabel(r1, ax)
    autolabel(r2, ax)

    fig.savefig(filename)
    plt.close()
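
The `autolabel` helper is not part of this excerpt; a minimal stand-in in the style of the matplotlib bar-chart demo, assuming it simply annotates each bar with its height:

def autolabel(rects, ax):
    # Annotate each bar with its height, slightly above the bar top.
    for rect in rects:
        height = rect.get_height()
        ax.annotate(
            f"{height:.0f}",
            xy=(rect.get_x() + rect.get_width() / 2, height),
            xytext=(0, 3),  # 3 points above the bar
            textcoords="offset points",
            ha="center",
            va="bottom",
        )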
Example #2
import numpy as np
import matplotlib.pyplot as plt

# Assumed to be in scope from the surrounding analysis module:
# sn_times, pdf_names, updated_sn_catalogue_name, autolabel.


def plot_difference_individual(sn_types, filename):

    # plt.subplots returns a bare Axes for a single subplot; wrap it in
    # atleast_1d so that axs[i] also works when len(sn_types) == 1.
    fig, axs = plt.subplots(len(sn_types))
    axs = np.atleast_1d(axs)

    for i, sn_type in enumerate(sn_types):
        ax = axs[i]
        N = {}

        cats = []
        for pdf_type in sn_times[sn_type]:
            for pdf_time in sn_times[sn_type][pdf_type]:
                cats.append(pdf_names(pdf_type, pdf_time))

        for flagged in [True, False]:
            N["flagged" if flagged else "unflagged"] = []

            for cat in cats:
                catarr = np.load(
                    updated_sn_catalogue_name(sn_type,
                                              pdf_name=cat,
                                              flagged=flagged))
                N["flagged" if flagged else "unflagged"].append(len(catarr))

        x = np.arange(len(cats))
        width = 0.4

        r1 = ax.bar(x + width / 2, N["unflagged"], width, label="new")
        r2 = ax.bar(x - width / 2, N["flagged"], width, label="old")

        ax.set_ylabel(f"SN type {sn_type}")
        ax.set_xticks(x)
        ax.set_xticklabels(cats)
        ax.set_ylim(np.array(ax.get_ylim()) * [1, 1.1])

        autolabel(r1, ax)
        autolabel(r2, ax)

    axs[-1].set_xlabel("PDF type")
    axs[0].set_title("catalogues with wrong classifications")
    axs[-1].legend()

    fig.savefig(filename)
    plt.close()
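
A hedged usage sketch (the type keys and the output filename are illustrative assumptions; "IIn" does appear elsewhere in these examples):

plot_difference_individual(["IIn", "IIP"], "catalogue_difference_individual.png")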
Example #3
import os

import numpy as np

# Assumed to be defined earlier in the full script or imported from
# flarestack: injection_energy, injection_time, llh_energy, llh_time,
# ps_v002_p01, updated_sn_catalogue_name.

inj_dict = {
    "injection_energy_pdf": injection_energy,
    "injection_sig_time_pdf": injection_time,
}

llh_dict = {
    "llh_name": "standard_matrix",
    "llh_energy_pdf": llh_energy,
    "llh_sig_time_pdf": llh_time,
    "llh_bkg_time_pdf": {
        "time_pdf_name": "steady"
    },
}

catalogue = np.load(updated_sn_catalogue_name("IIn"))
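# Put every source at the same distance (1 Mpc) so that the distance
# weights in the stacking are equal across the catalogue.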
catalogue["distance_mpc"] = np.array([1] * len(catalogue))
dir_path = os.path.dirname(os.path.realpath(__file__))
temp_save_catalogue_to = f"{dir_path}/temp_check_stack_bias_equal_dist_cat.npy"
np.save(temp_save_catalogue_to, catalogue)

mh_dict = {
    "name": "examples/crosscheck_stacking_equal_dist/",
    "mh_name": "large_catalogue",
    "dataset": ps_v002_p01.get_seasons(),
    "catalogue": temp_save_catalogue_to,
    #     "catalogue": ps_stack_catalogue_name(0.1, 0.3),
    #     "catalogue": tde_catalogue_name("jetted"),
    "inj_dict": inj_dict,
    "llh_dict": llh_dict,
    "scale": 10.0,
Example #4
        # get the time pdfs for this catalogue
        time_pdfs = sn_time_pdfs(cat, pdf_type=pdf_type)

        # Loop over time PDFs
        for llh_time in time_pdfs:

            # set up an empty results array for this time pdf
            time_res = dict()

            logging.debug(f'time_pdf is {llh_time}')

            time_key = str(llh_time['decay_time'])

            # convert the decay time from days to years
            pdf_time = llh_time['decay_time'] / 365.25
            pdf_name = pdf_names(pdf_type, pdf_time)
            cat_path = updated_sn_catalogue_name(cat)  # , pdf_name=pdf_name)
            logging.debug(f'catalogue path: {cat_path}')

            # load catalogue and select the closest source
            # that serves for estimating a good injection scale later
            catalogue = np.load(cat_path)
            logging.debug(f'catalogue dtype: {catalogue.dtype}')
            closest_src = np.sort(catalogue, order="distance_mpc")[0]

            time_name = name + time_key + "/"

            # set up the likelihood dictionary
            llh_dict = {
                "llh_name": "standard",
                "llh_energy_pdf": llh_energy,
                "llh_sig_time_pdf": llh_time,
Example #5
from flarestack.core.unblinding import create_unblinder
from flarestack.utils.custom_dataset import custom_dataset

name_root = "analyses/ccsn/necker_2019/unblind_ccsn"
bkg_ts_root = "analyses/ccsn/necker_2019/calculate_sensitivity/fit_weights"

llh_energy = {
    "energy_pdf_name": "power_law",
}

res_dict = dict()

for cat in sn_cats:

    cat_path = updated_sn_catalogue_name(cat)
    catalogue = np.load(cat_path)

    for pdf_type in sn_times[cat]:

        llh_times = sn_time_pdfs(cat, pdf_type=pdf_type)

        name = f"{name_root}/{pdf_type}/{cat}"
        bkg_ts = f"{bkg_ts_root}/{pdf_type}/{cat}"

        for llh_time in llh_times:

            time = (llh_time["decay_time"] if "decay" in pdf_type else
                    llh_time["pre_window"] + llh_time["post_window"])

            unblind_llh = {
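                # The excerpt ends here; a hedged reconstruction mirroring
                # the likelihood dictionaries elsewhere in this listing:
                "llh_name": "standard",
                "llh_energy_pdf": llh_energy,
                "llh_sig_time_pdf": llh_time,
                "llh_bkg_time_pdf": {"time_pdf_name": "steady"},
            }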
Example #6
import os

import numpy as np

# Assumed to be defined earlier in the full script or imported from
# flarestack: injection_energy, injection_time, llh_energy, llh_time,
# diffuse_8_year, updated_sn_catalogue_name.

inj_dict = {
    "injection_energy_pdf": injection_energy,
    "injection_sig_time_pdf": injection_time,
}

llh_dict = {
    "llh_name": "standard",
    "llh_energy_pdf": llh_energy,
    "llh_sig_time_pdf": llh_time,
    "llh_bkg_time_pdf": {
        "time_pdf_name": "steady"
    },
}

old_path = updated_sn_catalogue_name("IIn")
new_cat_path = os.path.join(os.path.dirname(old_path), "cut_IIn.npy")

old_cat = np.load(old_path)
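# Keep only sources in the northern sky (declination > 0 rad).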
old_cat = old_cat[old_cat["dec_rad"] > 0.0]
np.save(new_cat_path, old_cat)

mh_dict = {
    "name": "analyses/general/crosscheck_stacking",
    "mh_name": "fixed_weights",
    # "dataset": ps_v002_p01.get_seasons(),
    "dataset": diffuse_8_year.get_seasons(),
    # "catalogue": updated_sn_catalogue_name("IIn"),
    "catalogue": new_cat_path,
    #     "catalogue": ps_stack_catalogue_name(0.1, 0.3),
    #     "catalogue": tde_catalogue_name("jetted"),
Example #8
                for pdf_type in sn_times[sn_type]:
                    logging.info(f"pdf type: {pdf_type}")

                    for pdf_time in sn_times[sn_type][pdf_type]:

                        # get catalogues for individual PDFs
                        pdf_name = pdf_names(pdf_type, pdf_time)
                        logging.info(f"pdf time: {pdf_time}")
                        catalogue = load_catalogue(sn_type,
                                                   pdf_name,
                                                   include_flagged=flag,
                                                   z_add=z_add)
                        savename = updated_sn_catalogue_name(
                            sn_type,
                            pdf_name,
                            flagged=flag,
                            z_conservative=z_add)
                        np.save(savename, catalogue)

                        # combine with previous PDF-catalogues
                        catalogue_red = catalogue[columns_out[keep_inds_out]]
                        if start:
                            combined_catalogue = catalogue_red
                        start = False

                        # Mark the objects of this catalogue that are not
                        # yet part of the combined catalogue.
                        new_mask = np.invert([
                            name in combined_catalogue["source_name"]
                            for name in catalogue_red["source_name"]
                        ])
                        new_objects = catalogue_red[new_mask]
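
                        # Hypothetical continuation (not in the excerpt):
                        # extend the running combined catalogue with the
                        # newly found objects.
                        combined_catalogue = np.append(combined_catalogue,
                                                       new_objects)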