Example #1
0
    # NOTE(review): the enclosing conditional lies outside this chunk --
    # presumably the branch taken when running in real (non-scrambled)
    # unblinding mode; confirm against the full source.
    print("## In real unblinding mode, it's getting serious! ##")
    scramble = False

# Choose output and BG PDF locations depending on whether the official
# skylab binnings were used when building the PDFs.
if use_skylab_bins:
    print("Using official skylab binnings for the PDFs!")
    _out_name, _pdf_dir = "unblinding_skylab_bins", "bg_pdf_skylab_bins"
else:
    _out_name, _pdf_dir = "unblinding", "bg_pdf"
outpath = os.path.join(PATHS.local, _out_name)
pdf_path = os.path.join(PATHS.local, _pdf_dir, "bg_pdf.json.gz")

# Make sure the output directory exists without prompting.
check_dir(outpath, ask=False)

# Extract source info, use the same fixed spectral index -2 for all
sources = _loader.source_list_loader()
nsrcs = len(sources)
ra = [_src["ra"] for _src in sources]
dec = [_src["dec"] for _src in sources]
# Theo weights need to be normalized manually in skylab?
# Each source gets the same weight 1/nsrcs (identical to ones/nsrcs).
w = np.full(nsrcs, 1. / float(nsrcs), dtype=float)

# Create the multi LLH with a fixed RNG seed for reproducibility.
rnd_seed = 1
multillh = MultiPointSourceLLH(seed=rnd_seed, ncpu=40)

# Per-sample LLH setup loop.
# NOTE(review): this block appears spliced from two different scripts --
# `dt1`, `dt0`, `ontime` and `nevts` are not defined in the visible scope,
# and the `return offtime` below is only valid inside a function body
# (it is a SyntaxError at module level). Confirm against the original file.
livetimes = _loader.livetime_loader()
for name, livetime in sorted(livetimes.items()):
    print("\n# Setting up LLH for sample '{}'".format(name))
    # Get exp data and MC
    exp = _loader.exp_data_loader(name)[name]
    # NOTE(review): dt0/dt1 presumably are on-time window edges in seconds
    # -- not defined here, verify.
    print("  Ontime window duration: {:.2f} sec".format(dt1 - dt0))
    print("  Ontime events: {} / {}".format(np.sum(ontime), nevts))
    for i, on_per_src in enumerate(ontime):
        print("  - Source {}: {} on time".format(i, np.sum(on_per_src)))
    return offtime


# Output directories for the off-/on-time data splits and the HESE-removed MC.
off_data_outpath = os.path.join(PATHS.data, "data_offtime")
on_data_outpath = os.path.join(PATHS.data, "data_ontime")
mc_outpath = os.path.join(PATHS.data, "mc_no_hese")
for _p in [off_data_outpath, on_data_outpath, mc_outpath]:
    # exist_ok=True replaces the racy isdir-then-makedirs check: creation
    # succeeds whether or not the directory already exists.
    os.makedirs(_p, exist_ok=True)

# Sources plus the extreme lower/upper edges over all source time windows.
sources = source_list_loader("all")
_dts0, _dts1 = time_window_loader("all")
dt0_min = np.amin(_dts0)
dt1_max = np.amax(_dts1)

# Runlists for every sample.
runlists = runlist_loader("all")

# Data and MC come from the PS track datasets plus one year of the GFU sample.
ps_tracks = Datasets["PointSourceTracks"]
gfu_tracks = Datasets["GFU"]
ps_sample_names = ["IC79", "IC86, 2011", "IC86, 2012-2014"]
gfu_sample_names = ["IC86, 2015"]
all_sample_names = sorted(gfu_sample_names + ps_sample_names)

# Base MC is same for multiple samples, match names here
name2heseid_file = {
Example #3
0
    os.makedirs(outpath)

# Binning used for injector and LLH models alike.
# Finer resolution around the horizon (|sin(dec)| < sin(30 deg)), where the
# event selections usually switch between northern and southern samples.
hor = np.sin(np.deg2rad(30))
sd_lo, sd_hi = -1., 1.
_south = np.linspace(sd_lo, -hor, 4)     # 3 bins south of the horizon band
_horizon = np.linspace(-hor, +hor, 15)   # 14 bins across the horizon band
_north = np.linspace(+hor, sd_hi, 4)     # 3 bins north of the horizon band
# Shared edges at +-hor are deduplicated by np.unique.
sindec_bins = np.unique(np.concatenate([_south, _horizon, _north]))

# Make settings for each module per sample.
# NOTE(review): the loop body continues past this chunk; only the data
# loading and rate-binning setup are visible here.
sample_names = source_list_loader()
for key in sample_names:
    print("Building settings file for sample '{}'".format(key))
    # Load data that settings depend on. Each loader returns a dict keyed
    # by sample name, hence the [key] lookup on the result.
    srcs = source_list_loader(key)[key]
    runlist = runlist_loader(key)[key]
    exp_off = off_data_loader(key)[key]
    exp_on = on_data_loader(key)[key]
    mc = mc_loader(key)[key]

    # :: BG injector ::
    # Rebinning for the rate model fits, use monthly bins.
    # The 2012-2014 sample spans three years, so it gets 36 monthly bins
    # instead of the single-year default of 12.
    if key == "IC86_2012-2014":
        n_rate_bins = 36
    else:
        n_rate_bins = 12
Example #4
0
                  # NOTE(review): continuation of a mask expression whose
                  # start lies before this chunk; only the run-id match and
                  # the enclosing function's tail are visible here.
                  (src["run_id"] == exp["Run"]))
        # Accumulate this source's matches into the overall HESE-source mask.
        is_hese_src = np.logical_or(is_hese_src, mask_i)
        print("  - Source {}: {}. Dec: {:.2f} deg. logE: {} log(GeV)".format(
            i, np.sum(mask_i), np.rad2deg(src["dec"]), exp[mask_i]["logE"]))
    # Boolean mask over `exp` flagging events matched to HESE sources.
    return is_hese_src


# Output directories for HESE-removed experimental data and MC.
exp_data_outpath = os.path.join(PATHS.data, "exp_no_hese")
mc_outpath = os.path.join(PATHS.data, "mc_no_hese")
out_paths = {"exp": exp_data_outpath, "mc": mc_outpath}
for _p in out_paths.values():
    # exist_ok=True replaces the racy isdir-then-makedirs check: creation
    # succeeds whether or not the directory already exists.
    os.makedirs(_p, exist_ok=True)

# Source list for the analysis.
sources = source_list_loader()

# Data and MC come from the PS track datasets plus one year of the GFU
# sample. IC40/IC59 are skipped: no HESE-removed MC exists for them.
ps_tracks = Datasets["PointSourceTracks"]
gfu_tracks = Datasets["GFU"]
ps_sample_names = ["IC79", "IC86, 2011", "IC86, 2012-2014"]
gfu_sample_names = ["IC86, 2015"]
all_sample_names = sorted(gfu_sample_names + ps_sample_names)

# Base MC is same for multiple samples, match names here.
# Maps sample name -> HESE event-ID file used to strip HESE events.
name2heseid_file = {
    "IC79": "IC79.json.gz",
    "IC86_2011": "IC86_2011.json.gz",
    # The 2012-2015 samples share one base MC, hence the same ID file.
    "IC86_2012-2014": "IC86_2012-2015.json.gz",
    "IC86_2015": "IC86_2012-2015.json.gz"
    # NOTE(review): dict literal continues past this chunk (closing brace
    # not visible here).