Code example #1
import numpy as np
from astropy import time as astrotime

# ``arr2str`` is assumed to be a project-local helper that joins array entries
# into a single formatted string; it is only used for the log output below.

def make_run_list(ev_times, ev_runids, exclude_runs=None):
    """
    Make a run list from given event times and run IDs, optionally excluding
    given runs. Run start and stop are estimated from the first and last event
    time in each run, which is biased but there seems to be no better way.

    Parameters
    ----------
    ev_times : array-like, shape (nevts,)
        Event times in MJD days from the used data sample.
    ev_runids : array-like, shape (nevts,)
        Event run IDs from the used data sample.
    exclude_runs : array-like or None, optional
        Run IDs to exclude, for example when samples overlap.
        (default: ``None``)

    Returns
    -------
    run_list : list of dicts
        Each dict has keys similar to a snapshot from [1]_ in JSON format:
            [
              {"good_tstart": "YYYY-MM-DD HH:MM:SS",
               "good_tstop": "YYYY-MM-DD HH:MM:SS",
               "run": 123456},
              {...}, ..., {...}
            ]
        Times are given as ISO formatted strings and run IDs as integers.

    References
    ----------
    .. [1] live.icecube.wisc.edu/snapshots
    """
    # If selected runs were empty on final level, they are not considered here
    used_run_ids = np.unique(ev_runids).astype(int)
    ev_runids = ev_runids.astype(int)

    if exclude_runs is not None:
        used_run_ids = np.array(
            [runid for runid in used_run_ids if runid not in exclude_runs])
        print("  Excluded runs: {}".format(arr2str(exclude_runs, fmt="{:d}")))

    run_list = []
    livetimes = np.zeros(len(used_run_ids), dtype=float)
    for i, runid in enumerate(used_run_ids):
        ev_mask = (ev_runids == runid)
        ev_t = ev_times[ev_mask]
        # (Under-) Estimate livetime by difference of last and first event time
        tstart = astrotime.Time(np.amin(ev_t), format="mjd").iso
        tstop = astrotime.Time(np.amax(ev_t), format="mjd").iso
        livetimes[i] = np.amax(ev_t) - np.amin(ev_t)
        # Append valid run dict to run list
        run_list.append({"run": runid, "good_tstart": tstart,
                        "good_tstop": tstop})

    print("  Livetime: {:.3f} days".format(np.sum(livetimes)))
    print("  Had {} / {} runs with non-zero livetime.".format(
        np.sum(livetimes > 0), len(used_run_ids)))
    return run_list
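
For illustration, a minimal sketch of how ``make_run_list`` could be called; the event times and run IDs below are made-up toy values, not from any real sample:

import numpy as np

# Toy input: five events in two hypothetical runs (values are illustrative only).
ev_times = np.array([57000.10, 57000.15, 57000.30,   # first run
                     57001.05, 57001.40])            # second run
ev_runids = np.array([120100, 120100, 120100, 120101, 120101])

run_list = make_run_list(ev_times, ev_runids)
# Each entry looks like:
#   {"run": 120100, "good_tstart": "<ISO string>", "good_tstop": "<ISO string>"}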
Code example #2
# let's just remove the few runs here
exclude = {name: None for name in all_sample_names}
exclude["IC86, 2012-2014"] = [120028, 120029, 120030]
exclude["IC86, 2015"] = [126289, 126290, 126291]

outpath = os.path.join(PATHS.local, "runlists")
if not os.path.isdir(outpath):
    os.makedirs(outpath)

for name in all_sample_names:
    print("Making runlist for {}".format(name))
    if name in ps_sample_names:
        tracks = ps_tracks
    else:
        tracks = gfu_tracks

    exp_files = tracks.files(name)[0]
    exp = tracks.load(exp_files)

    _info = arr2str(exp_files if isinstance(exp_files, list) else [exp_files],
                    sep="\n    ")
    print("  Loaded {} track sample from skylab:\n    {}".format(
        "PS" if name in ps_sample_names else "GFU", _info))

    ev_times, ev_runids = exp["time"], exp["Run"]
    run_list = make_run_list(ev_times, ev_runids, exclude[name])
    fname = name.replace(", ", "_") + ".json"
    with open(os.path.join(outpath, fname), "w") as outf:
        json.dump(run_list, outf, indent=2)
        print("  Saved to:\n    {}".format(os.path.join(outpath, fname)))
Code example #3
                    "\nAllow overwrites (y/n)? ")
    if not res.lower() in ("y", "yes"):
        print("Abort. Script has done nothing.")
        sys.exit()
    print("  Using output directory '{}'.".format(outpath))
else:
    os.makedirs(outpath)
    print("Created output directory '{}'.".format(outpath))

files = np.array(glob(os.path.join(inpath, "*.json")))
file_names = np.array([os.path.basename(s) for s in files])
dataset_nums = {s.split("_")[0] for s in file_names}

# Combine all to a single JSON file
print("Reading files from directory:\n  {}".format(inpath))
print("  Found JSON files for datasets: {}".format(arr2str(dataset_nums)))
run_ids_per_sam = {}
event_ids_per_sam = {}
for num in dataset_nums:
    print("Combining IDs from set '{}':".format(num))
    run_ids = []
    event_ids = []
    _files = files[np.array([s.startswith(num) for s in file_names])]
    for fi in tqdm(_files):
        with open(fi, "r") as inf:
            di = json.load(inf)

        run_ids += di["run_id"]
        event_ids += di["event_id"]

    assert len(run_ids) == len(event_ids)
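
The loop above expects each input JSON file to carry parallel ``run_id`` and ``event_id`` lists of equal length; a minimal sketch of that assumed layout (file name and values are made up):

import json

# Illustrative only: one input file in the layout the loop expects.
example = {
    "run_id": [126289, 126289, 126290],
    "event_id": [1001, 1002, 57],
}
with open("3_example.json", "w") as outf:  # "<dataset>_<...>.json" naming as above
    json.dump(example, outf, indent=2)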