def ensure_input_mc_exists(cls):
    """Generate the input MC sample into ``cls.inputmcdirectory`` if missing.

    Runs a single-event, single-job MC production on a local dask client the
    first time it is called; subsequent calls are no-ops because the output
    directory then exists.

    :param cls: object (typically a test class) carrying ``inputmcdirectory``.
    """
    # The directory acts as a done-marker: only generate once.
    # (Original used an inconsistent 5-space indent; normalized to 4 spaces.)
    if not os.path.exists(cls.inputmcdirectory):
        with client(ClientType.LOCAL):
            generatemc(
                GenerateMCConfig(
                    WatchMakersConfig(numevents=1,
                                      directory=cls.inputmcdirectory),
                    numjobs=1,
                )
            ).compute()
def smallsignaldataset() -> WatchmanDataset:
    """Return a small signal-only WatchmanDataset for unit tests.

    The 10-event MC sample is produced on first use (local client) and cached
    under the system temp directory, so later calls reuse the existing files.
    """
    sep = os.sep
    directory = (
        f"{tempfile.gettempdir()}{sep}wm{sep}tmp{sep}"
        f"tmp_watchoptical_unittest_testmctoanalysis"
    )
    filenamepattern = f"{directory}{sep}*{sep}*{sep}*.root"
    # Generate the sample only when no ROOT file matches the expected layout.
    if not glob.glob(filenamepattern):
        with client(ClientType.LOCAL):
            job = generatemc(
                GenerateMCConfig(
                    WatchMakersConfig(numevents=10, directory=directory),
                    filenamefilter=issignalfile,
                )
            )
            job.compute()
    return WatchmanDataset([filenamepattern])
# Example 3
def main():
    """CLI entry point: apply the selected analysis algorithms to a dataset."""
    logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
    args = parsecml()
    _validateargs(args)
    dataset = _build_dataset(args.dataset)
    _log.info("running: {}".format(args.alg if args.alg else "all"))
    algorithms = _csalgsnames_to_list(args.alg, args.output)
    with client(args.client):
        # Drop events that failed to load before handing them to the algorithms.
        events = AnalysisEventTuple.fromAnalysisDataset(dataset)
        cached_apply_algorithms(
            algorithms,
            events.filter(lambda event: event is not None),
            force=args.force,
        )
def main():
    """CLI entry point: convert MC ROOT files to analysis files on disk.

    Writes the resulting AnalysisDataset description to
    ``<directory>/analysisdataset.pickle``.
    """
    args = parsecml()
    inputfiles = searchforrootfilesexcludinganalysisfiles(args.inputfiles)
    dataset = WatchmanDataset(inputfiles)
    with client(args.client):
        analysisfiles = mctoanalysis(
            dataset,
            config=MCToAnalysisConfig(directory=args.directory),
        ).compute()
    outdir = Path(args.directory)
    result = AnalysisDataset(
        sourcedataset=dataset,
        analysisfiles=list(analysisfiles),
        directory=outdir,
        inputfiles=[Path(name) for name in args.inputfiles],
    )
    result.write(outdir / "analysisdataset.pickle")
def _run(args):
    """Generate WATCHMAN MC according to the parsed command-line arguments.

    Builds the output directory, a RATDB override keyed by the optical
    parameters (attenuation, scattering), and a GenerateMCConfig, then runs
    the production on the requested dask client.
    """
    # os.path.join avoids doubled separators when args.directory ends in os.sep
    # (the original concatenated with "+ os.sep +").
    directory = os.path.join(args.directory, _getconfigdir(args))
    # exist_ok=True already tolerates a pre-existing directory, so the
    # original os.path.exists() guard was redundant.
    os.makedirs(directory, exist_ok=True)
    filenamefilter = _filenamefilterfromargs(args)
    # Tag the injected RATDB payload with the optical parameters so distinct
    # configurations are kept apart.
    injectratdb = _wrapindict(
        f"attenuation_{args.attenuation}_scattering_{args.scattering}",
        makeratdb(attenuation=args.attenuation, scattering=args.scattering),
    )
    config = GenerateMCConfig(
        WatchMakersConfig(directory=directory, numevents=args.num_events_per_job),
        numjobs=args.num_jobs,
        bonsaiexecutable=expandpath(args.bonsai),
        bonsailikelihood=expandpath(args.bonsai_likelihood),
        injectratdb=injectratdb,
        filenamefilter=filenamefilter,
    )
    with client(args.client):
        generatemc(config).compute()
# Example 6
def test_cached_algorithm():
    """Applying the same algorithms twice hits the cache on the second pass.

    First pass applies each algorithm to all 4 items; the second pass is
    served entirely from the cache (0 applications), while finish() still
    runs once per algorithm in both passes and the results are identical.
    """
    with client(ClientType.SINGLE):
        with NamedTemporaryFile() as f:
            cache = Cache(f.name)
            algs1 = [Identity("k1"), Identity("k2")]
            dataset = dask.bag.from_sequence(["A", "B", "C", "D"])
            result1 = cached_apply_algorithms(algs1, dataset, cache=cache)
            algs2 = [Identity("k1"), Identity("k2")]
            result2 = cached_apply_algorithms(algs2, dataset, cache=cache)
            # Individual asserts (instead of one `assert all([...])`) so a
            # failure reports exactly which counter was wrong.
            assert algs1[0].apply_count == 4
            assert algs1[1].apply_count == 4
            assert algs2[0].apply_count == 0
            assert algs2[1].apply_count == 0
            assert algs1[0].finish_count == 1
            assert algs1[1].finish_count == 1
            assert algs2[0].finish_count == 1
            assert algs2[1].finish_count == 1
            assert result1 == result2
# Example 7
def signaldatasetfixture() -> AnalysisDataset:
    """Return a cached signal-only AnalysisDataset, generating it if absent.

    The dataset is produced by running the ``watchopticalmc`` CLI once into a
    fixed temp directory; later calls load the pickled result directly.
    """
    import sys  # local import: used only to locate the running interpreter

    with client(ClientType.SINGLE):
        dirname = (
            f"{tempfile.gettempdir()}"
            f"{os.sep}wm{os.sep}tmp{os.sep}"
            "tmp_watchoptical_unittest_signaldataset_2"
        )
        if not os.path.exists(dirname):
            subprocess.run(
                [
                    # sys.executable guarantees the same interpreter/venv as
                    # the current process, unlike a bare "python" lookup.
                    sys.executable,
                    "-m",
                    "watchopticalmc",
                    "--signal-only",
                    "--num-events-per-job=20",
                    "--num-jobs=1",
                    "--client=local",
                    f"--directory={dirname}",
                ],
                # Fail loudly here rather than with a confusing missing-file
                # error at the load() below (original ignored the exit code).
                check=True,
            )
    return AnalysisDataset.load(Path(dirname) / "analysisdataset.pickle")
def test_mctoanalysis(smallsignaldataset):
    """mctoanalysis writes output files whose pmt/mc tables carry the
    expected columns."""
    with client(ClientType.SINGLE):
        config = MCToAnalysisConfig(directory=tempfile.mkdtemp())
        results = mctoanalysis(smallsignaldataset, config).compute()
        anal = [AnalysisEventTuple.load(f).anal for f in results]
        assert len(results) > 0
        assert all(os.path.exists(f.filename) for f in results)
        pmt_columns = ["pmt_" + axis for axis in ("t", "x", "y", "z")]
        mc_columns = ["mc_pdgcode"] + [
            f"mc_{quantity}_{end}"
            for quantity in ("t", "x", "y", "z", "ek")
            for end in ("start", "end")
        ]
        for a in anal:
            assert all(col in a.pmt.columns for col in pmt_columns)
            assert all(col in a.mc.columns for col in mc_columns)
# Example 9
def _run(args):
    """Process every input directory with _processdir on the chosen client."""
    with client(args.client):
        directories = dask.bag.from_sequence(args.input_directories)
        directories.map(_processdir(force=args.force)).compute()