Example #1
0
    def __init__(self, n_cpu, **kwargs):
        """Spawn a pool of worker processes that run trials in parallel.

        :param n_cpu: number of worker processes to start (coerced to int).
        :param kwargs: keyword arguments forwarded to each worker's
            ``run_trial``; must contain ``"mh_dict"``. A shared ``"n_tasks"``
            counter is injected here before the workers are created.
        """
        # Joinable task queue shared with the workers, so the parent can
        # later block until all submitted work is processed.
        self.queue = JoinableQueue()
        # Separate queue funnelling worker log records back to this process.
        self.log_queue = Queue()
        # Shared integer counter, visible to all worker processes.
        self.n_tasks = Value('i', 0)
        kwargs["n_tasks"] = self.n_tasks

        self.processes = [
            Process(target=self.run_trial, kwargs=kwargs)
            for _ in range(int(n_cpu))
        ]

        # Compute expected event counts for every season's injector before
        # the workers start (presumably to warm caches that the forked
        # workers inherit -- TODO confirm).
        self.mh = MinimisationHandler.create(kwargs["mh_dict"])
        for season in self.mh.seasons.keys():
            inj = self.mh.get_injector(season)
            inj.calculate_n_exp()
        self.mh_dict = kwargs["mh_dict"]
        self.scales = []

        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter(
                "%(levelname)s: %(asctime)s - %(process)s - %(message)s"))
        # ql gets records from the queue and sends them to the handler

        # The listener must be running before workers start emitting records.
        ql = QueueListener(self.log_queue, handler)
        ql.start()

        for p in self.processes:
            p.start()
Example #2
0
 def do_asimov_scale_estimation(self):
     """Estimate the injection scale using the Asimov approximation.

     Builds a fresh ``MinimisationHandler`` from ``self.mh_dict``, asks
     it for a scale guess, stores that as the discovery-potential guess
     and takes 30% of it as the sensitivity guess.
     """
     logger.info("doing asimov estimation")
     handler = MinimisationHandler.create(self.mh_dict)
     scale_estimate = handler.guess_scale()
     logger.debug(f"estimated scale: {scale_estimate}")
     self.disc_guess = scale_estimate
     self.sens_guess = 0.3 * scale_estimate
Example #3
0
                    "name": name,
                    "mh_name": "fixed_weights",
                    "datasets": [IC86_1_dict],
                    "catalogue": ps_catalogue_name(sin_dec),
                    "llh_dict": llh_dict,
                    "inj kwargs": inj_dict,
                    "n_trials": 20,
                    "n_steps": 15,
                    "scale": scale,
                }

                pkl_file = make_analysis_pickle(mh_dict)

                # rd.submit_to_cluster(pkl_file, n_jobs=150)
                #
                mh = MinimisationHandler.create(mh_dict)
                # mh.iterate_run(n_steps=2, n_trials=20, scale=scale)
                mh.run(10, scale=float(scale))

                config_mh.append(mh_dict)

            res_dict[key] = config_mh

        all_res[gamma] = res_dict

rd.wait_for_cluster()

for (gamma, res_dict) in all_res.items():

    gamma_name = basename + str(gamma) + "/"
Example #4
0
    try:
        # Create the output directory; tolerate it already existing
        # (pre-``exist_ok`` idiom).
        os.makedirs(analysis_path)
    except OSError:
        pass

    pkl_file = analysis_path + "dict.pkl"

    # Persist the analysis dictionary (binary mode, as pickle requires).
    with open(pkl_file, "wb") as f:
        Pickle.dump(mh_dict, f)

    start = datetime.datetime.now()

    n_trials = 10

    # Time n_trials background-only trials (scale=0.0) to estimate the
    # per-trial wall-clock cost of this configuration.
    # NOTE(review): direct construction here, while other call sites use
    # MinimisationHandler.create(...) -- confirm this is intended.
    mh = MinimisationHandler(mh_dict)
    mh.run(n_trials=n_trials, scale=0.0)
    mh.clear()
    end = datetime.datetime.now()
    diff = (end - start).total_seconds() / n_trials

    times.append(diff)
# Output path for the timing-vs-scale summary plot.
savepath = plot_output_dir(name_root) + "scale.pdf"

try:
    # Ensure the plot directory exists (pre-``exist_ok`` idiom).
    os.makedirs(os.path.dirname(savepath))
except OSError:
    pass

plt.figure()
    os.makedirs(os.path.dirname(ts_path))
except OSError:
    pass

# Resume from a previously saved TS array if one exists.
if os.path.isfile(ts_path):
    # Pickle data must be read in binary mode: text mode ("r") raises a
    # TypeError/UnicodeDecodeError on Python 3 and risks newline mangling.
    with open(ts_path, "rb") as f:
        print("Loading ts_array")
        ts_array = Pickle.load(f)

else:
    print("Empty TS array")
    ts_array = []

# Creates a Minimisation Handler using the dictionary, and runs the trials

mh_pl = MinimisationHandler.create(mh_dict_pl)
mh_tm = MinimisationHandler.create(mh_dict_tm)

n_trials = 100

for i in range(n_trials):

    # One shared seed per trial so both handlers see the same
    # pseudo-experiment; the TS difference then isolates the model effect.
    seed = random.randint(0, 999999)
    mh_pl.set_random_seed(seed)
    res_pl = mh_pl.run_trial(scale=1.0)
    mh_tm.set_random_seed(seed)
    res_tm = mh_tm.run_trial(scale=1.0)
    # Test-statistic difference: second model (tm) minus first (pl).
    ts = res_tm["TS"] - res_pl["TS"]
    print(i, seed, res_tm, res_pl, ts)
    ts_array.append(ts)
Example #6
0
    def __init__(self, rh_dict, do_sens=True, do_disc=True, bias_error="std"):
        """Load merged trial results and derive sensitivity/discovery values.

        :param rh_dict: results-handler configuration; must provide
            ``"catalogue"``, ``"name"``, ``"mh_name"``, ``"scale"``,
            ``"inj_dict"`` and ``"dataset"``; may provide
            ``"allow_extrapolated_sensitivity"`` (default True).
        :param do_sens: if True, attempt the sensitivity calculation.
        :param do_disc: if True, attempt the discovery-potential calculation.
        :param bias_error: error measure used for the bias plots.
        """
        self.sources = load_catalogue(rh_dict["catalogue"])

        self.name = rh_dict["name"]
        self.mh_name = rh_dict["mh_name"]
        self.scale = rh_dict["scale"]

        self.results = dict()
        self.pickle_output_dir = name_pickle_output_dir(self.name)
        self.plot_dir = plot_output_dir(self.name)
        self.merged_dir = os.path.join(self.pickle_output_dir, "merged")

        # Whether a sensitivity outside the simulated scale range may be
        # extrapolated rather than rejected.
        self.allow_extrapolation = rh_dict.get(
            "allow_extrapolated_sensitivity", True)

        # Flare-specific plotting was retired; all handlers use the
        # non-flare plots now.
        self.make_plots = self.noflare_plots
        self.bias_error = bias_error

        # TS distribution type used when fitting background trials.
        self.ts_type = get_ts_fit_type(rh_dict)

        p0, bounds, names = MinimisationHandler.find_parameter_info(rh_dict)
        self.param_names = names
        self.bounds = bounds
        self.p0 = p0

        # this will have the TS threshold values as keys and a tuple containing
        # (injection scale, relative overfluctuations, error on overfluctuations)
        # as values
        self.overfluctuations = dict()

        # Results are filled in below; NaN marks "not (yet) computed".
        self.sensitivity = np.nan
        self.sensitivity_err = np.nan
        self.bkg_median = np.nan
        self.frac_over = np.nan
        self.disc_potential = np.nan
        self.disc_err = np.nan
        self.disc_potential_25 = np.nan
        self.disc_ts_threshold = np.nan
        self.extrapolated_sens = False
        self.extrapolated_disc = False
        self.flux_to_ns = np.nan

        self.inj = self.load_injection_values()
        self._inj_dict = rh_dict["inj_dict"]
        self._dataset = rh_dict["dataset"]

        try:
            self.merge_pickle_data()
        except FileNotFoundError:
            logger.warning("No files found at {0}".format(
                self.pickle_output_dir))

        try:
            self.find_ns_scale()
        except ValueError as e:
            # Message previously mislabelled this as a RuntimeError.
            logger.warning(
                "ValueError for ns scale factor: \n {0}".format(e))
        except IndexError as e:
            logger.warning(
                "IndexError for ns scale factor (only background "
                "trials?): \n {0}".format(e))

        self.plot_bias()

        if do_sens:
            try:
                self.find_sensitivity()
            except ValueError as e:
                # Message previously referred to the discovery potential.
                logger.warning(
                    "ValueError for sensitivity: \n {0}".format(e))

        if do_disc:
            try:
                self.find_disc_potential()
            except RuntimeError as e:
                logger.warning(
                    "RuntimeError for discovery potential: \n {0}".format(e))
            except TypeError as e:
                logger.warning(
                    "TypeError for discovery potential: \n {0}".format(e))
            except ValueError as e:
                # Message previously mislabelled this as a TypeError.
                logger.warning(
                    "ValueError for discovery potential: \n {0}".format(e))