"inj_dict": inj_kwargs,
                    "n_trials": 1,  # 10,
                    # "n_steps": 15,
                }

                mh = MinimisationHandler.create(mh_dict)
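                # guess_scale() returns a rough estimate of the flux scale
                # needed for the trials; the divisors below appear to be
                # empirical corrections on top of that guess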
                scale_factor = (
                    3 * mh.guess_scale() / 3 / 7 / scale_factor_per_decade[i]
                )
                print("Scale Factor: ", scale_factor_per_decade[i], scale_factor)

                # How to run for sources < 3162
                mh_dict["n_steps"] = 15
                mh_dict["scale"] = scale_factor
                # analyse(mh_dict, cluster=False, n_cpu=35, n_jobs=10)
                analyse(mh_dict, cluster=False, n_cpu=32, n_jobs=150)

                # How to run on the cluster for sources > 3162
                # _n_jobs = 50
                # scale_loop = np.linspace(0, scale_factor, 15)
                # print(scale_loop)
                # for scale in scale_loop[:4]:
                #     print('Running ' + str(mh_dict["n_trials"]) + ' trials with scale ' + str(scale))
                #     mh_dict["fixed_scale"] = scale
                #     # analyse(mh_dict, cluster=False, n_cpu=35, n_jobs=10)
                #     if scale == 0.:
                #         n_jobs = _n_jobs*10
                #     else:
                #         n_jobs = _n_jobs
                #     print("Submitting " + str(n_jobs) + " jobs")
                #     analyse(mh_dict, cluster=True, n_cpu=1, n_jobs=n_jobs)
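                # Once all jobs are submitted, wait_for_cluster() collects
                # them (see the later examples for this pattern)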
Example #2
                    "llh_dict":
                    llh_dict,
                    "scale":
                    scale,
                    "n_trials":
                    ntrials / cluster if cluster else ntrials,
                    "n_steps":
                    10,
                }

                job_id = None
                if args.analyse:
                    job_id = analyse(
                        mh_dict,
                        cluster=bool(cluster),
                        n_cpu=1 if cluster else 25,
                        n_jobs=cluster,
                        h_cpu="00:59:59",
                    )
                job_ids.append(job_id)

                full_res[str(n)] = mh_dict

            sin_res[str(sindec)] = full_res

        gamma_res[gamma] = sin_res

    res[smoothing] = gamma_res

if cluster and np.any(job_ids):
    logging.info(f"waiting for jobs {job_ids}")
    wait_for_cluster(job_ids)
Example #3
    "llh_sig_time_pdf": llh_time,
    "llh_bkg_time_pdf": {
        "time_pdf_name": "steady"
    },
}

catalogue = np.load(updated_sn_catalogue_name("IIn"))
catalogue["distance_mpc"] = np.array([1] * len(catalogue))
dir_path = os.path.dirname(os.path.realpath(__file__))
temp_save_catalogue_to = f"{dir_path}/temp_check_stack_bias_equal_dist_cat.npy"
np.save(temp_save_catalogue_to, catalogue)

mh_dict = {
    "name": "examples/crosscheck_stacking_equal_dist/",
    "mh_name": "large_catalogue",
    "dataset": ps_v002_p01.get_seasons(),
    "catalogue": temp_save_catalogue_to,
    #     "catalogue": ps_stack_catalogue_name(0.1, 0.3),
    #     "catalogue": tde_catalogue_name("jetted"),
    "inj_dict": inj_dict,
    "llh_dict": llh_dict,
    "scale": 10.0,
    "n_trials": 30,
    "n_steps": 10,
}

analyse(mh_dict, cluster=False, n_cpu=32)
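# ResultsHandler collects the completed trials and computes the sensitivity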
rh = ResultsHandler(mh_dict)

os.remove(temp_save_catalogue_to)
Example #4
            "catalogue": cat_path,
            "inj_dict": inj_kwargs,
            "llh_dict": llh_kwargs,
            "scale": scale,
            "n_trials": 100,
            "n_steps": 15,  # number of flux values
        }

        # if mh_name == "flare":

        analyse(mh_dict, n_cpu=24, cluster=False)

        # input("prompt")

        res[flare_length] = mh_dict

    src_res[label] = res

wait_for_cluster()  # for cluster

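# Containers for the per-source sensitivity and discovery-potential results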
sens = [[] for _ in src_res]
fracs = [[] for _ in src_res]
disc_pots = [[] for _ in src_res]
sens_e = [[] for _ in src_res]
disc_e = [[] for _ in src_res]
                "n_trials": 9,
            }

            mh = MinimisationHandler.create(mh_dict)

            # Set the scale for the injection
            scale_factor = 3 * mh.guess_scale() / 3
            # Use this block if:
            # 1. It is the first time you are running this code
            # 2. You want to run locally
            # 3. You are running on the cluster with < 1000 sources
            mh_dict["n_steps"] = 15
            mh_dict["scale"] = scale_factor
            analyse(mh_dict, cluster=False, n_cpu=8, n_jobs=100)
            # Uncomment the block below if you are running on the cluster
            # with > 1000 sources
            # _n_jobs = 100
            # scale_loop = np.linspace(0, scale_factor, 15)
            # print(scale_loop)
            # for scale in scale_loop[:]:
            #     print('Running ' + str(mh_dict["n_trials"]) + ' trials with scale ' + str(scale))
            #     mh_dict["fixed_scale"] = scale
            #     if scale == 0.:
            #         n_jobs = _n_jobs*10
            #     else:
            #         n_jobs = _n_jobs
            #     print("Submitting " + str(n_jobs) + " jobs")
Example #6
                    "mh_name": mh_name,
                    "dataset": custom_dataset(ps_v002_p01, catalogue,
                                              llh_dict["llh_sig_time_pdf"]),
                    "catalogue": cat_path,
                    "inj_dict": inj_dict,
                    "llh_dict": llh_dict,
                    "scale": scale,
                    "n_trials": 500/cluster if cluster else 1000,
                    "n_steps": 10
                }

                # call the main analyse function
                job_id = analyse(
                    mh_dict,
                    cluster=bool(cluster),
                    n_cpu=1 if cluster else 32,
                    n_jobs=cluster,
                    h_cpu="00:59:59",
                )
                job_ids.append(job_id)

                time_res[gamma] = mh_dict

            cat_res[time_key] = time_res

        full_res[cat] = cat_res

    # Wait for the cluster jobs. If there are none, this block is skipped
    if cluster and np.any(job_ids):
        logging.info(f'waiting for jobs {job_ids}')
        wait_for_cluster(job_ids)
Example #7
    "datasets": ps_v002_p01.get_seasons("IC86_1"),
    "catalogue": txs_cat_path,
    "inj_dict": inj_kwargs,
    "llh_dict": llh_kwargs,
    "n_trials": 1,
    "n_steps": 10
}

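# guess_scale() provides a starting flux scale, so that the trial flux
# steps bracket the sensitivity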
mh = MinimisationHandler.create(mh_dict)
scale = mh.guess_scale()

mh_dict["scale"] = scale

# Creates a Minimisation Handler using the dictionary, and runs the trials

analyse(mh_dict, n_cpu=2, n_jobs=1, cluster=False)
# wait_for_cluster()

# mh.iterate_run(scale, n_steps=mh_dict["n_steps"],
#                n_trials=mh_dict["n_trials"])

# Creates a Results Handler to analyse the results, and calculate the
# sensitivity. This is the flux that needs to arrive at Earth, in order for
# IceCube to see an overfluctuation 90% of the time. Prints this information.

rh = ResultsHandler(mh_dict)
sens = rh.sensitivity

# Converts the flux at Earth to a required luminosity from the source,
# by scaling with distance and accounting for redshift
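# The conversion itself was cut off in this snippet; a minimal sketch
# follows. This is illustrative only, not the flarestack implementation:
# the 1 Mpc distance, the E^-2 spectrum, and the integration bounds are
# all assumptions, and redshift corrections are neglected (they are
# negligible at such small distances).
import numpy as np

mpc_to_cm = 3.086e24  # 1 Mpc in cm
d_cm = 1.0 * mpc_to_cm  # assumed source distance of 1 Mpc

# For dN/dE = sens * (E/GeV)^-2, the energy flux integrated between the
# assumed bounds is sens * ln(e_max / e_min)
e_min, e_max = 1e2, 1e7  # GeV
energy_flux = sens * np.log(e_max / e_min)  # GeV cm^-2 s^-1

# Isotropic-equivalent luminosity from the inverse-square law
luminosity = 4 * np.pi * d_cm**2 * energy_flux  # GeV s^-1
print("Required luminosity: ", luminosity, "GeV/s")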
Example #8
            mh_dict = {
                "name": full_name,
                "mh_name": mh_name,
                "dataset": dataset,
                "catalogue": cat_path,
                "inj_dict": inj_dict,
                "llh_dict": llh_dict,
                "scale": scale,
                "n_trials": 10,
                "n_steps": 15
            }

            # Run jobs on cluster

            job_id = analyse(mh_dict,
                             cluster=cluster,
                             n_cpu=1 if cluster else 32,
                             h_cpu='00:59:59')
            job_ids.append(job_id)

            res[flare_length] = mh_dict

        src_res[label] = res

    cat_res[cat] = src_res

# Wait for the cluster jobs to finish
wait_for_cluster(job_ids)

for (cat, src_res) in cat_res.items():
Example #9
                    "n_trials": 1,
                }

                mh = MinimisationHandler.create(mh_dict)

                # Set the scale for the injection
                scale_factor = 3 * mh.guess_scale() / 3 / 7
                # Use this block if:
                # 1. It is the first time you are running this code
                # 2. You want to run locally
                # 3. You are running on the cluster with < 1000 sources
                mh_dict["n_steps"] = 15
                mh_dict["scale"] = scale_factor
                analyse(mh_dict, cluster=True, n_cpu=8, n_jobs=100)
                # Uncomment the block below if you are running on the cluster
                # with > 1000 sources
                # _n_jobs = 100
                # scale_loop = np.linspace(0, scale_factor, 15)
                # print(scale_loop)
                # for scale in scale_loop[:]:
                #     print('Running ' + str(mh_dict["n_trials"]) + ' trials with scale ' + str(scale))
                #     mh_dict["fixed_scale"] = scale
                #     if scale == 0.:
                #         n_jobs = _n_jobs*10
                #     else:
                #         n_jobs = _n_jobs
                #     print("Submitting " + str(n_jobs) + " jobs")