# Example #1
0
            100,
            "n_steps":
            15,  # number of flux values
        }

        # if mh_name == "flare":

        # Run the trials for this configuration locally (cluster=False)
        # on 24 cores; mh_dict was assembled just above this excerpt.
        analyse(mh_dict, n_cpu=24, cluster=False)

        # raw_input("prompt")

        # Keep the submitted dictionary keyed by flare length so the
        # results can be re-loaded in the collection loop below.
        res[flare_length] = mh_dict

    src_res[label] = res

# Block until any outstanding cluster jobs have finished.
wait_for_cluster()  # for cluster

# One empty accumulator list per entry of src_res; these are filled
# while iterating the results further down.
sens, fracs, disc_pots, sens_e, disc_e = (
    [[] for _ in src_res] for _ in range(5)
)

labels = []
for i, (f_type, res) in enumerate(sorted(src_res.items())):
    for (length, rh_dict) in sorted(res.items()):

        rh = ResultsHandler(rh_dict)

        inj_time = length * (60 * 60 * 24)
# Example #2
0
                        n_jobs=cluster,
                        h_cpu="00:59:59",
                    )
                job_ids.append(job_id)

                full_res[str(n)] = mh_dict

            sin_res[str(sindec)] = full_res

        gamma_res[gamma] = sin_res

    res[smoothing] = gamma_res

# Only block on the scheduler if jobs were actually submitted.
should_wait = cluster and np.any(job_ids)
if should_wait:
    logging.info(f"waiting for jobs {job_ids}")
    wait_for_cluster(job_ids)

# Collect results: iterate every smoothing / gamma / sin(dec)
# combination that was submitted above.
for smoothing, gamma_res in res.items():

    for gamma, sin_res in gamma_res.items():

        for sindec in same_sindecs:

            # Results are keyed by the string form of sindec,
            # matching how they were stored during submission.
            full_res = sin_res[str(sindec)]

            # Three parallel accumulators — presumably sensitivity
            # plus lower/upper bounds; TODO confirm against the code
            # that fills them (outside this excerpt).
            sens = [[], [], []]

            for n in full_res:

                logging.debug(f"n = {n}, type={type(n)}")
# Example #3
0
            #     print('Running ' + str(mh_dict["n_trials"]) + ' trials with scale ' + str(scale))
            #     mh_dict["fixed_scale"] = scale
            #     if scale == 0.:
            #         n_jobs = _n_jobs*10
            #     else:
            #         n_jobs = _n_jobs
            #     print("Submitting " + str(n_jobs) + " jobs")
            #     analyse(mh_dict, cluster=True, n_cpu=1, n_jobs=n_jobs)

            gamma_dict[gamma_index] = mh_dict

        res[nr_srcs] = gamma_dict

    all_res[unique_key] = res

# Block until all submitted cluster jobs have completed.
wait_for_cluster()

# Raise root-logger verbosity to INFO for the results stage.
root_logger = logging.getLogger()
root_logger.setLevel("INFO")
# Create plots and save data in file data.out
for (cat_key, res_dict) in all_res.items():

    # cat_key has the form "<agn_type>_<xray_cat>"; split it back
    # into its two components.
    agn_type = cat_key.split("_")[0]

    xray_cat = cat_key.split(str(agn_type) + '_')[-1]

    # Load the full source catalogue for this AGN type / X-ray
    # catalogue combination.
    full_cat = load_catalogue(agn_catalogue_name(agn_type, xray_cat))

    # Total baseline weight summed over the whole catalogue —
    # presumably used to normalise per-source fluxes; TODO confirm
    # against the code below this excerpt.
    full_flux = np.sum(full_cat["base_weight"])

    # neutrino flux (using joint paper) divided by AGN flux calculated with luminosity function