# Top-level accumulators for the scan below (populated later in the file).
all_res = {}

# Per-run timing records (appended elsewhere; empty at start).
running_time = []
# Scan over catalogue (type, selection-method) pairs; [:1] restricts the run
# to only the first pair in complete_cats_north.
for (cat_type, method) in complete_cats_north[:1]:

    # Key used to label this catalogue/method combination in result dicts.
    unique_key = cat_type + "_" + method

    print(unique_key)

    gamma_dict = dict()

    # For each spectral index, evaluate every subset size of brightest sources.
    for gamma_index in gammas:
        res = dict()
        for j, nr_srcs in enumerate(nr_brightest_sources):

            # Path to the subset catalogue with the nr_srcs brightest sources.
            cat_path = agn_subset_catalogue(cat_type, method, nr_srcs)
            print("Loading catalogue", cat_path, " with ", nr_srcs, "sources")
            catalogue = load_catalogue(cat_path)
            # Load the raw array as well to report the summed base weight
            # (scaled by 1e-13; presumably a flux normalisation -- TODO confirm).
            cat = np.load(cat_path)
            print("Total flux is: ", cat["base_weight"].sum() * 1e-13)
            full_name = generate_name(unique_key, nr_srcs, gamma_index)

            res_e_min = dict()
            # scale factor of neutrino injection, tuned for each energy bin
            scale_factor_per_decade = [0.2, 0.5, 1, 0.57, 0.29]

            # One analysis per energy bin; the bin's lower edge is folded
            # into the job name so outputs land in separate directories.
            for i, (e_min, e_max) in enumerate(bins[:]):
                full_name_en = full_name + "Emin={0:.2f}".format(e_min) + "/"

                print("Full name for ", nr_srcs, " sources is", full_name_en)
    "injection_sig_time_pdf": {
        "time_pdf_name": "steady"
    },
    "injection_energy_pdf": {
        "energy_pdf_name": "power_law",
        "gamma": 2.0
    }
}

# Create a catalogue containing the n_sources brightest sources in the
# radio-loud AGN core analysis. This will be used with IC40 to stress-test
# the 'large_catalogue' method for many sources.

# Number of brightest sources to include in the subset catalogue.
n_sources = 150

# Subset catalogue for the radio-loud, radio-selected AGN sample.
catalogue = agn_subset_catalogue("radioloud", "radioselected", n_sources)

# These results arise from high-statistics sensitivity calculations,
# and can be considered the "true" answers. The results we obtain will be
# compared to these values.

# Each entry is a pair of reference best-fit values; presumably
# [n_s, gamma] per tested injection spectrum -- TODO confirm.
true_parameters = [[0.0, 2.33905480645302], [14.379477037814556, 4.0]]


class TestTimeIntegrated(unittest.TestCase):
    """Regression test whose results are checked against the reference
    values in ``true_parameters`` above."""

    def setUp(self):
        # No fixtures required; present to satisfy the unittest interface.
        pass

    def test_declination_sensitivity(self):

        logging.info("Testing 'large_catalogue' MinimisationHandler class "