def find_ns_scale(self):
    """Find the number of neutrinos corresponding to flux"""
    try:
        # If weights were not fitted, the number of neutrinos is stored
        # in a single "n_s" parameter
        if "n_s" in self.inj[self.scales[1]]:
            self.flux_to_ns = self.inj[self.scales[1]]["n_s"] / k_to_flux(
                self.scales_float[1]
            )

        # If weights were fitted, or for a cluster search, there is one
        # n_s parameter for each fitted source
        else:
            sc_dict = self.inj[self.scales[1]]
            self.flux_to_ns = sum(
                [sc_dict[k] for k in sc_dict if "n_s" in str(k)]
            ) / k_to_flux(self.scales_float[1])

        logger.debug(f"Conversion ratio of flux to n_s: {self.flux_to_ns:.2f}")

    except KeyError as e:
        logger.warning(
            f'KeyError: key "n_s" not found and minimizer is {self.mh_name}!: {e}'
        )
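
# For orientation, a minimal sketch of the flux convention assumed throughout
# this module. Hedged: the real k_to_flux is defined elsewhere in the package;
# the 1e-9 reference value is inferred from the injector comments below, which
# state that all injection fluxes are given in terms of k, equal to 1e-9.
def _k_to_flux_sketch(k):
    """Convert a dimensionless scale k to a flux normalisation at 1 GeV.

    Assumes k = 1 corresponds to 1e-9 GeV^-1 cm^-2 s^-1.
    """
    return k * 1e-9

# Under this assumption, find_ns_scale computes n_s per unit flux, so
# multiplying any flux by flux_to_ns recovers the expected neutrino count.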
def calculate_single_source(self, source, scale):
    # Calculate the effective injection time for simulation. Equal to
    # the overlap between the season and the injection time PDF for
    # the source, scaled if the injection PDF is not uniform in time.
    eff_inj_time = self.sig_time_pdf.effective_injection_time(source)

    # All injection fluxes are given in terms of k, equal to 1e-9
    inj_flux = k_to_flux(source["injection_weight_modifier"] * scale)

    # Fraction of total flux allocated to given source, assuming
    # standard candles with flux proportional to 1/d^2, multiplied by
    # the source's weight
    weight = calculate_source_weight(source) / self.weight_scale

    # Calculate the fluence, using the effective injection time.
    fluence = inj_flux * eff_inj_time * weight

    def source_eff_area(e):
        return self.effective_area_f(
            np.log10(e), np.sin(source["dec_rad"])
        ) * self.energy_pdf.f(e)

    int_eff_a = self.energy_pdf.integrate_over_E(source_eff_area)

    # Effective areas are given in m^2, but flux is in per cm^2
    int_eff_a *= 10 ** 4

    n_inj = fluence * int_eff_a

    return n_inj
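
# A worked sketch of the n_inj arithmetic above. Every input here is an
# illustrative assumption, not a package default; it only shows how fluence
# and the energy-integrated effective area combine into an event count.
def _n_inj_sketch(inj_flux, eff_inj_time, weight, int_eff_a_m2):
    """Expected injected events for one source.

    fluence = flux * effective injection time * source weight fraction;
    multiplying by the effective area (converted m^2 -> cm^2) gives counts.
    """
    fluence = inj_flux * eff_inj_time * weight  # GeV^-1 cm^-2
    return fluence * int_eff_a_m2 * 10 ** 4  # 1 m^2 = 10^4 cm^2

# e.g. a year of injection at k=1 for a source carrying the full weight:
# _n_inj_sketch(1e-9, 3.15e7, 1.0, int_eff_a_m2=...)  # area is detector-specific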
def calculate_fluence(self, source, scale, source_mc, band_mask, omega):
    """Function to calculate the fluence for a given source, and multiply
    the oneweights by this. After this step, the oneweight sum is equal
    to the expected neutrino number.

    :param source: Source to be calculated
    :param scale: Flux scale
    :param source_mc: MC that is close to source
    :param band_mask: Closeness mask for MC
    :param omega: Solid angle covered by MC mask
    :return: Modified source MC
    """
    # Calculate the effective injection time for simulation. Equal to
    # the overlap between the season and the injection time PDF for
    # the source, scaled if the injection PDF is not uniform in time.
    eff_inj_time = self.sig_time_pdf.effective_injection_time(source)

    # All injection fluxes are given in terms of k, equal to 1e-9
    inj_flux = k_to_flux(source["injection_weight_modifier"] * scale)

    # Fraction of total flux allocated to given source, assuming
    # standard candles with flux proportional to 1/d^2, multiplied by
    # the source's weight
    weight = calculate_source_weight(source) / self.weight_scale

    # Calculate the fluence, using the effective injection time.
    fluence = inj_flux * eff_inj_time * weight

    # Recalculate the oneweights to account for the declination
    # band and the relative distance of the sources.
    # Multiplies by the fluence, to enable calculation of n_inj,
    # the expected number of injected events.
    source_mc["ow"] = fluence * self.mc_weights[band_mask] / omega

    return source_mc
cat = load_catalogue(rh_dict["catalogue"])

try:
    guess = k_to_flux(rh_dict["scale"] / 1.5)
except KeyError:
    guess = np.nan

astro_guess = calculate_astronomy(
    guess, rh_dict["inj_dict"]["injection_energy_pdf"], cat
)

guess_disc.append(astro_guess[key] * inj_time)
guess_disc_e.append(astro_guess[e_key] * inj_time)

dist.append(max(cat["distance_mpc"]))
n.append(float(len(cat)))
def estimate_discovery_potential(seasons, inj_dict, sources, llh_dict, raw_scale=1.0):
    """Function to estimate the discovery potential given an injection model.
    It assumes an optimal LLH construction, i.e. aligned time windows and
    correct energy weighting etc.

    :param seasons: Seasons to be used
    :param inj_dict: Injection dictionary
    :param sources: Sources to be evaluated
    :param llh_dict: LLH dictionary
    :param raw_scale: Initial flux scale
    :return: An estimate for the discovery potential
    """
    logging.info("Trying to guess scale using AsimovEstimator.")

    weight_scale = calculate_source_weight(sources)

    livetime = 0.0
    n_s_tot = 0.0
    n_tot = 0.0
    n_tot_coincident = 0.0

    new_n_s = 0.0
    new_n_bkg = 0.0

    for season in seasons.values():
        new_llh_dict = dict(llh_dict)
        new_llh_dict["llh_name"] = "fixed_energy"
        new_llh_dict["llh_energy_pdf"] = inj_dict["injection_energy_pdf"]
        llh = LLH.create(season, sources, new_llh_dict)

        data = season.get_background_model()
        n_tot += np.sum(data["weight"])
        livetime += llh.bkg_time_pdf.livetime * 60 * 60 * 24

        def signalness(sig_over_background):
            """Converts a signal-over-background ratio into a signal
            probability, equal to ratio/(1 + ratio).

            :param sig_over_background: Ratio of signal to background probability
            :return: Probability of signal
            """
            return sig_over_background / (1.0 + sig_over_background)

        n_sigs = []
        n_bkgs = []
        ts_vals = []
        n_s_season = 0.0

        sig_times = np.array(
            [llh.sig_time_pdf.effective_injection_time(x) for x in sources]
        )
        source_weights = np.array([calculate_source_weight(x) for x in sources])
        mean_time = np.sum(sig_times * source_weights) / weight_scale

        fluences = (
            np.array([x * sig_times[i] for i, x in enumerate(source_weights)])
            / weight_scale
        )

        # Bin the sources in sin(dec), weighted by fluence, so each occupied
        # band can be replaced by a single dummy source
        res = np.histogram(
            np.sin(sources["dec_rad"]), bins=season.sin_dec_bins, weights=fluences
        )

        dummy_sources = []
        bounds = []
        n_eff_sources = []

        for i, w in enumerate(res[0]):
            if w > 0:
                lower = res[1][i]
                upper = res[1][i + 1]
                mid = np.mean([upper, lower])
                mask = np.logical_and(
                    np.sin(sources["dec_rad"]) > lower,
                    np.sin(sources["dec_rad"]) < upper,
                )

                # Effective number of sources in the band: (sum w)^2 / sum(w^2)
                n_eff_sources.append(
                    np.sum(fluences[mask]) ** 2.0 / np.sum(fluences[mask] ** 2.0)
                )

                dummy_sources.append(
                    (np.arcsin(mid), res[0][i], 1.0, 1.0, "dummy_{0}".format(mid))
                )
                bounds.append((lower, upper))

        dummy_sources = np.array(
            dummy_sources,
            dtype=np.dtype(
                [
                    ("dec_rad", float),
                    ("base_weight", float),
                    ("distance_mpc", float),
                    ("injection_weight_modifier", float),
                    ("source_name", str),
                ]
            ),
        )

        inj = season.make_injector(dummy_sources, **inj_dict)

        for j, dummy_source in enumerate(dummy_sources):
            lower, upper = bounds[j]
            n_eff = n_eff_sources[j]

            source_mc = inj.calculate_single_source(dummy_source, scale=raw_scale)

            if len(source_mc) == 0:
                logging.warning(
                    "Warning: no MC found for dummy source in declination band "
                    "({0:.2f}, {1:.2f})".format(np.arcsin(lower), np.arcsin(upper))
                )
                ts_vals.append(0.0)
                n_sigs.append(0.0)
                n_bkgs.append(0.0)
            else:
                # Gives the solid angle coverage of the sky for the band
                omega = 2.0 * np.pi * (upper - lower)

                data_mask = np.logical_and(
                    np.greater(data["dec"], np.arcsin(lower)),
                    np.less(data["dec"], np.arcsin(upper)),
                )
                local_data = data[data_mask]

                data_weights = (
                    signalness(llh.energy_weight_f(local_data))
                    * local_data["weight"]
                )

                mc_weights = signalness(llh.energy_weight_f(source_mc))

                true_errors = angular_distance(
                    source_mc["ra"],
                    source_mc["dec"],
                    source_mc["trueRa"],
                    source_mc["trueDec"],
                )

                median_sigma = np.mean(local_data["sigma"])

                area = np.pi * (2.0 * median_sigma) ** 2 / np.cos(
                    dummy_source["dec_rad"]
                )

                local_rate = np.sum(data_weights)

                n_bkg = np.sum(local_data["weight"])
                n_tot_coincident += n_bkg

                ratio_time = livetime / mean_time

                sig_spatial = signalness(
                    (
                        1.0
                        / (2.0 * np.pi * source_mc["sigma"] ** 2.0)
                        * np.exp(-0.5 * ((true_errors / source_mc["sigma"]) ** 2.0))
                    )
                    / llh.spatial_pdf.background_spatial(source_mc)
                )

                ra_steps = np.linspace(-np.pi, np.pi, 100)
                dec_steps = np.linspace(lower, upper, 10)

                mean_dec = np.mean(
                    signalness(
                        norm.pdf(
                            dec_steps,
                            scale=median_sigma / np.cos(dummy_source["dec_rad"]),
                            loc=np.mean([lower, upper]),
                        )
                        * (upper - lower)
                    )
                )
                mean_ra = np.mean(
                    signalness(
                        norm.pdf(ra_steps, scale=median_sigma, loc=0.0) * 2.0 * np.pi
                    )
                )

                bkg_spatial = mean_dec * mean_ra

                n_s_tot += np.sum(source_mc["ow"])
                n_s_season += np.sum(source_mc["ow"])

                med_sig = (
                    np.mean(sig_spatial * mc_weights)
                    * signalness(ratio_time)
                    * np.sum(source_mc["ow"])
                )
                med_bkg = (
                    np.mean(bkg_spatial * data_weights)
                    * (1.0 - signalness(ratio_time))
                    * n_bkg
                )

                new_n_s += med_sig
                new_n_bkg += med_bkg

    scaler_ratio = new_n_bkg / n_tot_coincident
    print("Scaler Ratio", scaler_ratio)

    disc_count = norm.ppf(norm.cdf(5.0), loc=0.0, scale=np.sqrt(new_n_bkg))

    # Simple Gaussian counting estimate, for comparison
    simple = 5.0 * np.sqrt(new_n_bkg)

    scale = disc_count / new_n_s
    print(scale)

    # Convert from scale factor to flux units
    scale = k_to_flux(scale) * raw_scale

    logging.info(
        "Estimated Discovery Potential is: {:.3g} GeV sr^-1 s^-1 cm^-2".format(scale)
    )

    return scale
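
# A short check of the 5-sigma counting approximation used above. This is a
# sketch, assuming Poisson background counts are well approximated by a
# Gaussian of width sqrt(n_bkg); scipy and numpy are the only dependencies.
from scipy.stats import norm
import numpy as np

def _disc_count_sketch(n_bkg):
    """Signal events needed for a 5-sigma excess over a Gaussian background."""
    return norm.ppf(norm.cdf(5.0), loc=0.0, scale=np.sqrt(n_bkg))

# Since norm.ppf(norm.cdf(5.0)) == 5.0, this reduces to 5 * sqrt(n_bkg),
# which is exactly the "simple" estimate computed alongside it above.
print(_disc_count_sketch(100.0))  # -> 50.0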
def get_time_integrated_flux(self):
    return k_to_flux(
        self.bkg_flux_model.get_norm()
        * self.get_time_pdf().effective_injection_time()
    )
print(" ") print(" ") int_xray = np.sum(cat["base_weight"] / 1e13 * 624.151) int_xray_flux.append(int_xray) # GeV cm-2 s-1 int_xray_flux_erg.append(np.sum(cat["base_weight"]) / 1e13) # erg # cm-2 s-1 fracs.append(np.sum(cat["base_weight"]) / full_flux) try: rh = ResultsHandler(rh_dict) print("Sens", rh.sensitivity) print("Disc", rh.disc_potential) print("Guess", rh_dict["scale"]) # guess.append(k_to_flux(rh_dict["scale"])* 2./3.) guess.append(k_to_flux(rh_dict["scale"]) / 3.) astro_sens, astro_disc = rh.astro_values( rh_dict["inj_dict"]["injection_energy_pdf"]) key = "Total Fluence (GeV cm^{-2} s^{-1})" sens_livetime.append( astro_sens[key]) # fluence=integrated over energy disc_pots_livetime.append(astro_disc[key]) ratio_sens.append(astro_sens[key] / int_xray) # fluence # normalized over tot xray flux ratio_disc.append(astro_disc[key] / int_xray) n_src.append(nr_srcs)
def plot_bias(self):
    x = sorted(self.results.keys())
    raw_x = [scale_shortener(i) for i in sorted([float(j) for j in x])]
    base_x = [k_to_flux(float(j)) for j in raw_x]
    base_x_label = r"$\Phi_{1GeV}$ (GeV$^{-1}$ cm$^{-2}$)"

    for i, param in enumerate(self.param_names):
        try:
            plt.figure()
            ax = plt.subplot(111)

            meds = []
            ulims = []
            llims = []
            trues = []

            for scale in raw_x:
                vals = self.results[scale]["Parameters"][param]

                if self.bias_error == "std":
                    med = np.median(vals)
                    meds.append(med)
                    sig = np.std(vals)
                    ulims.append(med + sig)
                    llims.append(med - sig)
                elif self.bias_error == "ci90":
                    med, llim, ulim = np.quantile(vals, [0.5, 0.05, 0.95])
                    meds.append(med)
                    llims.append(llim)
                    ulims.append(ulim)
                else:
                    raise ValueError(
                        f"Invalid value {self.bias_error} for bias_error!"
                    )

                true = self.inj[scale][param]
                trues.append(true)

            do_ns_scale = False

            if "n_s" in param:
                x = trues
                x_label = r"$n_{injected}$" + param.replace("n_s", "")
            else:
                x = base_x
                x_label = base_x_label

            # Decide whether to plot a second x-axis on the top axis,
            # indicating the number of injected neutrinos instead of the flux
            if "gamma" in param:
                if self.flux_to_ns is not None:
                    do_ns_scale = True

            ns_scale = ns_scale_label = None

            if do_ns_scale:
                ns_scale = self.flux_to_ns * max(base_x)
                ns_scale_label = "Number of neutrinos"

            plt.scatter(x, meds, color="orange")
            plt.plot(x, meds, color="black")
            plt.plot(x, trues, linestyle="--", color="red")
            plt.fill_between(x, ulims, llims, alpha=0.5, color="orange")

            try:
                ax.set_xlim(left=0.0, right=max(x))
                if min(trues) == 0.0:
                    ax.set_ylim(bottom=0.0)

                if do_ns_scale:
                    ax2 = ax.twiny()
                    ax2.grid(0)
                    ax2.set_xlim(0.0, ns_scale)
                    ax2.set_xlabel(ns_scale_label)
            except ValueError as e:
                logger.warning(f"{param}: {e}")

            ax.set_xlabel(x_label)
            ax.set_ylabel(param)
            plt.title("Bias (" + param + ")")

            savepath = os.path.join(self.plot_dir, "bias_" + param + ".pdf")
            logger.info("Saving bias plot to {0}".format(savepath))

            try:
                os.makedirs(os.path.dirname(savepath))
            except OSError:
                pass

            plt.tight_layout()
            plt.savefig(savepath)

        except KeyError as e:
            logger.warning(f"KeyError for {param}: {e}! Cannot make bias plots!")

        finally:
            plt.close()
def find_disc_potential(self):
    ts_path = os.path.join(self.plot_dir, "ts_distributions/0.pdf")

    try:
        bkg_dict = self.results[scale_shortener(0.0)]
    except KeyError:
        logger.error("No key equal to '0'")
        return

    bkg_ts = bkg_dict["TS"]

    disc_threshold = plot_background_ts_distribution(
        bkg_ts, ts_path, ts_type=self.ts_type
    )

    self.disc_ts_threshold = disc_threshold

    bkg_median = np.median(bkg_ts)

    x = sorted(self.results.keys())
    y = []
    y_25 = []

    x = [scale_shortener(i) for i in sorted([float(j) for j in x])]

    for scale in x:
        ts_array = np.array(self.results[scale]["TS"])
        frac = float(len(ts_array[ts_array > disc_threshold])) / float(len(ts_array))

        logger.info(
            "Fraction of overfluctuations is {0:.2f} above {1:.2f} "
            "(N_trials={2}) (Scale={3})".format(
                frac, disc_threshold, len(ts_array), scale
            )
        )

        y.append(frac)

        frac_25 = float(len(ts_array[ts_array > 25.0])) / float(len(ts_array))

        logger.info(
            "Fraction of overfluctuations is {0:.2f} above 25 "
            "(N_trials={1}) (Scale={2})".format(frac_25, len(ts_array), scale)
        )

        y_25.append(frac_25)

        self.make_plots(scale)

    x = np.array([float(s) for s in x])
    x_flux = k_to_flux(x)

    threshold = 0.5

    for i, y_val in enumerate([y, y_25]):

        def f(x, a, b, c):
            value = scipy.stats.gamma.cdf(x, a, b, c)
            return value

        best_f = None

        try:
            res = scipy.optimize.curve_fit(
                f, x, y_val, p0=[6, -0.1 * max(x), 0.1 * max(x)]
            )

            best_a = res[0][0]
            best_b = res[0][1]
            best_c = res[0][2]

            def best_f(x):
                return f(x, best_a, best_b, best_c)

            sol = scipy.stats.gamma.ppf(0.5, best_a, best_b, best_c)

            setattr(
                self,
                ["disc_potential", "disc_potential_25"][i],
                k_to_flux(sol),
            )

        except RuntimeError as e:
            logger.warning(f"RuntimeError for discovery potential!: {e}")

        xrange = np.linspace(0.0, 1.1 * max(x), 1000)

        savepath = os.path.join(self.plot_dir, "disc" + ["", "_25"][i] + ".pdf")

        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        ax1.scatter(x_flux, y_val, color="black")

        if best_f is not None:
            ax1.plot(k_to_flux(xrange), best_f(xrange), color="blue")

        ax1.axhline(threshold, lw=1, color="red", linestyle="--")
        ax1.axvline(self.sensitivity, lw=2, color="black", linestyle="--")
        ax1.axvline(self.disc_potential, lw=2, color="red")
        ax1.set_ylim(0.0, 1.0)
        ax1.set_xlim(0.0, k_to_flux(max(xrange)))
        ax1.set_ylabel(r"Overfluctuations relative to 5 $\sigma$ Threshold")
        plt.xlabel(r"Flux Normalisation @ 1GeV [ GeV$^{-1}$ cm$^{-2}$ s$^{-1}$]")

        if not np.isnan(self.flux_to_ns):
            ax2 = ax1.twiny()
            ax2.grid(0)
            ax2.set_xlim(0.0, self.flux_to_ns * k_to_flux(max(xrange)))
            ax2.set_xlabel(r"Number of neutrinos")

        fig.savefig(savepath)
        plt.close()

    if self.disc_potential > max(x_flux):
        self.extrapolated_disc = True

    msg = "EXTRAPOLATED " if self.extrapolated_disc else ""

    logger.info("{0}Discovery Potential is {1:.3g}".format(msg, self.disc_potential))
    logger.info(
        "Discovery Potential (TS=25) is {0:.3g}".format(self.disc_potential_25)
    )
def sensitivity_fit(self, savepath, ts_val):
    x, y, yerr = self.overfluctuations[ts_val]
    x_flux = k_to_flux(x)

    threshold = 0.9

    b = 1 - min(y)

    def f(x, a):
        value = 1 - b * np.exp(-a * x)
        return value

    popt, pcov = scipy.optimize.curve_fit(
        f, x, y, sigma=yerr, absolute_sigma=True, p0=[1.0 / max(x)]
    )

    perr = np.sqrt(np.diag(pcov))

    best_a = popt[0]

    def best_f(x, sd=0.0):
        a = best_a + perr * sd
        return f(x, a)

    fit = k_to_flux((1.0 / best_a) * np.log(b / (1 - threshold)))

    if fit > max(x_flux):
        extrapolation_msg = (
            "The sensitivity is beyond the range of the tested scales. "
            "The number is probably not good."
        )
        if self.allow_extrapolation:
            logger.warning(extrapolation_msg)
            extrapolated = True
        else:
            raise OverfluctuationError(extrapolation_msg)
    else:
        extrapolated = False

    xrange = np.linspace(0.0, 1.1 * max(x), 1000)

    lower = k_to_flux((1.0 / (best_a + perr)) * np.log(b / (1 - threshold)))
    upper = k_to_flux((1.0 / (best_a - perr)) * np.log(b / (1 - threshold)))

    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.errorbar(x_flux, y, yerr=yerr, color="black", fmt=" ", marker="o")
    ax1.plot(k_to_flux(xrange), best_f(xrange), color="blue")
    ax1.fill_between(
        k_to_flux(xrange),
        best_f(xrange, 1),
        best_f(xrange, -1),
        color="blue",
        alpha=0.1,
    )
    ax1.axhline(threshold, lw=1, color="red", linestyle="--")
    ax1.axvline(fit, lw=2, color="red")
    ax1.axvline(lower, lw=2, color="red", linestyle=":")
    ax1.axvline(upper, lw=2, color="red", linestyle=":")
    ax1.set_ylim(0.0, 1.0)
    ax1.set_xlim(0.0, k_to_flux(max(xrange)))
    ax1.set_ylabel("Overfluctuations above TS=" + "{:.2f}".format(ts_val))
    plt.xlabel(r"Flux Normalisation @ 1GeV [ GeV$^{-1}$ cm$^{-2}$ s$^{-1}$]")

    if not np.isnan(self.flux_to_ns):
        ax2 = ax1.twiny()
        ax2.grid(0)
        ax2.set_xlim(0.0, self.flux_to_ns * k_to_flux(max(xrange)))
        ax2.set_xlabel(r"Number of neutrinos")

    fig.savefig(savepath)
    plt.close()

    if len(np.where(np.array(y) < 0.95)[0]) < 2:
        raise OverfluctuationError(
            "Not enough points with overfluctuations under 95%, lower the injection scale!"
        )

    sens_err = np.array([fit - lower, upper - fit]).T[0]

    self.sensitivity = fit
    self.extrapolated_sens = extrapolated
    self.sensitivity_err = sens_err

    return fit, extrapolated, sens_err
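
# A quick algebraic check of the inversion used in sensitivity_fit. This is a
# sketch; a, b and the threshold below are illustrative numbers, not fitted
# values. The fit model is y(x) = 1 - b * exp(-a * x), so y(x) = threshold at
# x = (1 / a) * log(b / (1 - threshold)), which is the expression above.
import numpy as np

def _sens_inversion_sketch(a=2.0, b=0.8, threshold=0.9):
    x = (1.0 / a) * np.log(b / (1.0 - threshold))
    # Substituting back should recover the threshold overfluctuation fraction
    return 1.0 - b * np.exp(-a * x)

print(round(_sens_inversion_sketch(), 6))  # -> 0.9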
def ns(self):
    """Returns the injection scales, converted to numbers of signal neutrinos"""
    ns = np.array([k_to_flux(float(s)) for s in self.scales]) * self.flux_to_ns
    return ns
astro_sens, astro_disc = rh.astro_values(
    rh_dict["inj_dict"]["injection_energy_pdf"]
)

disc_convert = rh.disc_potential_25 / rh.disc_potential

disc.append(astro_disc[key] * inj_time)

guess_convert = k_to_flux(rh_dict["scale"] / 1.5) / rh.disc_potential

disc_e.append(astro_disc[e_key] * inj_time)
disc_25_e.append(astro_disc[e_key] * inj_time * disc_convert)

cat = load_catalogue(rh_dict["catalogue"])

guess = k_to_flux(rh_dict["scale"] / 1.5)