def get_npred_map():
    position = SkyCoord(0.0, 0.0, frame="galactic", unit="deg")
    energy_axis = MapAxis.from_bounds(
        1, 100, nbin=30, unit="TeV", name="energy", interp="log"
    )
    exposure = Map.create(
        binsz=0.02,
        map_type="wcs",
        skydir=position,
        width="2 deg",
        axes=[energy_axis],
        coordsys="GAL",
        unit="cm2 s",
    )

    spatial_model = GaussianSpatialModel(
        lon_0="0 deg", lat_0="0 deg", sigma="0.2 deg", frame="galactic"
    )
    spectral_model = PowerLawSpectralModel(amplitude="1e-11 cm-2 s-1 TeV-1")
    skymodel = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model)

    # Fill the exposure map with a constant value
    exposure.data = 1e14 * np.ones(exposure.data.shape)

    evaluator = MapEvaluator(model=skymodel, exposure=exposure)
    npred = evaluator.compute_npred()
    return evaluator, npred
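# A minimal usage sketch (an assumption, not part of the original function):
# wrap the returned npred array in a map on the exposure geometry to inspect
# it, following the WcsNDMap(geom, npred) pattern used elsewhere in this file.
# Assumes WcsNDMap is imported from gammapy.maps and that the evaluator keeps
# a reference to the exposure passed above.
evaluator, npred = get_npred_map()
npred_map = WcsNDMap(evaluator.exposure.geom, npred)
npred_map.sum_over_axes().plot(add_cbar=True)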
def calc_bk(self, lon0, lat0, sig, amp):
    """Return the diffuse model templates S_k and the computed normalizations b_k."""
    # Define sky model to fit the data
    ind = 2.0
    spatial_model = SkyGaussian(lon_0=lon0, lat_0=lat0, sigma=sig)
    spectral_model = PowerLaw(index=ind, amplitude=amp, reference="1 TeV")
    model = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model)

    # For simulations, we can have the same npred map
    b_k = []
    Sk_list = []
    for count, bkg, exp in zip(
        self.count_list, self.background_list, self.exposure_list
    ):
        evaluator = MapEvaluator(model=model, exposure=exp)
        npred = evaluator.compute_npred()
        geom = exp.geom
        diffuse_map = WcsNDMap(geom, npred)  # this is S_k

        Bk = bkg.data
        Sk = diffuse_map.data
        Nk = count.data

        # Zero out pixels without exposure or without background
        not_has_exposure = ~(exp.data > 0)
        not_has_bkg = ~(Bk > 0)
        with np.errstate(divide="ignore", invalid="ignore"):
            S_B = np.divide(Sk, Bk)
        S_B[not_has_exposure] = 0.0
        S_B[not_has_bkg] = 0.0

        # S_k is NaN for large separations; to be investigated. Temporary fix:
        # if np.isnan(np.sum(S_B)):
        #     S_B = np.zeros(S_B.shape)

        delta = np.power(np.sum(Nk) / np.sum(Bk), 2.0) - 4.0 * np.sum(S_B) / np.sum(Bk)
        # bk1 = (np.sum(Nk) / np.sum(Bk) - np.sqrt(delta)) / 2.0
        bk2 = (np.sum(Nk) / np.sum(Bk) + np.sqrt(delta)) / 2.0
        b_k.append(bk2)
        Sk_list.append(diffuse_map)

    return Sk_list, b_k
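    # A note on the algebra above (inferred from the code, not stated in the
    # source): with N = sum(Nk), B = sum(Bk) and SB = sum(Sk / Bk), bk2 is the
    # larger root of the quadratic
    #
    #     B * b**2 - N * b + SB = 0
    #
    # so delta = (N / B)**2 - 4 * SB / B is its discriminant and
    # b = (N / B + sqrt(delta)) / 2; the smaller root bk1 is kept commented out.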
psf_kernel.psf_kernel_map.sum_over_axes().plot(stretch="log", add_cbar=True);


# ## Background
#
# Let's compute a background cube, with the predicted number of background
# events per pixel from the diffuse Galactic and isotropic model components.
# For this, we use the
# [gammapy.cube.MapEvaluator](http://docs.gammapy.org/dev/api/gammapy.cube.MapEvaluator.html)
# to multiply with the exposure and apply the PSF. The Fermi-LAT energy
# dispersion at high energies is small, so we neglect it here.

# In[ ]:


model = SkyDiffuseCube(diffuse_galactic)

evaluator = MapEvaluator(model=model, exposure=exposure, psf=psf_kernel)
background_gal = counts.copy(data=evaluator.compute_npred())
background_gal.sum_over_axes().plot()
print("Background counts from Galactic diffuse: ", background_gal.data.sum())


# In[ ]:


model = SkyModel(SkyDiffuseConstant(), diffuse_iso)

evaluator = MapEvaluator(model=model, exposure=exposure, psf=psf_kernel)
background_iso = counts.copy(data=evaluator.compute_npred())
background_iso.sum_over_axes().plot()
print("Background counts from isotropic diffuse: ", background_iso.data.sum())
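# In[ ]:


# A possible next step (a sketch, not from the original notebook): combine the
# two components into a single total-background cube before fitting.
background_total = background_gal.copy()
background_total.data += background_iso.data
print("Total background counts: ", background_total.data.sum())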
)

# Define some observation parameters
# (we are not simulating many pointings / observations)
pointing = SkyCoord(0.2, 0.5, unit="deg", frame="galactic")
livetime = 20 * u.hour

exposure_map = make_map_exposure_true_energy(
    pointing=pointing, livetime=livetime, aeff=aeff, geom=geom
)

evaluator = MapEvaluator(model=compound_model, exposure=exposure_map)
npred = evaluator.compute_npred()
npred_map = WcsNDMap(geom, npred)

fig, ax, cbar = npred_map.sum_over_axes().plot(add_cbar=True)
ax.scatter(
    [lon_0_1, lon_0_2, pointing.galactic.l.degree],
    [lat_0_1, lat_0_2, pointing.galactic.b.degree],
    transform=ax.get_transform("galactic"),
    marker="+",
    color="cyan",
)
# plt.show()
plt.clf()

# Draw a Poisson realization of the predicted counts
rng = get_random_state(42)
counts = rng.poisson(npred)
)

# Define some observation parameters
# (we are not simulating many pointings / observations)
pointing = SkyCoord(0.2, 0.5, unit="deg", frame="galactic")
livetime = 20 * u.hour

exposure_map = make_map_exposure_true_energy(
    pointing=pointing, livetime=livetime, aeff=aeff, geom=geom
)

evaluator = MapEvaluator(model=models, exposure=exposure_map)
npred = evaluator.compute_npred()
npred_map = WcsNDMap(geom, npred)

fig, ax, cbar = npred_map.sum_over_axes().plot(add_cbar=True)
ax.scatter(
    [lon_0_1, lon_0_2, pointing.galactic.l.degree],
    [lat_0_1, lat_0_2, pointing.galactic.b.degree],
    transform=ax.get_transform("galactic"),
    marker="+",
    color="cyan",
)
# plt.show()
plt.clf()

# Draw a Poisson realization of the predicted counts
rng = get_random_state(42)
counts = rng.poisson(npred)
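# A short follow-up sketch (an assumption, reusing the geom defined above):
# put the simulated counts into a map so they can be plotted or written to FITS.
counts_map = WcsNDMap(geom, counts)
counts_map.sum_over_axes().plot(add_cbar=True)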
def run_region(self, kr, lon, lat, radius):
    # TODO: for now we have to read/create the allsky maps in each job,
    # because we can't pickle <functools._lru_cache_wrapper> objects;
    # move this back to __init__ when fixed.

    # Exposure
    exposure_hpx = Map.read(
        self.datadir + "/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz"
    )
    exposure_hpx.unit = "cm2 s"

    # Galactic interstellar emission model (IEM) background
    infile = self.datadir + "/catalogs/fermi/gll_iem_v06.fits.gz"
    outfile = self.resdir + "/gll_iem_v06_extra.fits"
    model_iem = extrapolate_iem(infile, outfile, self.logEc_extra)

    # ROI
    roi_time = time()
    ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
    width = 2 * (radius + self.psf_margin)

    # Counts
    counts = Map.create(
        skydir=ROI_pos,
        width=width,
        proj="CAR",
        coordsys="GAL",
        binsz=self.dlb,
        axes=[self.energy_axis],
        dtype=float,
    )
    counts.fill_by_coord(
        {"skycoord": self.events.radec, "energy": self.events.energy}
    )

    axis = MapAxis.from_nodes(
        counts.geom.axes[0].center, name="energy", unit="GeV", interp="log"
    )
    wcs = counts.geom.wcs
    geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
    coords = counts.geom.get_coord()

    # Exposure, interpolated onto the counts geometry
    data = exposure_hpx.interp_by_coord(coords)
    exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

    # Read PSF
    psf_kernel = PSFKernel.from_table_psf(
        self.psf, counts.geom, max_radius=self.psf_margin * u.deg
    )

    # Energy dispersion (diagonal response)
    e_true = exposure.geom.axes[0].edges
    e_reco = counts.geom.axes[0].edges
    edisp = EnergyDispersion.from_diagonal_response(e_true=e_true, e_reco=e_reco)

    # Fit mask: exclude a PSF-sized margin at the ROI edges
    if coords["lon"].min() < 90 * u.deg and coords["lon"].max() > 270 * u.deg:
        coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
    mask = (
        (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
        & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
        & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
        & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg)
    )
    mask_fermi = WcsNDMap(counts.geom, mask)

    # IEM
    eval_iem = MapEvaluator(
        model=model_iem, exposure=exposure, psf=psf_kernel, edisp=edisp
    )
    bkg_iem = eval_iem.compute_npred()

    # ISO
    eval_iso = MapEvaluator(model=self.model_iso, exposure=exposure, edisp=edisp)
    bkg_iso = eval_iso.compute_npred()

    # Merge IEM and ISO so that only one local normalization is fitted
    background_total = bkg_iem + bkg_iso
    background_model = BackgroundModel(background_total)
    background_model.parameters["norm"].min = 0.0

    # Sources model
    in_roi = self.FHL3.positions.galactic.contained_by(wcs)
    FHL3_roi = []
    for ks in range(len(self.FHL3.table)):
        if in_roi[ks]:
            model = self.FHL3[ks].sky_model()
            model.spatial_model.parameters.freeze_all()  # freeze spatial parameters
            model.spectral_model.parameters["amplitude"].min = 0.0
            if isinstance(model.spectral_model, PowerLawSpectralModel):
                model.spectral_model.parameters["index"].min = 0.1
                model.spectral_model.parameters["index"].max = 10.0
            else:
                model.spectral_model.parameters["alpha"].min = 0.1
                model.spectral_model.parameters["alpha"].max = 10.0
            FHL3_roi.append(model)
    model_total = SkyModels(FHL3_roi)

    # Dataset
    dataset = MapDataset(
        model=model_total,
        counts=counts,
        exposure=exposure,
        psf=psf_kernel,
        edisp=edisp,
        background_model=background_model,
        mask_fit=mask_fermi,
        name="3FHL_ROI_num" + str(kr),
    )
    cat_stat = dataset.stat_sum()

    datasets = Datasets([dataset])
    fit = Fit(datasets)
    results = fit.run(optimize_opts=self.optimize_opts)
    print("ROI_num", str(kr), "\n", results)
    fit_stat = datasets.stat_sum()

    # Save results only if the optimization succeeded
    if results.message != "Optimization failed.":
        datasets.to_yaml(
            path=Path(self.resdir),
            prefix=dataset.name,
            overwrite=True,
        )
        np.save(
            self.resdir + "/3FHL_ROI_num" + str(kr) + "_covariance.npy",
            results.parameters.covariance,
        )
        np.savez(
            self.resdir + "/3FHL_ROI_num" + str(kr) + "_fit_infos.npz",
            message=results.message,
            stat=[cat_stat, fit_stat],
        )

    exec_time = time() - roi_time
    print("ROI", kr, " time (s): ", exec_time)

    # Flux points for catalog sources in this ROI above the significance cut
    for model in FHL3_roi:
        if (
            self.FHL3[model.name].data["ROI_num"] == kr
            and self.FHL3[model.name].data["Signif_Avg"] >= self.sig_cut
        ):
            flux_points = FluxPointsEstimator(
                datasets=datasets,
                e_edges=self.El_flux,
                source=model.name,
                sigma_ul=2.0,
            ).run()
            filename = self.resdir + "/" + model.name + "_flux_points.fits"
            flux_points.write(filename, overwrite=True)

    exec_time = time() - roi_time - exec_time
    print("ROI", kr, " Flux points time (s): ", exec_time)
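    # Hypothetical follow-up (not part of the original script): the saved fit
    # outputs can be reloaded later for offline inspection with plain numpy,
    # e.g. for ROI number 0:
    #
    #     cov = np.load(resdir + "/3FHL_ROI_num0_covariance.npy")
    #     infos = np.load(resdir + "/3FHL_ROI_num0_fit_infos.npz")
    #     print(infos["message"], infos["stat"])  # stat = [cat_stat, fit_stat]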
e_reco = counts.geom.axes[0].edges
edisp = EnergyDispersion.from_diagonal_response(e_true=e_true, e_reco=e_reco)


# ## Background
#
# Let's compute a background cube, with the predicted number of background
# events per pixel from the diffuse Galactic and isotropic model components.
# For this, we use the
# [gammapy.cube.MapEvaluator](https://docs.gammapy.org/dev/api/gammapy.cube.MapEvaluator.html)
# to multiply with the exposure and apply the PSF. The Fermi-LAT energy
# dispersion at high energies is small, so we approximate it here with the
# diagonal response constructed above.

# In[ ]:


model_diffuse = SkyDiffuseCube(diffuse_galactic, name="diffuse")

eval_diffuse = MapEvaluator(
    model=model_diffuse, exposure=exposure, psf=psf_kernel, edisp=edisp
)
background_gal = eval_diffuse.compute_npred()
background_gal.sum_over_axes().plot()
print("Background counts from Galactic diffuse: ", background_gal.data.sum())


# In[ ]:


model_iso = SkyModel(ConstantSpatialModel(), diffuse_iso, name="diffuse-iso")

eval_iso = MapEvaluator(model=model_iso, exposure=exposure, edisp=edisp)
background_iso = eval_iso.compute_npred()
background_iso.sum_over_axes().plot(add_cbar=True)
print("Background counts from isotropic diffuse: ", background_iso.data.sum())
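# In[ ]:


# A possible next step (a sketch, not in the original notebook): merge the two
# components and wrap them in a BackgroundModel so that a single normalization
# can be fitted, mirroring what the 3FHL region script above does. Assumes
# BackgroundModel is imported from gammapy.cube.
background_total = background_gal + background_iso
background_model = BackgroundModel(background_total)
background_model.parameters["norm"].min = 0.0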