def make_mean_rmf(energy_true, energy_reco, center, ObsList, outdir, source_name=""):
    """Compute the mean RMF (energy dispersion) for a set of observations and a given energy binning.

    Parameters
    ----------
    energy_true: tuple for the true energy binning: (Emin, Emax, nbins)
    energy_reco: tuple for the reco energy binning: (Emin, Emax, nbins)
    center: SkyCoord of the source
    ObsList: ObservationList used to compute the RMF (can differ from the data_store, e.g. G0p9 for the GC)
    outdir: directory where the FITS file will go
    source_name: name of the source for which you want to compute the mean RMF

    Returns
    -------
    """
    # Here all the observations have a center at less than 2 degrees from the Crab, so it is OK to
    # estimate the mean RMF at the Crab source position (the area is defined for an offset of 2 degrees...)
    emin_true, emax_true, nbin_true = energy_true
    emin_reco, emax_reco, nbin_reco = energy_reco
    energy_true_bins = EnergyBounds.equal_log_spacing(emin_true, emax_true, nbin_true, 'TeV')
    energy_reco_bins = EnergyBounds.equal_log_spacing(emin_reco, emax_reco, nbin_reco, 'TeV')
    rmf = ObsList.make_mean_edisp(position=center, e_true=energy_true_bins, e_reco=energy_reco_bins)
    rmf.write(outdir + "/mean_rmf" + source_name + ".fits", clobber=True)
def make_mean_rmf(energy_true, energy_reco, center, ObsList):
    """Compute the mean RMF (energy dispersion) for a set of observations and a given energy binning.

    Parameters
    ----------
    energy_true: tuple for the true energy axis: (Emin, Emax, nbins)
    energy_reco: tuple for the reco energy axis: (Emin, Emax, nbins)
    center: SkyCoord of the source
    ObsList: ObservationList used to compute the RMF (can differ from the data_store, e.g. G0p9 for the GC)

    Returns
    -------
    rmf: `~gammapy.irf.EnergyDispersion`
        Stacked EDISP for a set of observations
    """
    # Here all the observations have a center at less than 2 degrees from the Crab, so it is OK to
    # estimate the mean RMF at the Crab source position (the area is defined for an offset of 2 degrees...)
    emin_true, emax_true, nbin_true = energy_true
    emin_reco, emax_reco, nbin_reco = energy_reco
    energy_true_bins = EnergyBounds.equal_log_spacing(emin_true, emax_true, nbin_true, 'TeV')
    energy_reco_bins = EnergyBounds.equal_log_spacing(emin_reco, emax_reco, nbin_reco, 'TeV')
    rmf = ObsList.make_mean_edisp(position=center, e_true=energy_true_bins, e_reco=energy_reco_bins)
    return rmf
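# Usage sketch for the helper above (not from the original source): assumes an existing
# gammapy 0.x ObservationList called `obs_list`; the binning values and the position are
# illustrative only.
from astropy.coordinates import SkyCoord

energy_true = (0.1, 100, 40)   # hypothetical (Emin, Emax, nbins) in TeV
energy_reco = (0.5, 40, 20)
center = SkyCoord(83.63, 22.01, unit="deg", frame="icrs")

rmf = make_mean_rmf(energy_true, energy_reco, center, obs_list)
rmf.write("mean_rmf_crab.fits", clobber=True)  # same write call as in the variant above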
def read(cls, filename, offset='0.5 deg'):
    """Read from a FITS file.

    Compute RMF at 0.5 deg offset on the fly.

    Parameters
    ----------
    filename : `str`
        File containing the IRFs
    """
    filename = str(make_path(filename))

    with fits.open(filename, memmap=False) as hdulist:
        aeff = EffectiveAreaTable.from_hdulist(hdulist=hdulist)
        edisp = EnergyDispersion2D.read(filename, hdu='ENERGY DISPERSION')
        bkg = BgRateTable.from_hdulist(hdulist=hdulist)
        psf = Psf68Table.from_hdulist(hdulist=hdulist)
        sens = SensitivityTable.from_hdulist(hdulist=hdulist)

    # Create rmf with appropriate dimensions (e_reco->bkg, e_true->area)
    e_reco_min = bkg.energy.lo[0]
    e_reco_max = bkg.energy.hi[-1]
    e_reco_bin = bkg.energy.nbins
    e_reco_axis = EnergyBounds.equal_log_spacing(
        e_reco_min, e_reco_max, e_reco_bin, 'TeV',
    )

    e_true_min = aeff.energy.lo[0]
    e_true_max = aeff.energy.hi[-1]
    e_true_bin = aeff.energy.nbins
    e_true_axis = EnergyBounds.equal_log_spacing(
        e_true_min, e_true_max, e_true_bin, 'TeV',
    )

    rmf = edisp.to_energy_dispersion(
        offset=offset, e_reco=e_reco_axis, e_true=e_true_axis,
    )

    return cls(aeff=aeff, bkg=bkg, edisp=edisp, psf=psf, sens=sens, rmf=rmf)
def get_psf_table(psf, emin, emax, bins):
    """Return a table of energy and containment radius
    from an EnergyDependentTablePSF object."""
    # Container for data
    data = []

    # Loop over energies and determine PSF containment radius
    ebounds = EnergyBounds.equal_log_spacing(emin, emax, bins, 'MeV')
    for energy in ebounds:
        energy_psf = psf.table_psf_at_energy(energy)
        containment_68 = energy_psf.containment_radius(0.68)
        containment_95 = energy_psf.containment_radius(0.95)
        row = dict(ENERGY=energy.value,
                   CONT_68=containment_68.value,
                   CONT_95=containment_95.value)
        data.append(row)

    # Construct table and add correct units to columns
    table = Table(data)
    table['ENERGY'].unit = energy.unit
    table['CONT_68'].unit = containment_68.unit
    table['CONT_95'].unit = containment_95.unit

    return table
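# Usage sketch (not from the original source): assumes a Fermi-LAT style PSF file that
# gammapy 0.x `EnergyDependentTablePSF.read` can load; the file path and energy range are
# illustrative only.
from gammapy.irf import EnergyDependentTablePSF

psf = EnergyDependentTablePSF.read("psf.fits")
# Containment radii between 1 GeV and 100 GeV (energies are in MeV, as expected above)
table = get_psf_table(psf, 1e3, 1e5, 10)
print(table)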
def load_irf(self):
    filename = os.path.join(self.outdir, "irf.fits.gz")
    with fits.open(filename, memmap=False) as hdulist:
        aeff = EffectiveAreaTable2D.from_hdulist(hdulist=hdulist)
        edisp = EnergyDispersion2D.read(filename, hdu="ENERGY DISPERSION")

        bkg_fits_table = hdulist["BACKGROUND"]
        bkg_table = Table.read(bkg_fits_table)
        energy_lo = bkg_table["ENERG_LO"].quantity
        energy_hi = bkg_table["ENERG_HI"].quantity
        bkg = bkg_table["BGD"].quantity
        axes = [
            BinnedDataAxis(energy_lo, energy_hi, interpolation_mode="log", name="energy")
        ]
        bkg = BkgData(data=NDDataArray(axes=axes, data=bkg))

    # Create rmf with appropriate dimensions (e_reco->bkg, e_true->area)
    e_reco_min = bkg.energy.lo[0]
    e_reco_max = bkg.energy.hi[-1]
    e_reco_bin = bkg.energy.nbins
    e_reco_axis = EnergyBounds.equal_log_spacing(e_reco_min, e_reco_max, e_reco_bin, "TeV")

    e_true_min = aeff.data.axes[0].lo[0]
    e_true_max = aeff.data.axes[0].hi[-1]
    e_true_bin = len(aeff.data.axes[0].bins) - 1
    e_true_axis = EnergyBounds.equal_log_spacing(e_true_min, e_true_max, e_true_bin, "TeV")

    # Fake offset...
    rmf = edisp.to_energy_dispersion(offset=0.5 * u.deg, e_reco=e_reco_axis, e_true=e_true_axis)

    # This is required because in gammapy v0.8
    # gammapy.spectrum.utils.integrate_model
    # calls the attribute aeff.energy, which is an attribute of
    # EffectiveAreaTable and not of EffectiveAreaTable2D.
    # WARNING: the angle is not important, but only because we started with
    # on-axis data! TO UPDATE
    aeff = aeff.to_effective_area_table(Angle("1d"))

    self.irf = Irf(bkg=bkg, aeff=aeff, rmf=rmf)
def make_counts_array():
    """Make an example counts array with energy and offset axes."""
    data_store = DataStore.from_dir(gammapy_extra.dir / 'datasets/hess-crab4')
    event_lists = data_store.load_all('events')
    ebounds = EnergyBounds.equal_log_spacing(0.1, 100, 100, 'TeV')
    offset = np.linspace(0, 2.5, 100)
    array = EnergyOffsetArray(ebounds, offset)
    array.fill_events(event_lists)
    return array
def table_model():
    energy_edges = EnergyBounds.equal_log_spacing(0.1 * u.TeV, 100 * u.TeV, 1000)
    energy = energy_edges.log_centers

    index = 2.3 * u.Unit('')
    amplitude = 4 / u.cm ** 2 / u.s / u.TeV
    reference = 1 * u.TeV
    pl = PowerLaw(index, amplitude, reference)
    flux = pl(energy)

    return TableModel(energy, flux, 1 * u.Unit(''))
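# Usage sketch (not from the original source): gammapy 0.x spectral models are callable
# with an energy Quantity, as the PowerLaw call above already shows; the comparison with
# the original power law is only illustrative, since the TableModel value is interpolated.
import astropy.units as u

model = table_model()
e = 1 * u.TeV
print(model(e))                                                                # interpolated table value
print(PowerLaw(2.3 * u.Unit(''), 4 / u.cm ** 2 / u.s / u.TeV, 1 * u.TeV)(e))   # reference power law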
def make_counts_array():
    """Make an example counts array with energy and offset axes."""
    data_store = DataStore.from_dir('$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2')
    event_lists = data_store.load_all('events')
    ebounds = EnergyBounds.equal_log_spacing(0.1, 100, 100, 'TeV')
    offset = Angle(np.linspace(0, 2.5, 100), "deg")
    array = EnergyOffsetArray(ebounds, offset)
    array.fill_events(event_lists)
    return array
def load_irf(self):
    filename = os.path.join(self.outdir, 'irf.fits.gz')
    with fits.open(filename, memmap=False) as hdulist:
        aeff = EffectiveAreaTable.from_hdulist(hdulist=hdulist)
        edisp = EnergyDispersion2D.read(filename, hdu="ENERGY DISPERSION")

        bkg_fits_table = hdulist["BACKGROUND"]
        bkg_table = Table.read(bkg_fits_table)
        energy_lo = bkg_table["ENERG_LO"].quantity
        energy_hi = bkg_table["ENERG_HI"].quantity
        bkg = bkg_table["BGD"].quantity
        axes = [
            BinnedDataAxis(
                energy_lo, energy_hi, interpolation_mode="log", name="energy"
            )
        ]
        bkg = BkgData(data=NDDataArray(axes=axes, data=bkg))

    # Create rmf with appropriate dimensions (e_reco->bkg, e_true->area)
    e_reco_min = bkg.energy.lo[0]
    e_reco_max = bkg.energy.hi[-1]
    e_reco_bin = bkg.energy.nbins
    e_reco_axis = EnergyBounds.equal_log_spacing(
        e_reco_min, e_reco_max, e_reco_bin, "TeV"
    )

    e_true_min = aeff.energy.lo[0]
    e_true_max = aeff.energy.hi[-1]
    e_true_bin = aeff.energy.nbins
    e_true_axis = EnergyBounds.equal_log_spacing(
        e_true_min, e_true_max, e_true_bin, "TeV"
    )

    # Fake offset...
    rmf = edisp.to_energy_dispersion(
        offset=0.5 * u.deg, e_reco=e_reco_axis, e_true=e_true_axis
    )

    self.irf = Irf(bkg=bkg, aeff=aeff, rmf=rmf)
def make_psf_cube(image_size, energy_cube, source_name, center_maps, center,
                  ObsList, outdir, spectral_index=2.3):
    """Compute the mean PSF for a set of observations for different energy bands.

    Parameters
    ----------
    image_size: int, total number of pixels of the 2D map
    energy: tuple for the energy axis: (Emin, Emax, nbins)
    source_name: name of the source for which you want to compute the image
    center_maps: SkyCoord, center of the images
    center: SkyCoord, position where we want to compute the PSF
    ObsList: ObservationList used to compute the PSF (can differ from the data_store, e.g. G0p9 for the GC)
    outdir: directory where the FITS image will go
    spectral_index: assumed spectral index to compute the PSF

    Returns
    -------
    """
    ref_cube = make_empty_cube(image_size, energy_cube, center_maps)
    header = ref_cube.sky_image_ref.to_image_hdu().header
    energy_bins = ref_cube.energies(mode="edges")
    for i_E, E in enumerate(energy_bins[0:-1]):
        energy_band = Energy([energy_bins[i_E].value, energy_bins[i_E + 1].value],
                             energy_bins.unit)
        energy = EnergyBounds.equal_log_spacing(energy_band[0].value,
                                                energy_band[1].value,
                                                100, energy_band.unit)
        # Here all the observations have a center at less than 2 degrees from the Crab,
        # so it is OK to estimate the mean PSF at the Crab source position
        # (the area is defined for an offset of 2 degrees...)
        psf_energydependent = ObsList.make_mean_psf(center, energy, theta=None)
        try:
            psf_table = psf_energydependent.table_psf_in_energy_band(
                energy_band, spectral_index=spectral_index)
        except Exception:
            psf_table = TablePSF(
                psf_energydependent.offset,
                Quantity(np.zeros(len(psf_energydependent.offset)), u.sr ** -1))
        ref_cube.data[i_E, :, :] = fill_acceptance_image(
            header, center_maps, psf_table._offset.to("deg"),
            psf_table._dp_domega, psf_table._offset.to("deg")[-1]).data
    ref_cube.write(outdir + "/mean_psf_cube_" + source_name + ".fits",
                   format="fermi-counts")
def make_mean_psf_cube(image_size, energy_cube, center_maps, center, ObsList,
                       spectral_index=2.3):
    """Compute the mean PSF for a set of observations for different energy bands.

    Parameters
    ----------
    image_size: int, total number of pixels of the 2D map
    energy: tuple for the energy axis: (Emin, Emax, nbins)
    center_maps: SkyCoord, center of the images
    center: SkyCoord, position where we want to compute the PSF
    ObsList: ObservationList used to compute the PSF (can differ from the data_store, e.g. G0p9 for the GC)
    spectral_index: assumed spectral index to compute the PSF

    Returns
    -------
    ref_cube : `~gammapy.cube.SkyCube`
        PSF mean cube
    """
    ref_cube = make_empty_cube(image_size, energy_cube, center_maps)
    header = ref_cube.sky_image_ref.to_image_hdu().header
    energy_bins = ref_cube.energies()
    for i_E, E in enumerate(energy_bins[0:-1]):
        energy_band = Energy([energy_bins[i_E].value, energy_bins[i_E + 1].value],
                             energy_bins.unit)
        energy = EnergyBounds.equal_log_spacing(energy_band[0].value,
                                                energy_band[1].value,
                                                100, energy_band.unit)
        psf_energydependent = ObsList.make_psf(center, energy, theta=None)
        try:
            psf_table = psf_energydependent.table_psf_in_energy_band(
                energy_band, spectral_index=spectral_index)
        except Exception:
            psf_table = TablePSF(
                psf_energydependent.offset,
                Quantity(np.zeros(len(psf_energydependent.offset)), u.sr ** -1))
        ref_cube.data[i_E, :, :] = fill_acceptance_image(
            header, center_maps, psf_table._offset.to("deg"),
            psf_table._dp_domega, psf_table._offset.to("deg")[-1]).data
    return ref_cube
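# Usage sketch (not from the original source): assumes the same helpers (make_empty_cube)
# and an ObservationList `obs_list` are available; image size, binning and positions are
# illustrative only.
from astropy.coordinates import SkyCoord

image_size = 250
energy_cube = (0.5, 100, 5)  # (Emin, Emax, nbins), same convention as above
center_maps = SkyCoord(359.94, -0.04, unit="deg", frame="galactic")
center = SkyCoord.from_name("Crab")

psf_cube = make_mean_psf_cube(image_size, energy_cube, center_maps, center, obs_list)
# Write with the same format used by the variant above that saves to disk
psf_cube.write("mean_psf_cube.fits", format="fermi-counts")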
def plot_energy_hist(self, ax=None, ebounds=None, **kwargs):
    """A plot showing counts as a function of energy.

    Convert to a `~gammapy.spectrum.CountsSpectrum` internally.
    """
    if ebounds is None:
        emin = np.min(self['ENERGY'].quantity)
        emax = np.max(self['ENERGY'].quantity)
        ebounds = EnergyBounds.equal_log_spacing(emin, emax, 100)

    from gammapy.spectrum import CountsSpectrum
    spec = CountsSpectrum.from_eventlist(self, ebounds)
    spec.plot(ax=ax, **kwargs)
    return ax
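# Usage sketch (not from the original source): assumes the method above is bound to a
# gammapy 0.x EventList-style table, as the self['ENERGY'] access suggests; the file
# path is illustrative only.
from gammapy.data import EventList

events = EventList.read("events.fits")
ax = events.plot_energy_hist()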
def make_1d_expected_counts(self, spectral_index=2.3, for_integral_flux=False, eref=None):
    """Compute the 1D exposure table for one observation for an offset table.

    Parameters
    ----------
    spectral_index : float
        Assumed power-law spectral index
    for_integral_flux : bool
        True if you want the total excess / exposure to give the integrated flux
    eref : `~gammapy.utils.energy.Energy`
        Reference energy at which you want to compute the exposure.
        Default is the log center of the energy band of the image.

    Returns
    -------
    table : `astropy.table.Table`
        Two columns: offset in the FOV "theta" and expected counts "npred"
    """
    energy = EnergyBounds.equal_log_spacing(self.energy_band[0].value,
                                            self.energy_band[1].value,
                                            100, self.energy_band.unit)
    energy_band = energy.bands
    energy_bin = energy.log_centers
    if not eref:
        eref = EnergyBounds(self.energy_band).log_centers
    spectrum = (energy_bin / eref) ** (-spectral_index)
    offset = Angle(np.linspace(self.offset_band[0].value,
                               self.offset_band[1].value, 10),
                   self.offset_band.unit)
    arf = self.aeff.data.evaluate(offset=offset, energy=energy_bin).T
    npred = np.sum(arf * spectrum * energy_band, axis=1)
    npred *= self.livetime

    if for_integral_flux:
        norm = np.sum(spectrum * energy_band)
        npred /= norm

    table = Table()
    table['theta'] = offset
    table['npred'] = npred

    return table
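# Standalone sketch of the weighting done above (not from the original source, plain numpy):
#     npred(theta) = livetime * sum_j A_eff(theta, E_j) * (E_j / E_ref)**(-Gamma) * dE_j
import numpy as np
import astropy.units as u

gamma = 2.3
e_edges = np.logspace(-1, 2, 101) * u.TeV        # 100 log-spaced bins, as in the method above
e_centers = np.sqrt(e_edges[:-1] * e_edges[1:])  # log centers
de = np.diff(e_edges)                            # bin widths ("energy_band" above)
e_ref = np.sqrt(e_edges[0] * e_edges[-1])        # log center of the full band, i.e. the default eref

weights = (e_centers / e_ref) ** (-gamma) * de   # power-law weight per energy bin
# Given an effective-area array `aeff_vals` of shape (n_offset, n_energy),
# the expected counts per offset value would then be:
#     npred = livetime * (aeff_vals * weights).sum(axis=1)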
def make_psf(energy_band, source_name, center, ObsList, outdir, spectral_index=2.3):
    """Compute the mean PSF for a set of observations and a given energy band.

    Parameters
    ----------
    energy_band: energy band for which you want to compute the map
    source_name: name of the source for which you want to compute the image
    center: SkyCoord of the source
    ObsList: ObservationList used to compute the PSF (can differ from the data_store, e.g. G0p9 for the GC)
    outdir: directory where the FITS image will go
    spectral_index: assumed spectral index to compute the PSF

    Returns
    -------
    """
    energy = EnergyBounds.equal_log_spacing(energy_band[0].value,
                                            energy_band[1].value,
                                            100, energy_band.unit)
    # Here all the observations have a center at less than 2 degrees from the Crab,
    # so it is OK to estimate the mean PSF at the Crab source position
    # (the area is defined for an offset of 2 degrees...)
    psf_energydependent = ObsList.make_mean_psf(center, energy, theta=None)
    try:
        psf_table = psf_energydependent.table_psf_in_energy_band(
            energy_band, spectral_index=spectral_index)
    except Exception:
        psf_table = TablePSF(
            psf_energydependent.offset,
            Quantity(np.zeros(len(psf_energydependent.offset)), u.sr ** -1))

    Table_psf = Table()
    c1 = Column(psf_table._dp_domega, name='psf_value', unit=psf_table._dp_domega.unit)
    c2 = Column(psf_table._offset, name='theta', unit=psf_table._offset.unit)
    Table_psf.add_column(c1)
    Table_psf.add_column(c2)
    filename_psf = (outdir + "/psf_table_" + source_name + "_" +
                    str(energy_band[0].value) + '_' + str(energy_band[1].value) + ".fits")
    Table_psf.write(filename_psf, overwrite=True)
def make_model():
    dir = str(gammapy_extra.dir) + '/datasets/hess-crab4-hd-hap-prod2'
    data_store = DataStore.from_dir(dir)
    obs_table = data_store.obs_table
    ebounds = EnergyBounds.equal_log_spacing(0.1, 100, 100, 'TeV')
    offset = sqrt_space(start=0, stop=2.5, num=100) * u.deg
    excluded_sources = make_excluded_sources()

    multi_array = EnergyOffsetBackgroundModel(ebounds, offset)
    multi_array.fill_obs(obs_table, data_store, excluded_sources)
    # multi_array.fill_obs(obs_table, data_store)
    multi_array.compute_rate()
    bgarray = multi_array.bg_rate
    energy_range = Energy([1, 10], 'TeV')
    table = bgarray.acceptance_curve_in_energy_band(energy_range, energy_bins=10)

    multi_array.write('energy_offset_array.fits', overwrite=True)
    table.write('acceptance_curve.fits', overwrite=True)
obs_summary = ObservationSummary(stats)
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot(121)

obs_summary.plot_excess_vs_livetime(ax=ax1)
ax2 = fig.add_subplot(122)
obs_summary.plot_significance_vs_livetime(ax=ax2)


# ## Extract spectrum
#
# Now, we're going to extract a spectrum using the [SpectrumExtraction](https://docs.gammapy.org/0.10/api/gammapy.spectrum.SpectrumExtraction.html) class. We provide the reconstructed energy binning we want to use. It is expected to be a Quantity with unit energy, i.e. an array with an energy unit. We use a utility function to create it. We also provide the true energy binning to use.

# In[ ]:


e_reco = EnergyBounds.equal_log_spacing(0.1, 40, 40, unit="TeV")
e_true = EnergyBounds.equal_log_spacing(0.05, 100.0, 200, unit="TeV")


# Instantiate a [SpectrumExtraction](https://docs.gammapy.org/0.10/api/gammapy.spectrum.SpectrumExtraction.html) object that will do the extraction. The containment_correction parameter is there to allow for PSF leakage correction if one is working with full-enclosure IRFs. We also compute a threshold energy and store the result in OGIP-compliant files (pha, rmf, arf). This last step might be omitted though.

# In[ ]:


ANALYSIS_DIR = "crab_analysis"

extraction = SpectrumExtraction(
    observations=observations,
    bkg_estimate=background_estimator.result,
    containment_correction=False,
    e_reco=e_reco,
    e_true=e_true,
)
extraction.run()
# ### Spectral points
#
# Finally, let's compute spectral points. The method used is to first choose an energy binning, and then to do a 1-dim likelihood fit / profile to compute the flux and flux error.

# In[ ]:


# Flux points are computed on stacked observation
stacked_obs = extract.observations.stack()
print(stacked_obs)


# In[ ]:


ebounds = EnergyBounds.equal_log_spacing(1, 40, 4, unit=u.TeV)

seg = SpectrumEnergyGroupMaker(obs=stacked_obs)
seg.compute_groups_fixed(ebounds=ebounds)

fpe = FluxPointEstimator(
    obs=stacked_obs, groups=seg.groups, model=fit.result[0].model
)
fpe.compute_points()
fpe.flux_points.table


# ### Plot
#
# Let's plot the spectral model and points. You could do it directly, but there is a helper class.
# Note that a spectral uncertainty band, a "butterfly", is drawn, but it is very thin, i.e. barely visible.
)


# In[5]:


# Estimation of the background
bkg_estimator = ReflectedRegionsBackgroundEstimator(
    on_region=on_region,
    obs_list=obs_list,
    exclusion_mask=exclusion_mask,
)
bkg_estimator.run()


# In[6]:


# Extract the spectral data
e_reco = EnergyBounds.equal_log_spacing(0.7, 100, 50, unit='TeV')  # fine binning
e_true = EnergyBounds.equal_log_spacing(0.05, 100, 200, unit='TeV')
extraction = SpectrumExtraction(
    obs_list=obs_list,
    bkg_estimate=bkg_estimator.result,
    containment_correction=False,
    e_reco=e_reco,
    e_true=e_true,
)
extraction.run()
extraction.compute_energy_threshold(
    method_lo='area_max',
    area_percent_lo=10.0,
)


# ## Light curve estimation

"""
from astropy.coordinates import SkyCoord, Angle
from gammapy.datasets import gammapy_extra
from gammapy.image import ExclusionMask
from gammapy.data import DataStore
from gammapy.region import SkyCircleRegion
from gammapy.spectrum import SpectrumAnalysis
from gammapy.utils.energy import EnergyBounds

center = SkyCoord(83.63, 22.01, unit='deg', frame='icrs')
radius = Angle('0.3 deg')
on_region = SkyCircleRegion(pos=center, radius=radius)

bkg_method = dict(type='reflected', n_min=3)

exclusion_file = gammapy_extra.filename("datasets/exclusion_masks/"
                                        "tevcat_exclusion.fits")
excl = ExclusionMask.from_fits(exclusion_file)

bounds = EnergyBounds.equal_log_spacing(1, 10, 40, unit='TeV')

store = gammapy_extra.filename("datasets/hess-crab4")
ds = DataStore.from_dir(store)
obs = [23523, 23559]

ana = SpectrumAnalysis(datastore=ds, obs=obs, on_region=on_region,
                       bkg_method=bkg_method, exclusion=excl,
                       ebounds=bounds)

ana.write_ogip_data(outdir='ogip_data')
modymin = 413
modymax = 573
# E1=0.50
# E2=1.44
# E1=1.44
# E2=4.16
# E1=4.16
# E2=12.01
config_name = input_param["general"]["config_name"]
energy_reco = [
    Energy(input_param["energy binning"]["Emin"], "TeV"),
    Energy(input_param["energy binning"]["Emax"], "TeV"),
    input_param["energy binning"]["nbin"]
]
energy_bins = EnergyBounds.equal_log_spacing(0.5, 100, 1, "TeV")
for i_E, E in enumerate(energy_bins[0:-1]):
    E1 = energy_bins[i_E].value
    E2 = energy_bins[i_E + 1].value
    visible_grid = True
    delta_grid = 1.0
    minor_tick = 5
    palette = VariableColormap('../b.lut', name='b')
    # palette.set_scale('SQRT')
    directory = make_outdir_filesresult(source_name, name_method_fond,
                                        config_name, image_size,
                                        for_integral_flux=False,
                                        ereco=energy_reco)
    fileb = directory + '/residual_morpho_et_flux_step_'
# shouldn't matter, but must contain a sufficient number of digits,
# so that CDELT1 and CDELT2 are not truncated when wcs.to_header() is called
# seems to be a bug...
refheader['CDELT3'] = 2.02
refheader['CTYPE1'] = 'RA---CAR'
refheader['CTYPE2'] = 'DEC--CAR'
refheader['CTYPE3'] = 'log_Energy'  # shouldn't matter
refheader['CUNIT1'] = 'deg'
refheader['CUNIT2'] = 'deg'
refheader['CRVAL1'] = events.meta['RA_OBJ']
refheader['CRVAL2'] = events.meta['DEC_OBJ']
refheader['CRVAL3'] = 10.0  # shouldn't matter

energies = EnergyBounds.equal_log_spacing(0.5, 80, 8, 'TeV')
data = Quantity(np.zeros((len(energies), 200, 200)))
wcs = WCS(refheader)
refcube = SpectralCube(data, wcs, energy=energies)

# Counts cube
log.info('Bin events into cube.')
counts_hdu = bin_events_in_cube(events, refcube, energies)
counts = SpectralCube(Quantity(counts_hdu.data, 'count'), wcs, energies)
log.info('Counts cube shape: {}'.format(counts_hdu.shape))
log.info('Number of events in cube: {}'.format(counts_hdu.data.sum()))
counts.writeto('counts.fits', clobber=True)

# Exposure cube
pointing = SkyCoord(events.meta['RA_PNT'], events.meta['DEC_PNT'], "icrs", unit="deg")
livetime = Quantity(events.meta['LIVETIME'], 's')

    "--",
    label="Interpolated values",
)
plt.xlabel("{} [{}]".format(nddata.axes[0].name, nddata.axes[0].unit))
plt.ylabel("{} [{}]".format("Exposure", nddata.data.unit))
plt.legend();


# ## 2D example
#
# Another common use case is to store a Quantity as a function of field-of-view offset and energy. The following shows how to use the NDDataArray to slice the data array at any values of offset and energy.

# In[ ]:


energy_data = EnergyBounds.equal_log_spacing(1, 10, 50, unit=u.TeV)
energy_axis = BinnedDataAxis(
    lo=energy_data.lower_bounds,
    hi=energy_data.upper_bounds,
    name="energy",
    interpolation_mode="log",
)

offset_data = np.linspace(0, 2, 4) * u.deg
offset_axis = DataAxis(offset_data, name="offset")

data_temp = 10 * np.exp(-energy_data.log_centers.value / 10)
data = np.outer(data_temp, (offset_data.value + 1))

nddata2d = NDDataArray(
    axes=[energy_axis, offset_axis], data=data * u.Unit("cm-2 s-1 TeV-1")
)
if freeze_bkg:
    name += "_bkg_fix"
else:
    name += "_bkg_free"
for_integral_flux = input_param["exposure"]["for_integral_flux"]

fwhm_frozen = input_param["param_fit"]["gauss_configuration"]["fwhm_frozen"]
name += "_fwhm_gauss" + str(fwhm_frozen)
if fwhm_frozen:
    name += "_value" + str(
        input_param["param_fit"]["gauss_configuration"]["fwhm_init"] * 2.35)
if input_param["param_fit"]["use_EM_model"]:
    name += "_emission_galactic_True"

# Energy binning
energy_bins = EnergyBounds.equal_log_spacing(
    input_param["energy binning"]["Emin"],
    input_param["energy binning"]["Emax"],
    input_param["energy binning"]["nbin"], 'TeV')
energy_centers = energy_bins.log_centers

# outdir result and plot
config_name = input_param["general"]["config_name"]
outdir_result = make_outdir_filesresult(source_name, name_method_fond,
                                        len(energy_bins), config_name,
                                        image_size, for_integral_flux)
outdir_plot = make_outdir_plot(source_name, name_method_fond,
                               len(energy_bins), config_name,
                               image_size, for_integral_flux)

# store the fit result for the model of the source
filename_table_result = outdir_result + "/flux_fit_result" + name + ".txt"
filename_covar_result = outdir_result + "/flux_covar_result" + name + ".txt"
table_models = Table.read(filename_table_result, format="ascii")
# load catalogs
fermi_3fgl = SourceCatalog3FGL()
fermi_2fhl = SourceCatalog2FHL()

# access crab data by corresponding identifier
crab_3fgl = fermi_3fgl['3FGL J0534.5+2201']
crab_2fhl = fermi_2fhl['2FHL J0534.5+2201']

ax = crab_3fgl.spectral_model.plot(crab_3fgl.energy_range, energy_power=2,
                                   label='Fermi 3FGL', color='r',
                                   flux_unit='erg-1 cm-2 s-1')
ax.set_ylim(1e-12, 1E-9)

# set up an energy array to evaluate the butterfly
emin, emax = crab_3fgl.energy_range
energy = EnergyBounds.equal_log_spacing(emin, emax, 100)
butterfly_3fg = crab_3fgl.spectrum.butterfly(energy)

butterfly_3fg.plot(crab_3fgl.energy_range, ax=ax, energy_power=2,
                   color='r', flux_unit='erg-1 cm-2 s-1')

crab_3fgl.flux_points.plot(ax=ax, sed_type='eflux', color='r',
                           y_unit='erg cm-2 s-1')

crab_2fhl.spectral_model.plot(crab_2fhl.energy_range, ax=ax, energy_power=2,
                              c='g', label='Fermi 2FHL',
                              flux_unit='erg-1 cm-2 s-1')

# set up an energy array to evaluate the butterfly using the 2FHL energy range
emin, emax = crab_2fhl.energy_range
energy = EnergyBounds.equal_log_spacing(emin, emax, 100)
butterfly_2fhl = crab_2fhl.spectrum.butterfly(energy)
# We will also produce a debug plot in order to show how the global fit matches one of the individual observations.

# In[46]:


fit.result[0].plot()


# We can compute flux points by fitting the norm of the global model in energy bands. We'll use a fixed energy binning for now.

# In[47]:


from gammapy.utils.energy import EnergyBounds

# Flux points are computed on stacked observation
stacked_obs = extract.observations.stack()
print(stacked_obs)

ebounds = EnergyBounds.equal_log_spacing(0.1, 40, 15, unit=u.TeV)

seg = SpectrumEnergyGroupMaker(obs=stacked_obs)
seg.compute_range_safe()
seg.compute_groups_fixed(ebounds=ebounds)

print(seg.groups)


# In[48]:


fpe = FluxPointEstimator(
    obs=stacked_obs,
    groups=seg.groups,
    model=fit.result[0].model,
)
fpe.compute_points()
radius[np.isnan(radius)] = 0.3
exclusion_table['Radius'] = radius * u.deg
exclusion_table = Table(exclusion_table)

# Now run the bgmaker
bgmaker = OffDataBackgroundMaker(
    data_store=datastore,
    outdir=outdir,
    run_list=None,
    obs_table=obs_table_with_group_id,
    ntot_group=obs_groups.n_groups,
    excluded_sources=exclusion_table,
)

# Define the energy and offset binning to use
ebounds = EnergyBounds.equal_log_spacing(0.1, 100, 15, 'TeV')
# offset = sqrt_space(start=0, stop=2.5, num=20) * u.deg
offset = np.linspace(0, 2.5, 20) * u.deg

# Make the model (i.e. stack counts and livetime)
bgmaker.make_model("2D", ebounds=ebounds, offset=offset)

# Smooth the model
bgmaker.smooth_models("2D")

# Write the model to disk
bgmaker.save_models("2D")
bgmaker.save_models(modeltype="2D", smooth=True)

# Now copy the background files as bkg into the source runs
data_dir = "data_new"
shutil.move(outdir, data_dir)
plt.plot(x, y)
plt.show()

time = (datastore.obs_table["TSTART"] > 55562.0) & (datastore.obs_table["TSTOP"] < 56927)
sel = (time) & (sep < 2.0 * u.deg)
sel
np.where(sel)
crabrun = datastore.obs_table[sel]

on_region = CircleSkyRegion(crab, 0.15 * u.deg)
model = models.LogParabola(
    alpha=2.3,
    beta=0,
    amplitude=1e-11 * u.Unit('cm-2 s-1 TeV-1'),
    reference=1 * u.TeV,
)
flux_point_binning = EnergyBounds.equal_log_spacing(0.7, 30, 5, u.TeV)
exclusion_mask = SkyImage.read('$GAMMAPY_EXTRA/datasets/exclusion_masks/tevcat_exclusion.fits')

import gammapy
import numpy as np
import astropy
import regions
import sherpa
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.coordinates import SkyCoord, Angle
from astropy.table import vstack as vstack_table
from regions import CircleSkyRegion
from gammapy.data import DataStore, ObservationList
from gammapy.data import ObservationStats, ObservationSummary
from gammapy.background.reflected import ReflectedRegionsBackgroundEstimator
bkg_estimator = PhaseBackgroundEstimator(
    observations=obs_list_vela,
    on_region=on_region,
    on_phase=on_phase_range,
    off_phase=off_phase_range,
)
bkg_estimator.run()
bkg_estimate = bkg_estimator.result


# The rest of the analysis is the same as for a standard spectral analysis with Gammapy. All the specificity of a phase-resolved analysis is contained in the PhaseBackgroundEstimator, where the background is estimated in the ON-region OFF-phase rather than in an OFF-region.
#
# We can now extract a spectrum with the SpectrumExtraction class. It takes the reconstructed and the true energy binning. Both are expected to be a Quantity with unit energy, i.e. an array with an energy unit. EnergyBounds is a dedicated class to do it.

# In[ ]:


etrue = EnergyBounds.equal_log_spacing(0.005, 10.0, 100, unit="TeV")
ereco = EnergyBounds.equal_log_spacing(0.01, 10, 30, unit="TeV")

extraction = SpectrumExtraction(
    observations=obs_list_vela,
    bkg_estimate=bkg_estimate,
    containment_correction=True,
    e_true=etrue,
    e_reco=ereco,
)

extraction.run()
extraction.compute_energy_threshold(method_lo="energy_bias", bias_percent_lo=20)


# Now let's take a look at the files we just created with spectrum_observation.
large_gaus = Gauss2D("g2")
source_center_SgrA = SkyCoord.from_name("SgrA*")
large_gaus.xpos, large_gaus.ypos = skycoord_to_pixel(source_center_SgrA, excess.wcs)
CS_map = SkyMap.read("CStot.fits")
cs_reproj = CS_map.reproject(excess)
cs_reproj.data[np.where(np.isnan(cs_reproj.data))] = 0
# cs_reproj.data[np.where(cs_reproj.data<50)]=0
cs_reproj.write("cs_map_reproj.fits", clobber=True)
load_table_model("CS", "cs_map_reproj.fits")
set_full_model(psf_SgrA(large_gaus * CS))
# large_gaus.fwhm=150
# freeze(large_gaus.fwhm)
fit()
"""
pt.ion()

energy_bins = EnergyBounds.equal_log_spacing(0.5, 100, 5, 'TeV')
# E1=energy_bins[1].value
# E2=energy_bins[2].value
# for i_E, E in enumerate(energy_bins[0:-3]):
for i_E, E in enumerate(energy_bins[0:-5]):
    # E1 = energy_bins[i_E].value
    # E2 = energy_bins[i_E+1].value
    E1 = energy_bins[0].value
    E2 = energy_bins[3].value
    print "Energy band: E1=" + str(E1) + " and E2=" + str(E2)
    # on = SkyMapCollection.read("fov_bg_maps"+str(E1)+"_"+str(E2)+"_TeV.fits")["excess"]
    on = SkyMapCollection.read("fov_bg_maps" + str(E1) + "_" + str(E2) + "_TeV.fits")["counts"]
    bkgmap = SkyMapCollection.read("fov_bg_maps" + str(E1) + "_" + str(E2) + "_TeV.fits")["bkg"]
    exp = SkyMapCollection.read("fov_bg_maps" + str(E1) + "_" + str(E2) + "_TeV.fits")["exposure"]
    # source_J1745_303 = SkyCoord(358.76, -0.51, unit='deg', frame="galactic")
# Now we'll define the input for the spectrum analysis. It will be done the python way, i.e. by creating a config dict containing python objects. We plan to also add the convenience of configuring the analysis using a plain-text config file.

# In[3]:


crab_pos = SkyCoord.from_name('crab')
on_region = CircleSkyRegion(crab_pos, 0.15 * u.deg)

model = models.LogParabola(
    alpha=2.3,
    beta=0,
    amplitude=1e-11 * u.Unit('cm-2 s-1 TeV-1'),
    reference=1 * u.TeV,
)

flux_point_binning = EnergyBounds.equal_log_spacing(0.7, 30, 5, u.TeV)

exclusion_mask = SkyImage.read('$GAMMAPY_EXTRA/datasets/exclusion_masks/tevcat_exclusion.fits')


# In[4]:


config = dict(
    outdir=None,
    background=dict(
        on_region=on_region,
        exclusion_mask=exclusion_mask,
        min_distance=0.1 * u.rad,
    ),
    extraction=dict(containment_correction=False),
input_param = yaml.load(open(sys.argv[1]))
image_size = input_param["general"]["image_size"]

# Input param fit and source configuration
# On what portion of the map the fit is done
freeze_bkg = input_param["param_fit_morpho"]["freeze_bkg"]
source_name = input_param["general"]["source_name"]
name_method_fond = input_param["general"]["name_method_fond"]
if freeze_bkg:
    name = "_bkg_fix"
else:
    name = "_bkg_free"
for_integral_flux = input_param["exposure"]["for_integral_flux"]

# Energy binning
energy_bins = EnergyBounds.equal_log_spacing(
    input_param["energy binning"]["Emin"],
    input_param["energy binning"]["Emax"],
    input_param["energy binning"]["nbin"], 'TeV')
energy_centers = energy_bins.log_centers
energy_reco = [
    Energy(input_param["energy binning"]["Emin"], "TeV"),
    Energy(input_param["energy binning"]["Emax"], "TeV"),
    input_param["energy binning"]["nbin"]
]

# outdir data and result
config_name = input_param["general"]["config_name"]
outdir_data = make_outdir_data(source_name, name_method_fond, config_name,
                               image_size, for_integral_flux=False,
fermi_2fhl = SourceCatalog2FHL()

# access crab data by corresponding identifier
crab_3fgl = fermi_3fgl['3FGL J0534.5+2201']
crab_2fhl = fermi_2fhl['2FHL J0534.5+2201']

ax = crab_3fgl.spectral_model.plot(crab_3fgl.energy_range, energy_power=2,
                                   label='Fermi 3FGL', color='r',
                                   flux_unit='erg-1 cm-2 s-1')
ax.set_ylim(1e-12, 1E-9)

# set up an energy array to evaluate the butterfly
emin, emax = crab_3fgl.energy_range
energy = EnergyBounds.equal_log_spacing(emin, emax, 100)
butterfly_3fg = crab_3fgl.spectrum.butterfly(energy)

butterfly_3fg.plot(crab_3fgl.energy_range, ax=ax, energy_power=2,
                   color='r', flux_unit='erg-1 cm-2 s-1')

crab_3fgl.flux_points.plot(ax=ax, sed_type='eflux', color='r',
                           flux_unit='erg cm-2 s-1')

crab_2fhl.spectral_model.plot(crab_2fhl.energy_range, ax=ax,