def read(cls, phafile, rmffile=None):
    """Read PHA fits file

    The energy binning is not contained in the PHA standard. Therefore it
    is inferred from the corresponding RMF EBOUNDS extension.

    Parameters
    ----------
    phafile : str
        PHA file with ``SPECTRUM`` extension
    rmffile : str
        RMF file with ``EBOUNDS`` extension, optional
    """
    phafile = make_path(phafile)
    spectrum = fits.open(str(phafile))['SPECTRUM']
    counts = [val[1] for val in spectrum.data]

    if rmffile is None:
        val = spectrum.header['RESPFILE']
        if val == '':
            raise ValueError('RMF file not set in PHA header. '
                             'Please provide RMF file for energy binning')
        parts = phafile.parts[:-1]
        rmffile = Path.cwd()
        for part in parts:
            rmffile = rmffile.joinpath(part)
        rmffile = rmffile.joinpath(val)

    rmffile = make_path(rmffile)
    ebounds = fits.open(str(rmffile))['EBOUNDS']
    bins = EnergyBounds.from_ebounds(ebounds)
    livetime = Quantity(0, 's')

    return cls(counts, bins, livetime=livetime)
def read(cls, filename): """Read from a FITS file. Parameters ---------- filename : `str` File containing the IRFs """ filename = str(make_path(filename)) hdu_list = fits.open(filename) aeff = EffectiveAreaTable2D.read(filename, hdu='EFFECTIVE AREA') bkg = Background3D.read(filename, hdu='BACKGROUND') edisp = EnergyDispersion2D.read(filename, hdu='ENERGY DISPERSION') psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION') if 'SENSITIVITY' in hdu_list: sensi = SensitivityTable.read(filename, hdu='SENSITIVITY') else: sensi = None return cls( aeff=aeff, bkg=bkg, edisp=edisp, psf=psf, ref_sensi=sensi, )
def read(cls, filename, **kwargs): """Read :ref:`gadf:iact-events` Parameters ---------- filename: `~gammapy.extern.pathlib.Path`, str File to read """ filename = make_path(filename) if 'hdu' not in kwargs: kwargs.update(hdu='EVENTS') return super(EventList, cls).read(str(filename), **kwargs)
def read(cls, filename): """Read map dataset from file. Parameters ---------- filename : str Filename to read from. Returns ------- flux_maps : `~gammapy.estimators.FluxMaps` Flux maps object. """ with fits.open(str(make_path(filename)), memmap=False) as hdulist: return cls.from_hdulist(hdulist)
def write(self, filename, overwrite=False, format="gadf", hdu="SKYMAP"):
    """Write map to file.

    Parameters
    ----------
    filename : `pathlib.Path` or str
        Filename.
    overwrite : bool
        Overwrite existing files?
    format : {"gadf", "ogip", "ogip-sherpa", "ogip-arf", "ogip-arf-sherpa"}
        Which format to use.
    hdu : str
        Name of the HDU the map is written to.
    """
    filename = make_path(filename)
    self.to_hdulist(format=format, hdu=hdu).writeto(filename, overwrite=overwrite)
def dataset():
    path = "$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits"
    table = Table.read(make_path(path))
    table["e_ref"] = table["e_ref"].quantity.to("TeV")
    data = FluxPoints.from_table(table, format="gadf-sed")

    model = SkyModel(
        spectral_model=PowerLawSpectralModel(
            index=2.3, amplitude="2e-13 cm-2 s-1 TeV-1", reference="1 TeV"
        )
    )

    obs_table = Table()
    obs_table["TELESCOP"] = ["CTA"]
    obs_table["OBS_ID"] = ["0001"]
    obs_table["INSTRUME"] = ["South_Z20_50h"]

    dataset = FluxPointsDataset(model, data, meta_table=obs_table)
    return dataset
def read_models(self, path, extend=True):
    """Read models from YAML file.

    Parameters
    ----------
    path : str
        Path to the model file.
    extend : bool
        Extend the existing models on the datasets or replace them.
    """
    path = make_path(path)
    models = Models.read(path)
    self.set_models(models, extend=extend)
    log.info(f"Models loaded from {path}.")
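# Usage sketch for read_models above, assuming it belongs to the high-level
# Analysis class; the config, the datasets and the YAML path are hypothetical.
from gammapy.analysis import Analysis, AnalysisConfig

analysis = Analysis(AnalysisConfig())
# ... datasets would normally be produced first, e.g. via analysis.get_datasets() ...
analysis.read_models("models.yaml", extend=True)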
def read(cls, filename, name=""):
    """Read map dataset from file.

    Parameters
    ----------
    filename : str
        Filename to read from.
    name : str
        Name of the dataset.

    Returns
    -------
    dataset : `MapDataset`
        Map dataset.
    """
    with fits.open(make_path(filename), memmap=False) as hdulist:
        return cls.from_hdulist(hdulist, name=name)
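# Usage sketch for the MapDataset reader above; the FITS file name is hypothetical
# (any file written by MapDataset.write with the standard HDU layout would do).
from gammapy.datasets import MapDataset

dataset = MapDataset.read("stacked-dataset.fits", name="stacked")
print(dataset)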
def write(self, filename, filename_model=None, overwrite=False, sed_type="likelihood"):
    """Write flux map to file.

    Parameters
    ----------
    filename : str
        Filename to write to.
    filename_model : str
        Filename of the model (yaml format). If None, keep string before '.'
        and add '_model.yaml' suffix.
    overwrite : bool
        Overwrite file if it exists.
    sed_type : str
        SED type to convert to. Default is "likelihood".
    """
    filename = make_path(filename)

    if filename_model is None:
        name_string = filename.as_posix()
        for suffix in filename.suffixes:
            name_string = name_string.replace(suffix, "")
        filename_model = name_string + "_model.yaml"

    filename_model = make_path(filename_model)

    hdulist = self.to_hdulist(sed_type)

    models = Models(self.reference_model)
    models.write(filename_model, overwrite=overwrite)

    hdulist[0].header["MODEL"] = filename_model.as_posix()
    hdulist.writeto(filename, overwrite=overwrite)
def __init__(
    self,
    table,
    time_0=time_0.quantity,
    phase_0=phase_0.quantity,
    f0=f0.quantity,
    f1=f1.quantity,
    f2=f2.quantity,
    filename=None,
):
    self.table = table
    if filename is not None:
        filename = str(make_path(filename))
    self.filename = filename
    super().__init__(time_0=time_0, phase_0=phase_0, f0=f0, f1=f1, f2=f2)
def get_observations(self):
    """Fetch observations from the data store according to criteria defined in the configuration."""
    self.config.validate()
    log.info("Fetching observations.")
    datastore_path = make_path(self.settings["observations"]["datastore"])
    if datastore_path.is_file():
        datastore = DataStore.from_file(datastore_path)
    elif datastore_path.is_dir():
        datastore = DataStore.from_dir(datastore_path)
    else:
        raise FileNotFoundError(f"Datastore {datastore_path} not found.")

    ids = set()
    selection = dict()
    for criteria in self.settings["observations"]["filters"]:
        selected_obs = ObservationTable()

        # TODO: Reduce significantly the code.
        # This block would be handled by datastore.obs_table.select_observations
        selection["type"] = criteria["filter_type"]
        for key, val in criteria.items():
            if key in ["lon", "lat", "radius", "border"]:
                val = Angle(val)
            selection[key] = val
        if selection["type"] == "angle_box":
            selection["type"] = "par_box"
            selection["value_range"] = Angle(criteria["value_range"])
        if selection["type"] == "sky_circle" or selection["type"].endswith("_box"):
            selected_obs = datastore.obs_table.select_observations(selection)
        if selection["type"] == "par_value":
            mask = (
                datastore.obs_table[criteria["variable"]] == criteria["value_param"]
            )
            selected_obs = datastore.obs_table[mask]
        if selection["type"] == "ids":
            obs_list = datastore.get_observations(criteria["obs_ids"])
            selected_obs["OBS_ID"] = [obs.obs_id for obs in obs_list.list]
        if selection["type"] == "all":
            obs_list = datastore.get_observations()
            selected_obs["OBS_ID"] = [obs.obs_id for obs in obs_list.list]

        if len(selected_obs):
            if "exclude" in criteria and criteria["exclude"]:
                ids.difference_update(selected_obs["OBS_ID"].tolist())
            else:
                ids.update(selected_obs["OBS_ID"].tolist())

    self.observations = datastore.get_observations(ids, skip_missing=True)
    for obs in self.observations.list:
        log.info(obs)
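# Sketch of "observations.filters" entries consumed by the loop above; the values
# are hypothetical examples, only the keys are taken from the code (filter_type,
# lon/lat/radius/border, obs_ids, exclude, ...).
filters = [
    {"filter_type": "sky_circle", "lon": "83.63 deg", "lat": "22.01 deg",
     "radius": "3 deg", "border": "0.5 deg"},
    {"filter_type": "ids", "obs_ids": [23523, 23526]},
    {"filter_type": "all", "exclude": False},
]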
def __init__(self, filename=None, hdu='HGPS_SOURCES'):
    if not filename:
        filename = Path(os.environ['HGPS_ANALYSIS']) / 'data/catalogs/HGPS3/release/HGPS_v0.4.fits'
    filename = make_path(filename)
    table = Table.read(str(filename), hdu=hdu)

    source_name_alias = ('Associated_Object',)
    super(SourceCatalogHGPS, self).__init__(
        table=table,
        source_name_alias=source_name_alias,
    )

    if hdu == 'HGPS_SOURCES':
        self.components = Table.read(str(filename), hdu='HGPS_COMPONENTS')
        self.associations = Table.read(str(filename), hdu='HGPS_ASSOCIATIONS')
        self.identifications = Table.read(str(filename), hdu='HGPS_IDENTIFICATIONS')
def write(self, outdir, ogipdir="ogip_data", use_sherpa=False):
    """Write results to disk.

    Parameters
    ----------
    outdir : `~gammapy.extern.pathlib.Path`
        Output folder
    ogipdir : str, optional
        Folder name for OGIP data, default: 'ogip_data'
    use_sherpa : bool, optional
        Write Sherpa compliant files, default: False
    """
    outdir = make_path(outdir)
    log.info("Writing OGIP files to {}".format(outdir / ogipdir))
    outdir.mkdir(exist_ok=True, parents=True)
    self.observations.write(outdir / ogipdir, use_sherpa=use_sherpa)
def read(cls, filename): """Read map dataset from file. Parameters ---------- filename : str Filename to read from. Returns ------- dataset : `MapDataset` Map dataset. """ filename = make_path(filename) hdulist = fits.open(str(filename)) return cls.from_hdulist(hdulist)
def read(cls, filename): """Read from XML file. The XML definition of some models is uncompatible with the models currently implemented in gammapy. Therefore the following modifications happen to the XML model definition * PowerLaw: The spectral index is negative in XML but positive in gammapy. Parameter limits are ignored * ExponentialCutoffPowerLaw: The cutoff energy is transferred to lambda = 1 / cutof energy on read """ path = make_path(filename) xml = path.read_text() return cls.from_xml(xml)
def write(self, path, overwrite=False):
    """Write to YAML file."""
    path = make_path(path)

    if path.exists() and not overwrite:
        raise IOError(f"File exists already: {path}")

    if self.covariance is not None and len(self.parameters) != 0:
        filename = splitext(str(path))[0] + "_covariance.dat"
        kwargs = dict(format="ascii.fixed_width", delimiter="|", overwrite=overwrite)
        self.write_covariance(filename, **kwargs)
        self._covar_file = filename

    path.write_text(self.to_yaml())
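# Round-trip sketch for the YAML writer above, assuming it is Models.write from
# gammapy.modeling.models; the model content and file name are hypothetical.
from gammapy.modeling.models import Models, PowerLawSpectralModel, SkyModel

models = Models([SkyModel(spectral_model=PowerLawSpectralModel(), name="source-1")])
models.write("models.yaml", overwrite=True)
models_read = Models.read("models.yaml")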
def write(self, filename, **kwargs): """Write flux points. Parameters ---------- filename : str Filename kwargs : dict Keyword arguments passed to `astropy.table.Table.write`. """ filename = make_path(filename) try: self.table.write(str(filename), **kwargs) except IORegistryError: kwargs.setdefault("format", "ascii.ecsv") self.table.write(str(filename), **kwargs)
def make_summary(instrument):
    log.info(f"Preparing summary: {instrument}")

    # Data info
    # Fit results
    path = f"results/fit_{instrument}.rst"
    tab = Table.read(path, format="ascii")
    tab.add_index("name")
    dt = "U30"
    comp_tab = Table(names=("Param", "joint crab paper", "gammapy"), dtype=[dt, dt, dt])

    filename = make_path(
        str(Path().resolve().parent / "data" / "joint-crab" / "published" / "fit")
    )
    filename = filename / f"fit_{instrument}.yaml"
    with open(filename, "r") as file:
        paper_result = yaml.safe_load(file)

    for par in paper_result["parameters"]:
        if par["name"] != "reference":
            name = par["name"]
            ref = par["value"]
            ref_error = par["error"]
            if name == "beta":
                factor = np.log(10)
            else:
                factor = 1
            value = tab.loc[name]["value"] * factor
            error = tab.loc[name]["error"] * factor
            comp_tab.add_row([
                name,
                f"{ref:.3e} ± {ref_error:.3e}",
                f"{value:.3e} ± {error:.3e}",
            ])

    # Generate README.md file with table and plots
    path = f"results/{instrument}_comparison_table.md"
    comp_tab.write(path, format="ascii.html", overwrite=True)

    txt = Path(f"results/{instrument}_comparison_table.md").read_text()

    plot_contours(instrument)
    im1 = f"\n ![Contours](contours_{instrument}.png)"
    out = txt + im1

    Path(f"results/{instrument}_summary.md").write_text(out)
def write(self, filename, **kwargs): """ Write flux points. Parameters ---------- filename : str Filename kwargs : dict Keyword arguments passed to `~astropy.table.Table.write`. """ filename = make_path(filename) try: self.table.write(str(filename), **kwargs) except IORegistryError: kwargs.setdefault("format", "ascii.ecsv") self.table.write(str(filename), **kwargs)
def test_time_sampling(tmp_path):
    time = np.arange(0, 10, 0.06) * u.hour

    table = Table()
    table["TIME"] = time
    table["NORM"] = rate(time)
    table.meta = dict(MJDREFI=55197.0, MJDREFF=0, TIMEUNIT="hour")
    temporal_model = LightCurveTemplateTemporalModel(table)

    filename = str(make_path(tmp_path / "tmp.fits"))
    temporal_model.write(path=filename)
    model_read = temporal_model.read(filename)
    assert temporal_model.filename == filename
    assert model_read.filename == filename
    assert_allclose(model_read.table["TIME"].quantity.value, time.value)

    t_ref = "2010-01-01T00:00:00"
    t_min = "2010-01-01T00:00:00"
    t_max = "2010-01-01T08:00:00"

    sampler = temporal_model.sample_time(
        n_events=2, t_min=t_min, t_max=t_max, random_state=0, t_delta="10 min"
    )
    sampler = u.Quantity((sampler - Time(t_ref)).sec, "s")

    assert len(sampler) == 2
    assert_allclose(sampler.value, [12661.65802564, 7826.92991], rtol=1e-5)

    table = Table()
    table["TIME"] = time
    table["NORM"] = np.ones(len(time))
    table.meta = dict(MJDREFI=55197.0, MJDREFF=0, TIMEUNIT="hour")
    temporal_model_uniform = LightCurveTemplateTemporalModel(table)

    sampler_uniform = temporal_model_uniform.sample_time(
        n_events=2, t_min=t_min, t_max=t_max, random_state=0, t_delta="10 min"
    )
    sampler_uniform = u.Quantity((sampler_uniform - Time(t_ref)).sec, "s")

    assert len(sampler_uniform) == 2
    assert_allclose(sampler_uniform.value, [1261.65802564, 6026.9299098], rtol=1e-5)
def test_irf_dict_from_file_duplicate_irfs(caplog, tmp_path):
    """Catch the warning message about two types of IRF with the same HDU class
    encountered in the same file."""
    original_file = make_path(
        "$GAMMAPY_DATA/hess-dl3-dr1/data/hess_dl3_dr1_obs_id_020136.fits.gz"
    )
    dummy_file = tmp_path / "020136_duplicated_psf.fits"

    # create a dummy file with the PSF HDU repeated twice
    f = fits.open(original_file)
    f.append(f[5].copy())
    f[7].name = "PSF2"
    f.writeto(dummy_file)

    load_irf_dict_from_file(dummy_file)

    assert "more than one HDU" in caplog.text
    assert "loaded the PSF HDU in the dictionary" in caplog.text
def read(cls, filename): """Read map dictionary from file. Because FITS keywords are case insensitive, all key names will return as lower-case. Parameters ---------- filename : str Filename to read from. Returns ------- maps : `~gammapy.maps.Maps` Maps object. """ with fits.open(str(make_path(filename)), memmap=False) as hdulist: return cls.from_hdulist(hdulist)
def read(cls, filename, **kwargs): """Read :ref:`gadf:hdu-index`. Parameters ---------- filename : `pathlib.Path`, str Filename """ filename = make_path(filename) table = super().read(filename, **kwargs) table.meta["BASE_DIR"] = filename.parent.as_posix() # TODO: this is a workaround for the joint-crab validation with astropy>4.0. # TODO: Remove when handling of empty columns is clarified table["FILE_DIR"].fill_value = "" return table.filled()
def read(cls, filename, offset='0.5 deg'):
    """Read from a FITS file.

    Compute RMF at 0.5 deg offset on the fly.

    Parameters
    ----------
    filename : `str`
        File containing the IRFs
    offset : `~astropy.coordinates.Angle` or str
        Offset at which the RMF is computed, default: '0.5 deg'
    """
    filename = str(make_path(filename))

    with fits.open(filename, memmap=False) as hdulist:
        aeff = EffectiveAreaTable.from_hdulist(hdulist=hdulist)
        edisp = EnergyDispersion2D.read(filename, hdu='ENERGY DISPERSION')
        bkg = BgRateTable.from_hdulist(hdulist=hdulist)
        psf = Psf68Table.from_hdulist(hdulist=hdulist)
        sens = SensitivityTable.from_hdulist(hdulist=hdulist)

    # Create rmf with appropriate dimensions (e_reco->bkg, e_true->area)
    e_reco_min = bkg.energy.lo[0]
    e_reco_max = bkg.energy.hi[-1]
    e_reco_bin = bkg.energy.nbins
    e_reco_axis = EnergyBounds.equal_log_spacing(
        e_reco_min, e_reco_max, e_reco_bin, 'TeV',
    )

    e_true_min = aeff.energy.lo[0]
    e_true_max = aeff.energy.hi[-1]
    e_true_bin = aeff.energy.nbins
    e_true_axis = EnergyBounds.equal_log_spacing(
        e_true_min, e_true_max, e_true_bin, 'TeV',
    )

    rmf = edisp.to_energy_dispersion(
        offset=offset, e_reco=e_reco_axis, e_true=e_true_axis,
    )

    return cls(
        aeff=aeff,
        bkg=bkg,
        edisp=edisp,
        psf=psf,
        sens=sens,
        rmf=rmf,
    )
def read(cls, filename, hdu1="MATRIX", hdu2="EBOUNDS"):
    """Read from file.

    Parameters
    ----------
    filename : `pathlib.Path`, str
        File to read
    hdu1 : str, optional
        HDU containing the energy dispersion matrix, default: MATRIX
    hdu2 : str, optional
        HDU containing the energy axis information, default: EBOUNDS
    """
    filename = make_path(filename)
    with fits.open(str(filename), memmap=False) as hdulist:
        edisp = cls.from_hdulist(hdulist, hdu1=hdu1, hdu2=hdu2)
    return edisp
def __init__( self, filename="$GAMMAPY_DATA/catalogs/hgps_catalog_v1.fits.gz", hdu="HGPS_SOURCES", ): filename = make_path(filename) table = Table.read(filename, hdu=hdu) source_name_alias = ("Identified_Object",) super().__init__(table=table, source_name_alias=source_name_alias) self._table_components = Table.read(filename, hdu="HGPS_GAUSS_COMPONENTS") self._table_associations = Table.read(filename, hdu="HGPS_ASSOCIATIONS") self._table_associations["Separation"].format = ".6f" self._table_identifications = Table.read(filename, hdu="HGPS_IDENTIFICATIONS") self._table_large_scale_component = Table.read( filename, hdu="HGPS_LARGE_SCALE_COMPONENT" )
def __init__(self, mDM, channel):
    self.table_path = make_path(self.table_filename)
    if not self.table_path.exists():
        raise FileNotFoundError(
            f"\n\nFile not found: {self.table_filename}\n"
            "You may download the dataset needed with the following command:\n"
            "gammapy download datasets --src dark_matter_spectra"
        )
    else:
        self.table = Table.read(
            str(self.table_path),
            format="ascii.fast_basic",
            guess=False,
            delimiter=" ",
        )

    self.mDM = mDM
    self.channel = channel
def read(cls, filename, hdu="EVENTS"): """Read pointing information table from file to obtain the metadata. Parameters ---------- filename : str File name hdu : int or str HDU number or name Returns ------- pointing_info : `PointingInfo` Pointing info object """ filename = make_path(filename) table = Table.read(filename, hdu=hdu) return cls(meta=table.meta)
def from_dict(cls, data, **kwargs):
    """Create spectrum dataset from dict.

    Parameters
    ----------
    data : dict
        Dict containing data to create dataset from.

    Returns
    -------
    dataset : `SpectrumDatasetOnOff`
        Spectrum dataset on off.
    """
    filename = make_path(data["filename"])
    dataset = cls.read(filename=filename)
    dataset.mask_fit = None
    return dataset
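# Sketch of the dict consumed by from_dict above: only the "filename" key is used
# by this snippet; the class import and the OGIP PHA file name are assumptions.
from gammapy.datasets import SpectrumDatasetOnOff

data = {"filename": "pha_obs23523.fits"}
dataset = SpectrumDatasetOnOff.from_dict(data)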
def read(cls, filename, hdu="POINTING"): """Read `PointingInfo` table from file. Parameters ---------- filename : str File name hdu : int or str HDU number or name Returns ------- pointing_info : `PointingInfo` Pointing info object """ filename = make_path(filename) table = Table.read(filename, hdu=hdu) return cls(table=table)
def read_covariance(self, path, filename="_covariance.dat", **kwargs):
    """Read covariance data from file.

    Parameters
    ----------
    path : str or `pathlib.Path`
        Directory containing the covariance file.
    filename : str
        Filename
    **kwargs : dict
        Keyword arguments passed to `~astropy.table.Table.read`
    """
    path = make_path(path)
    filepath = str(path / filename)
    t = Table.read(filepath, **kwargs)
    t.remove_column("Parameters")
    arr = np.array(t)
    data = arr.view(float).reshape(arr.shape + (-1,))
    self.covariance = data
    self._covar_file = filename
def __init__(self, filename="$GAMMAPY_DATA/catalogs/fermi/gll_psch_v09.fit.gz"): filename = make_path(filename) with warnings.catch_warnings(): # ignore FITS units warnings warnings.simplefilter("ignore", u.UnitsWarning) table = Table.read(filename, hdu="2FHL Source Catalog") table_standardise_units_inplace(table) source_name_key = "Source_Name" source_name_alias = ("ASSOC", "3FGL_Name", "1FHL_Name", "TeVCat_Name") super().__init__( table=table, source_name_key=source_name_key, source_name_alias=source_name_alias, ) self.extended_sources_table = Table.read(filename, hdu="Extended Sources") self.rois = Table.read(filename, hdu="ROIs")
def read(cls, filename, hdu=None, format="gadf-dl3"):
    """Read from file.

    Parameters
    ----------
    filename : str or `Path`
        Filename
    hdu : str
        HDU name
    format : {"gadf-dl3"}
        Format specification

    Returns
    -------
    irf : `IRF`
        IRF class
    """
    with fits.open(str(make_path(filename)), memmap=False) as hdulist:
        return cls.from_hdulist(hdulist, hdu=hdu)
def read(cls, filename, interp_kwargs=None):
    """Build object from an XSPEC model.

    Todo: Format of XSPEC binary files should be referenced at
    https://gamma-astro-data-formats.readthedocs.io/en/latest/

    Parameters
    ----------
    filename : str
        File containing the model.
    interp_kwargs : dict
        Interpolation option passed to `ScaledRegularGridInterpolator`.

    Returns
    -------
    absorption : `Absorption`
        Absorption model.
    """
    # Create EBL data array
    filename = make_path(filename)
    table_param = Table.read(filename, hdu="PARAMETERS")

    # TODO: for some reason the table contains duplicated values
    param, idx = np.unique(table_param[0]["VALUE"], return_index=True)

    # Get energy values
    table_energy = Table.read(filename, hdu="ENERGIES")
    energy_lo = u.Quantity(table_energy["ENERG_LO"], "keV", copy=False)  # unit not stored in file
    energy_hi = u.Quantity(table_energy["ENERG_HI"], "keV", copy=False)  # unit not stored in file
    energy = np.sqrt(energy_lo * energy_hi)

    # Get spectrum values
    table_spectra = Table.read(filename, hdu="SPECTRA")
    data = table_spectra["INTPSPEC"].data[idx, :]

    return cls(
        energy=energy,
        param=param,
        data=data,
        filename=filename,
        interp_kwargs=interp_kwargs,
    )
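# Usage sketch for the XSPEC table reader above; the Returns section names the
# class Absorption, but the import path and the EBL file name used here are
# assumptions for illustration only.
from gammapy.modeling.models import Absorption

absorption = Absorption.read("ebl_dominguez11.fits.gz")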
def __init__(self, filename="$GAMMAPY_DATA/catalogs/fermi/gll_psch_v07.fit.gz"): filename = str(make_path(filename)) with warnings.catch_warnings(): # ignore FITS units warnings warnings.simplefilter("ignore", u.UnitsWarning) table = Table.read(filename, hdu="LAT_Point_Source_Catalog") table_standardise_units_inplace(table) source_name_key = "Source_Name" source_name_alias = ("ASSOC1", "ASSOC2", "ASSOC_TEV", "ASSOC_GAM") super().__init__( table=table, source_name_key=source_name_key, source_name_alias=source_name_alias, ) self.extended_sources_table = Table.read(filename, hdu="ExtendedSources")
def read(cls, filename, **kwargs): """ Read flux points. Parameters ---------- filename : str Filename kwargs : dict Keyword arguments passed to `~astropy.table.Table.read`. """ filename = make_path(filename) try: table = Table.read(str(filename), **kwargs) except IORegistryError: kwargs.setdefault("format", "ascii.ecsv") table = Table.read(str(filename), **kwargs) if "SED_TYPE" not in table.meta.keys(): sed_type = cls._guess_sed_type(table) table.meta["SED_TYPE"] = sed_type return cls(table=table)
def read(cls, filename, **kwargs):
    """Read table from file; keyword arguments are passed to `~astropy.table.Table.read`."""
    filename = make_path(filename)
    table = Table.read(str(filename), **kwargs)
    return cls(table=table)
)
test_args.append(
    dict(
        chain='ParisAnalysis',
        store='hess-crab4-pa',
        obs=23523,
        aeff2D_reference_file='$GAMMAPY_EXTRA/test_datasets/reference/pa_aeff2D_reference.txt',
        edisp2D_reference_file='$GAMMAPY_EXTRA/test_datasets/reference/pa_edisp2D_reference.txt',
        psf3gauss_reference_file='$GAMMAPY_EXTRA/test_datasets/reference/pa_psf3gauss_reference.txt',
    )
)

with open('reference_info.yaml', 'w') as outfile:
    outfile.write(yaml.dump(test_args, default_flow_style=False))

dm = data_manager()
for chain in test_args:
    store = dm[chain['store']]

    aeff = store.load(chain['obs'], filetype='aeff')
    filename = make_path(chain['aeff2D_reference_file'])
    with open(str(filename), 'w') as f:
        f.write(aeff.info())

    edisp = store.load(chain['obs'], filetype='edisp')
    filename = make_path(chain['edisp2D_reference_file'])
    with open(str(filename), 'w') as f:
        f.write(edisp.info())

    psf3g = store.load(chain['obs'], filetype='psf')
    filename = make_path(chain['psf3gauss_reference_file'])
    with open(str(filename), 'w') as f:
        f.write(psf3g.info())
def read(cls, filename, hdu='SENSITVITY'):
    filename = make_path(filename)
    with fits.open(str(filename), memmap=False) as hdulist:
        return cls.from_hdulist(hdulist, hdu=hdu)
def read(cls, filename, hdu='POINT SPREAD FUNCTION'):
    filename = make_path(filename)
    with fits.open(str(filename), memmap=False) as hdulist:
        return cls.from_hdulist(hdulist, hdu=hdu)
    psf_reference_file='$GAMMAPY_EXTRA/test_datasets/reference/pa_psf_reference.txt',
    obs_reference_file='$GAMMAPY_EXTRA/test_datasets/reference/pa_obs_reference.txt',
    location_reference_file='$GAMMAPY_EXTRA/test_datasets/reference/pa_location_reference.txt')
)

test_energy = [0.1, 1, 5, 10] * u.TeV
test_offset = [0.1, 0.2, 0.4] * u.deg

dm = data_manager()
for chain in test_args:
    store = dm[chain['store']]
    chain['obs_id'] = int(store.obs_table['OBS_ID'][chain['obs']])
    obs = store.obs(obs_id=chain['obs_id'])

    filename = make_path(chain['location_reference_file'])
    with open(str(filename), 'w') as f:
        f.write(str(obs.location(hdu_type='events').path(abs_path=False)))

    filename = make_path(chain['obs_reference_file'])
    with open(str(filename), 'w') as f:
        f.write(str(obs))

    aeff = obs.aeff
    print(aeff)
    aeff_val = aeff.evaluate(energy=test_energy, offset=test_offset)
    filename = make_path(chain['aeff2D_reference_file'])
    np.savetxt(str(filename), aeff_val)

    edisp = obs.edisp