Example #1
import shutil
from pathlib import Path


def make_fresh_dir(path):
    """Make a fresh directory, deleting it first if it exists."""
    path = Path(path)
    if path.is_dir():
        shutil.rmtree(str(path))
    path.mkdir()
    return path
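
A quick usage sketch (the scratch path is illustrative):

work_dir = make_fresh_dir('/tmp/galprop_work')  # hypothetical location
# work_dir now exists and is guaranteed to be empty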
Example #2
import logging
import os
from pathlib import Path

import galprop  # local analysis module


def main():
    logging.basicConfig(level=logging.DEBUG)

    ################################
    # Directory and file names
    ################################
    base_dir = '/nfs/d22/hfm/hillert/data-files/galprop'
    run = 'results_54_0353000q_zdguc6ruot1bue7f'
    galprop_dir = Path(base_dir) / run
    results_dir = galprop_dir / 'results'
    ref_file = Path(base_dir) / 'hess_exclusion_0.3_part.fits'

    # Create and change into the results dir
    # (pathlib.Path has no chdir method, so use os.chdir)
    results_dir.mkdir(exist_ok=True)
    os.chdir(str(results_dir))

    ################################
    # Prepare data
    ################################

    galprop.prepare(galprop_dir)
    galprop.reproject_to(ref_file)
    galprop.make_mask_and_area(ref_file)

    ################################
    # Compute results and make plots
    ################################

    g = galprop.Galprop(clobber=True)
    g.do_all()
Example #3
    def write_ogip(self, phafile=None, bkgfile=None, rmffile=None, arffile=None,
                   outdir=None, clobber=True):
        """Write OGIP files

        Only those objects are written have been created with the appropriate
        functions before

        Parameters
        ----------
        phafile : str
            PHA filename
        bkgfile : str
            BKG filename
        arffile : str
            ARF filename
        rmffile : str
            RMF : filename
        outdir : None
            directory to write the files to
        clobber : bool
            Overwrite
        """

        if outdir is None:
            outdir = "ogip_data"

        basedir = Path(outdir)
        basedir.mkdir(exist_ok=True)

        if arffile is None:
            arffile = basedir / "arf_run{}.fits".format(self.obs)
        if rmffile is None:
            rmffile = basedir / "rmf_run{}.fits".format(self.obs)
        if phafile is None:
            phafile = basedir / "pha_run{}.pha".format(self.obs)
        if bkgfile is None:
            bkgfile = basedir / "bkg_run{}.pha".format(self.obs)

        self.phafile = phafile

        if self.pha is not None:
            self.pha.write(str(phafile), bkg=str(bkgfile), arf=str(arffile),
                           rmf=str(rmffile), clobber=clobber)
        if self.bkg is not None:
            self.bkg.write(str(bkgfile), clobber=clobber)
        if self.arf is not None:
            self.arf.write(str(arffile), energy_unit='keV', effarea_unit='cm2',
                           clobber=clobber)
        if self.rmf is not None:
            self.rmf.write(str(rmffile), energy_unit='keV', clobber=clobber)
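
A hedged call sketch, assuming `obs` is an instance of the surrounding class with `pha`, `bkg`, `arf` and `rmf` already set:

obs.write_ogip(outdir='ogip_data', clobber=True)
# With no filenames given, this writes pha_run<obs>.pha, bkg_run<obs>.pha,
# arf_run<obs>.fits and rmf_run<obs>.fits into ogip_data/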
Example #4
    @classmethod
    def read(cls, phafile, rmffile=None):
        """Read PHA FITS file

        The energy binning is not contained in the PHA standard. Therefore it
        is inferred from the corresponding RMF EBOUNDS extension.

        Parameters
        ----------
        phafile : str
            PHA file with ``SPECTRUM`` extension
        rmffile : str, optional
            RMF file with ``EBOUNDS`` extension
        """
        phafile = make_path(phafile)
        spectrum = fits.open(str(phafile))['SPECTRUM']
        counts = [val[1] for val in spectrum.data]
        if rmffile is None:
            val = spectrum.header['RESPFILE']
            if val == '':
                raise ValueError('RMF file not set in PHA header. '
                                 'Please provide RMF file for energy binning')
            # RESPFILE is given relative to the PHA file location,
            # so rebuild the path starting from the PHA file's directory
            parts = phafile.parts[:-1]
            rmffile = Path.cwd()
            for part in parts:
                rmffile = rmffile.joinpath(part)
            rmffile = rmffile.joinpath(val)

        rmffile = make_path(rmffile)
        ebounds = fits.open(str(rmffile))['EBOUNDS']
        bins = EnergyBounds.from_ebounds(ebounds)
        livetime = Quantity(0, 's')
        return cls(counts, bins, livetime=livetime)
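
How the RESPFILE fallback resolves in practice (filenames are illustrative):

pha = make_path('ogip_data/pha_run123.pha')
# If the SPECTRUM header has RESPFILE = 'rmf_run123.fits', the RMF path
# is rebuilt as Path.cwd() / 'ogip_data' / 'rmf_run123.fits'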
Example #5
    def run(self, method='sherpa', outdir=None):
        """Run all steps

        Parameters
        ----------
        method : str {sherpa}
            Fit method to use
        outdir : Path, str
            directory to write results files to
        """
        cwd = Path.cwd()
        outdir = cwd if outdir is None else make_path(outdir)
        outdir.mkdir(exist_ok=True)
        os.chdir(str(outdir))

        self.set_default_thresholds()

        if method == 'hspec':
            self._run_hspec_fit()
        elif method == 'sherpa':
            self._run_sherpa_fit()
        else:
            raise ValueError('Undefined fitting method: {}'.format(method))

        modelname = self.result.spectral_model
        self.result.to_yaml('fit_result_{}.yaml'.format(modelname))
        self.write_npred()

        os.chdir(str(cwd))
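
A hedged usage sketch; constructing the analysis object is elided since it depends on the surrounding class:

analysis.run(method='sherpa', outdir='fit_results')
# writes fit_result_<model>.yaml and the npred file into fit_results/,
# then restores the original working directory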
Example #7
import logging
import os
import sys
from pathlib import Path


def main():

    if "GAMMAPY_DATA" not in os.environ:
        logging.info("GAMMAPY_DATA environment variable not set.")
        logging.info("Running notebook tests requires this environment variable.")
        logging.info("Exiting now.")
        sys.exit()

    passed = True
    yamlfile = get_notebooks()
    dirnbs = Path("tutorials")

    for notebook in yamlfile:
        if requirement_missing(notebook):
            logging.info(
                "Skipping notebook {} because requirement is missing.".format(
                    notebook["name"]
                )
            )
            continue

        filename = notebook["name"] + ".ipynb"
        path = dirnbs / filename

        if not notebook_test(path):
            passed = False

    assert passed
Example #8
import os
from pathlib import Path

import yaml


def get_notebooks():
    """Read `notebooks.yaml` info."""
    filename = str(
        Path(os.environ['GAMMAPY_EXTRA']) / 'notebooks' / 'notebooks.yaml')
    with open(filename) as fh:
        notebooks = yaml.safe_load(fh)
    return notebooks
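
For reference, a minimal notebooks.yaml that would satisfy this reader and the loops in the other examples (only the name and test keys are attested there; anything beyond that is an assumption):

- name: first_steps
  test: true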
Example #9
from pathlib import Path

import numpy as np
from gammapy.cube import SkyCube


def load_cubes(config):
    cube_dir = Path(config['logging']['working_dir'])
    npred_cube = SkyCube.read(cube_dir / 'npred_cube.fits.gz')
    exposure_cube = SkyCube.read(cube_dir / 'exposure_cube.fits', format='fermi-exposure')
    # Replace NaNs in the exposure with zeros so downstream
    # arithmetic is well defined
    i_nan = np.where(np.isnan(exposure_cube.data))
    exposure_cube.data[i_nan] = 0

    # npred_cube_convolved = SkyCube.read(cube_dir / 'npred_cube_convolved.fits.gz')

    return dict(counts=npred_cube, exposure=exposure_cube)
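
A hedged usage sketch; config only needs the working_dir entry used above:

config = {'logging': {'working_dir': 'analysis_results'}}  # illustrative
cubes = load_cubes(config)
print(cubes['counts'].data.shape, np.nansum(cubes['exposure'].data))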
Example #10
def prepare(galprop_dir, tag='orig', clobber=True):
    """Convert GALPROP output files to a format I like.

    1) Put the Galactic center at the image center, instead
       of having the Galactic anti-center at the image center.
    2) Have the longitude axis increase to the left.
    3) Create a total cube, which is the sum of the three
       components.
    4) Use simple file names.

    The modified cubes are written to results_dir, which
    is then used by the GALPROP class."""
    def fix(hdu):
        # Get data and header of the cube
        data, header = hdu.data, hdu.header

        # Fix header
        header['CDELT1'] = -header['CDELT1']
        header['CRVAL1'] = 0

        # fix data
        half_naxis = header['NAXIS1'] // 2  # integer division so slicing works
        left = data[:, :, :half_naxis]
        right = data[:, :, half_naxis:]
        data = np.dstack((right, left))
        data = data[:, :, ::-1]

        # Store the changes
        hdu.data, hdu.header = data, header

    def map_id():
        _ = galprop_dir.split('/')
        dirname = _[-1] if _[-1] else _[-2]
        return '_'.join(dirname.split('_')[1:3])

    # Copy and fix all the components
    infiles = []
    for component in components:
        # Build the input filename for each component (and actually collect it)
        infiles.append(Path(galprop_dir) / (component + '_mapcube_' + map_id() + '.gz'))
    outfiles = [filename(tag, ii) for ii in range(len(components))]
    for ii in [1, 2, 3]:  # Total component doesn't exist yet
        logging.info('Fixing {0}'.format(components[ii]))
        hdulist = fits.open(infiles[ii])
        fix(hdulist['PRIMARY'])
        hdulist.writeto(outfiles[ii], clobber=clobber)
    logging.info('Computing total cube')
    total = fits.open(outfiles[1])
    for ii in [2, 3]:
        total['PRIMARY'].data += fits.getdata(outfiles[ii])
    total.writeto(outfiles[0], clobber=clobber)
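
A tiny standalone check of the roll-and-flip in fix(), with an illustrative 8-pixel longitude axis:

import numpy as np

data = np.arange(8).reshape(1, 1, 8)  # stand-in cube: 8 longitude pixels
half = data.shape[2] // 2
swapped = np.dstack((data[:, :, half:], data[:, :, :half]))  # swap halves: center moves to the middle
flipped = swapped[:, :, ::-1]  # longitude now increases to the left
print(flipped[0, 0])  # [3 2 1 0 7 6 5 4]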
Example #11
    def stack_groups(self):
        """Stack observations in each group """
        stacked_obs = list()

        sorted_table = self.obs_table.group_by('GROUP_ID')
        for group in sorted_table.groups:
            group_id = group['GROUP_ID'][0]
            log.info('Stacking observations in group {}'.format(group_id))
            log.info('{}'.format(list(group['OBS_ID'])))
            temp = SpectrumObservationList.from_observation_table(group)
            stacked = SpectrumObservation.stack_observation_list(temp)
            stacked.meta.phafile = 'pha_group{}.fits'.format(group_id)
            stacked.meta.ogip_dir = Path.cwd() / 'ogip_data_stacked'

            stacked_obs.append(stacked)

        self.stacked_observations = SpectrumObservationList(stacked_obs)
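
How Table.group_by drives the loop above, as a standalone astropy sketch with made-up observation IDs:

from astropy.table import Table

t = Table({'GROUP_ID': [1, 1, 2], 'OBS_ID': [101, 102, 201]})
for group in t.group_by('GROUP_ID').groups:
    print(group['GROUP_ID'][0], group['OBS_ID'].tolist())
# 1 [101, 102]
# 2 [201]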
Example #13
def add_bkgmodel_to_indextable(bkg_model_directory, source_name, obsdir):
    """
    Creates an indextable with the location of the bkg files you want to use to compute the bkg model

    Parameters
    ----------
    bkg_model_directory: directory where is located the bkg model you want to use for your bkg image
    source_name: name of the source you want to compute the image
    obsdir: directory where you want to put these data

    Returns
    -------

    """
    ds = DataStore.from_dir(obsdir)
    bgmaker = OffDataBackgroundMaker(ds)
    bkg_model_outdir = Path(bkg_model_directory)
    group_filename = str(bkg_model_outdir / 'group_def.fits')
    index_table = bgmaker.make_total_index_table(ds, "2D", bkg_model_outdir,
                                                 group_filename, True)
    fn = obsdir + '/hdu-index.fits.gz'
    index_table.write(fn, overwrite=True)
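
A hedged invocation sketch with illustrative paths:

add_bkgmodel_to_indextable(
    bkg_model_directory='background_model',
    source_name='Crab',
    obsdir='obs_crab',
)
# obs_crab/hdu-index.fits.gz now points at the background model files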
Example #14
import sys
import logging

from sherpa.astro.ui import *
from sherpa.utils.err import FitErr
from astropy.stats import gaussian_sigma_to_fwhm
import morphology.utils
import morphology.psf

logger = logging.getLogger('sherpa')
logger.setLevel(logging.WARN)
logging.basicConfig(level=logging.INFO)

# ---------------------------------------------------------
# Check if the output significance file exists, so we don't waste
# time computing the significance image only to fail when saving it
# ---------------------------------------------------------

if not args.overwrite and Path(args.significance_image).is_file():
    logging.error('Output file exists: {0}'.format(args.significance_image))
    sys.exit(-1)

# ---------------------------------------------------------
# Load images, PSF and sources
# ---------------------------------------------------------
logging.info('Reading counts: {0}'.format(args.counts))
load_data(args.counts)

logging.info('Reading exposure: {0}'.format(args.exposure))
load_table_model('exposure', args.exposure)

logging.info('Reading background: {0}'.format(args.background))
load_table_model('background', args.background)
Example #15

# now compute the 2D PSF
psf2D = psf_kernel.make_image(exposures=exposure_at_pos)

# ### Make 2D images from 3D ones
#
# Since sherpa image fitting works only with 2-dim images,
# we convert the generated maps to 2D images using `make_images()` and save them as FITS files. The exposure is weighted with the spectrum (assumed to be a power law by default) before averaging.
#

# In[ ]:

maps = maker.make_images()

# In[ ]:

Path("analysis_3d").mkdir(exist_ok=True)

maps["counts"].write("analysis_3d/counts_2D.fits", overwrite=True)
maps["background"].write("analysis_3d/background_2D.fits", overwrite=True)
maps["exposure"].write("analysis_3d/exposure_2D.fits", overwrite=True)
fits.writeto("analysis_3d/psf_2D.fits", psf2D.data, overwrite=True)

# ### Read the maps and store them in a sherpa model
#
# We now have the prepared files which sherpa can read.
# This part of the notebook shows how to do image analysis using sherpa

# In[ ]:

import sherpa.astro.ui as sh
Example #16
# You don't have to read the code in the next cell; that's just how to download files from Python.
# You could also download the files with your web browser, or from the command line e.g. with curl:
#
#     mkdir hgps_data
#     cd hgps_data
#     curl -O https://www.mpi-hd.mpg.de/hfm/HESS/hgps/data/hgps_catalog_v1.fits.gz
#     curl -O https://www.mpi-hd.mpg.de/hfm/HESS/hgps/data/hgps_map_significance_0.1deg_v1.fits.gz
#
# **The rest of this notebook assumes that you have the data files at ``hgps_data_path``.**

# In[ ]:

# Download HGPS data used in this tutorial to a folder of your choice
# The default `hgps_data` used here is a sub-folder in your current
# working directory (where you started the notebook)
hgps_data_path = Path("hgps_data")

# In this notebook we will only be working with the following files
# so we only download what is needed.
hgps_filenames = [
    "hgps_catalog_v1.fits.gz",
    "hgps_map_significance_0.1deg_v1.fits.gz",
]

# In[ ]:


from urllib.request import urlretrieve


def hgps_data_download():
    base_url = "https://www.mpi-hd.mpg.de/hfm/HESS/hgps/data/"
    hgps_data_path.mkdir(exist_ok=True)
    for filename in hgps_filenames:
        url = base_url + filename
        # Completion sketch: download with urllib unless the file is already present
        target = hgps_data_path / filename
        if not target.exists():
            urlretrieve(url, str(target))
Example #17
def test_cube_pipe(tmpdir):
    tmpdir = str(tmpdir)
    outdir = tmpdir
    outdir2 = outdir + '/background'
    Path(outdir2).mkdir()

    ds = DataStore.from_dir("$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2")
    ds.copy_obs(ds.obs_table, tmpdir)
    data_store = DataStore.from_dir(tmpdir)

    # Create the background model from the 4 Crab observations
    bgmaker = OffDataBackgroundMaker(data_store, outdir=outdir2)

    bgmaker.select_observations(selection='all')
    bgmaker.group_observations()
    bgmaker.make_model("2D")
    bgmaker.save_models("2D")

    fn = outdir2 + '/group-def.fits'

    # New hdu table that contains the link to the background model
    hdu_index_table = bgmaker.make_total_index_table(
        data_store=data_store,
        modeltype='2D',
        out_dir_background_model=outdir2,
        filename_obs_group_table=fn)

    fn = outdir + '/hdu-index.fits.gz'
    hdu_index_table.write(fn, overwrite=True)

    center = SkyCoord(83.63, 22.01, unit='deg').galactic
    offset_band = Angle([0, 2.49], 'deg')

    ref_cube_images = make_empty_cube(
        image_size=250,
        energy=[Energy(0.5, "TeV"), Energy(100, "TeV"), 5],
        center=center)
    ref_cube_exposure = make_empty_cube(
        image_size=250,
        energy=[Energy(0.1, "TeV"), Energy(120, "TeV"), 80],
        center=center,
        data_unit="m2 s")
    ref_cube_skymask = make_empty_cube(
        image_size=250,
        energy=[Energy(0.5, "TeV"), Energy(100, "TeV"), 5],
        center=center)

    data_store = DataStore.from_dir(tmpdir)

    refheader = ref_cube_images.sky_image_ref.to_image_hdu().header
    exclusion_mask = SkyMask.read(
        '$GAMMAPY_EXTRA/datasets/exclusion_masks/tevcat_exclusion.fits')
    exclusion_mask = exclusion_mask.reproject(reference=refheader)
    ref_cube_skymask.data = np.tile(exclusion_mask.data, (5, 1, 1))
    # Loading the PSF table for one of the runs is not implemented yet, so drop that row
    data_store.hdu_table.remove_row(14)

    # Cube Analysis
    cube_maker = StackedObsCubeMaker(empty_cube_images=ref_cube_images,
                                     empty_exposure_cube=ref_cube_exposure,
                                     offset_band=offset_band,
                                     data_store=data_store,
                                     obs_table=data_store.obs_table,
                                     exclusion_mask=ref_cube_skymask,
                                     save_bkg_scale=True)
    cube_maker.make_cubes(make_background_image=True, radius=10.)

    assert_allclose(cube_maker.counts_cube.data.sum(), 4898.0, atol=3)
    assert_allclose(cube_maker.bkg_cube.data.sum(), 4260.120595293951, atol=3)
    cube_maker.significance_cube.data[np.where(
        np.isinf(cube_maker.significance_cube.data))] = 0
    assert_allclose(np.nansum(cube_maker.significance_cube.data),
                    67613.24519908393,
                    atol=3)
    assert_allclose(cube_maker.excess_cube.data.sum(),
                    637.8794047060486,
                    atol=3)
    assert_quantity_allclose(np.nansum(cube_maker.exposure_cube.data),
                             Quantity(4891844242766714.0, "m2 s"),
                             atol=Quantity(3, "m2 s"))
    assert_allclose(cube_maker.table_bkg_scale[0]["bkg_scale"],
                    0.8956177614218819)

    assert len(cube_maker.counts_cube.energies()) == 5
    assert len(cube_maker.bkg_cube.energies()) == 5
    assert len(cube_maker.significance_cube.energies()) == 5
    assert len(cube_maker.excess_cube.energies()) == 5
    assert len(cube_maker.exposure_cube.energies()) == 80
Example #18
from pathlib import Path

import yaml


def get_notebooks():
    """Read `notebooks.yaml` info."""
    filename = str(Path("tutorials") / "notebooks.yaml")
    with open(filename) as fh:
        notebooks = yaml.safe_load(fh)
    return notebooks
Example #19
from gammapy.irf import EnergyDispersion
from gammapy.cube import SkyCube
from gammapy.cube.sherpa_ import (
    CombinedModel3DInt,
    CombinedModel3DIntConvolveEdisp,
    NormGauss2DInt,
)

from sherpa.models import PowLaw1D, TableModel
from sherpa.estmethods import Covariance
from sherpa.optmethods import NelderMead
from sherpa.stats import Cash
from sherpa.fit import Fit
import sherpa

import numpy as np
from pathlib import Path

cube_dir = Path('$GAMMAPY_EXTRA/test_datasets/cube')
counts_3d = SkyCube.read(cube_dir / 'counts_cube.fits')
# Transformation to a sherpa object
cube = counts_3d.to_sherpa_data3d(dstype='Data3DInt')

bkg_3d = SkyCube.read(cube_dir / 'bkg_cube.fits')
bkg = TableModel('bkg')
bkg.load(None, bkg_3d.data.value.ravel())
bkg.ampl = 1
bkg.ampl.freeze()

# Assumed: the exposure cube is read like the others before the NaN cleanup
exposure = SkyCube.read(cube_dir / 'exposure_cube.fits')
i_nan = np.where(np.isnan(exposure.data))
exposure.data[i_nan] = 0
Example #20
# define energy grid
energy = energy_axis.edges * energy_axis.unit

# mean edisp
edisp = obs_list.make_mean_edisp(position=src_pos,
                                 e_true=energy,
                                 e_reco=energy)

# ### Save maps and IRFs to disk
#
# It is common to run the preparation step independently of the likelihood fit, because the preparation of maps, PSF and energy dispersion is often slow if you have a lot of data. We first create a folder:

# In[ ]:

path = Path("analysis_3d")
path.mkdir(exist_ok=True)

# And then write the maps and IRFs to disk by calling the dedicated `.write()` methods:

# In[ ]:

# write maps
maps["counts"].write(str(path / "counts.fits"), overwrite=True)
maps["background"].write(str(path / "background.fits"), overwrite=True)
maps["exposure"].write(str(path / "exposure.fits"), overwrite=True)

# write IRFs
psf_kernel.write(str(path / "psf.fits"), overwrite=True)
edisp.write(str(path / "edisp.fits"), overwrite=True)
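
# To sanity-check what was written, you can inspect the FITS files directly
# with astropy, independent of the gammapy classes (a minimal check):

# In[ ]:

from astropy.io import fits

print([hdu.name for hdu in fits.open(str(path / "counts.fits"))])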
Example #21
def make_cubes(ereco, etrue, use_etrue, center):
    tmpdir = os.path.expandvars('$GAMMAPY_EXTRA') + "/test_datasets/cube/data"
    outdir = tmpdir
    outdir2 = os.path.expandvars(
        '$GAMMAPY_EXTRA') + '/test_datasets/cube/background'

    if os.path.isdir("data"):
        shutil.rmtree("data")
    if os.path.isdir("background"):
        shutil.rmtree("background")
    Path(outdir2).mkdir()

    ds = DataStore.from_dir("$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2")
    ds.copy_obs(ds.obs_table, tmpdir)
    data_store = DataStore.from_dir(tmpdir)
    # Create a background model from the 4 Crab runs, using the counts outside
    # the exclusion region. This is just for the test; normally you would build
    # this kind of model from thousands of AGN runs.
    axes = [ObservationGroupAxis('ZEN_PNT', [0, 49, 90], fmt='edges')]
    obs_groups = ObservationGroups(axes)
    obs_table_with_group_id = obs_groups.apply(data_store.obs_table)
    obs_groups.obs_groups_table.write(outdir2 + "/group-def.fits",
                                      overwrite=True)
    # Exclusion sources table
    cat = SourceCatalogGammaCat()
    exclusion_table = cat.table
    exclusion_table.rename_column('ra', 'RA')
    exclusion_table.rename_column('dec', 'DEC')
    radius = exclusion_table['morph_sigma']
    radius.value[np.isnan(radius)] = 0.3
    exclusion_table['Radius'] = radius
    exclusion_table = Table(exclusion_table)

    bgmaker = OffDataBackgroundMaker(data_store,
                                     outdir2,
                                     run_list=None,
                                     obs_table=obs_table_with_group_id,
                                     ntot_group=obs_groups.n_groups,
                                     excluded_sources=exclusion_table)
    bgmaker.make_model("2D")
    bgmaker.smooth_models("2D")
    bgmaker.save_models("2D")
    bgmaker.save_models(modeltype="2D", smooth=True)

    shutil.move(str(outdir2), str(outdir))
    fn = outdir + '/background/group-def.fits'
    hdu_index_table = bgmaker.make_total_index_table(
        data_store=data_store,
        modeltype='2D',
        out_dir_background_model="background",
        filename_obs_group_table=fn,
        smooth=True)
    fn = outdir + '/hdu-index.fits.gz'
    hdu_index_table.write(fn, overwrite=True)

    offset_band = Angle([0, 2.49], 'deg')

    ref_cube_images = make_empty_cube(image_size=50,
                                      energy=ereco,
                                      center=center)
    ref_cube_exposure = make_empty_cube(image_size=50,
                                        energy=etrue,
                                        center=center,
                                        data_unit="m2 s")

    data_store = DataStore.from_dir(tmpdir)

    refheader = ref_cube_images.sky_image_ref.to_image_hdu().header
    exclusion_mask = SkyMask.read(
        '$GAMMAPY_EXTRA/datasets/exclusion_masks/tevcat_exclusion.fits')
    exclusion_mask = exclusion_mask.reproject(reference=refheader)

    # Loading the PSF table for one of the runs is not implemented yet, so drop that row
    data_store.hdu_table.remove_row(14)

    cube_maker = StackedObsCubeMaker(empty_cube_images=ref_cube_images,
                                     empty_exposure_cube=ref_cube_exposure,
                                     offset_band=offset_band,
                                     data_store=data_store,
                                     obs_table=data_store.obs_table,
                                     exclusion_mask=exclusion_mask,
                                     save_bkg_scale=True)
    cube_maker.make_cubes(make_background_image=True, radius=10.)
    obslist = [data_store.obs(obs_id) for obs_id in data_store.obs_table["OBS_ID"]]
    ObsList = ObservationList(obslist)
    mean_psf_cube = make_mean_psf_cube(image_size=50,
                                       energy_cube=etrue,
                                       center_maps=center,
                                       center=center,
                                       ObsList=ObsList,
                                       spectral_index=2.3)
    if use_etrue:
        mean_rmf = make_mean_rmf(energy_true=etrue,
                                 energy_reco=ereco,
                                 center=center,
                                 ObsList=ObsList)

    filename_mask = 'exclusion_mask.fits'
    filename_counts = 'counts_cube.fits'
    filename_bkg = 'bkg_cube.fits'
    filename_significance = 'significance_cube.fits'
    filename_excess = 'excess_cube.fits'
    if use_etrue:
        filename_exposure = 'exposure_cube_etrue.fits'
        filename_psf = 'psf_cube_etrue.fits'
        filename_rmf = 'rmf.fits'
        mean_rmf.write(filename_rmf, clobber=True)
    else:
        filename_exposure = 'exposure_cube.fits'
        filename_psf = 'psf_cube.fits'
    exclusion_mask.write(filename_mask, clobber=True)
    cube_maker.counts_cube.write(filename_counts,
                                 format="fermi-counts",
                                 clobber=True)
    cube_maker.bkg_cube.write(filename_bkg,
                              format="fermi-counts",
                              clobber=True)
    cube_maker.significance_cube.write(filename_significance,
                                       format="fermi-counts",
                                       clobber=True)
    cube_maker.excess_cube.write(filename_excess,
                                 format="fermi-counts",
                                 clobber=True)
    cube_maker.exposure_cube.write(filename_exposure,
                                   format="fermi-counts",
                                   clobber=True)
    mean_psf_cube.write(filename_psf, format="fermi-counts", clobber=True)
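
A hedged invocation sketch, reusing the energy binning conventions from the test in Example #17 (SkyCoord and Energy as imported there):

center = SkyCoord(83.63, 22.01, unit='deg').galactic
ereco = [Energy(0.5, 'TeV'), Energy(100, 'TeV'), 5]   # reco axis: min, max, nbins
etrue = [Energy(0.1, 'TeV'), Energy(120, 'TeV'), 80]  # true-energy axis
make_cubes(ereco=ereco, etrue=etrue, use_etrue=True, center=center)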
Example #22
def build_notebooks(args):

    if "GAMMAPY_DATA" not in os.environ:
        logging.info("GAMMAPY_DATA environment variable not set.")
        logging.info(
            "Running notebook tests requires this environment variable.")
        logging.info("Exiting now.")
        sys.exit()

    # prepare folder structure
    pathsrc = Path(args.src)
    path_temp = Path("temp")
    path_empty_nbs = Path("tutorials")
    path_filled_nbs = Path("docs") / "notebooks"
    path_static_nbs = Path("docs") / "_static" / "notebooks"

    rmtree(str(path_temp), ignore_errors=True)
    path_temp.mkdir(parents=True, exist_ok=True)
    path_filled_nbs.mkdir(parents=True, exist_ok=True)
    path_static_nbs.mkdir(parents=True, exist_ok=True)

    if pathsrc == path_empty_nbs:
        rmtree(str(path_temp), ignore_errors=True)
        rmtree(str(path_static_nbs), ignore_errors=True)
        rmtree(str(path_filled_nbs), ignore_errors=True)
        copytree(str(path_empty_nbs), str(path_temp), ignore=ignorefiles)
    elif pathsrc.exists():
        notebookname = pathsrc.name
        pathdest = path_temp / notebookname
        copyfile(str(pathsrc), str(pathdest))
    else:
        logging.info("Notebook file does not exist.")
        sys.exit()

    # strip and blackformat
    subprocess.call("gammapy jupyter --src temp black", shell=True)
    subprocess.call("gammapy jupyter --src temp strip", shell=True)

    # test / run
    passed = True
    for path in path_temp.glob("*.ipynb"):
        if not notebook_test(path):
            passed = False

    # convert into scripts
    # copy generated filled notebooks to doc
    # if passed:

    if pathsrc == path_empty_nbs:
        # copytree is needed to copy subfolder images
        copytree(str(path_empty_nbs), str(path_static_nbs), ignore=ignoreall)
        for path in path_static_nbs.glob("*.ipynb"):
            subprocess.call("jupyter nbconvert --to script '{}'".format(
                str(path)),
                            shell=True)
        copytree(str(path_temp), str(path_filled_nbs), ignore=ignorefiles)
    else:
        pathsrc = path_temp / notebookname
        pathdest = path_static_nbs / notebookname
        copyfile(str(pathsrc), str(pathdest))
        subprocess.call("jupyter nbconvert --to script '{}'".format(
            str(pathdest)),
                        shell=True)
        pathdest = path_filled_nbs / notebookname
        copyfile(str(pathsrc), str(pathdest))

    # else:
    #    logging.info("Tests have not passed.")
    #    logging.info("Tutorials not ready for documentation building process.")
    #    rmtree(str(path_static_nbs), ignore_errors=True)

    # tear down
    rmtree(str(path_temp), ignore_errors=True)
Example #23

from sherpa.stats import Cash
from sherpa.fit import Fit


# ## 3D analysis assuming that there is no energy dispersion (perfect energy resolution)
#
# ### Load the different cubes needed for the analysis
#
# We will use the cubes built from the 4 Crab observations in gammapy-extra. You could use the cubes just created with the notebook cube_analysis.ipynb by changing cube_dir to your local path.
# 
# - Counts cube

# In[3]:


cube_dir = Path('$GAMMAPY_EXTRA/test_datasets/cube')
counts_3d = SkyCube.read(cube_dir / 'counts_cube.fits')
# Transformation to a sherpa object
cube = counts_3d.to_sherpa_data3d(dstype='Data3DInt')


# - Background Cube

# In[4]:


bkg_3d = SkyCube.read(cube_dir / 'bkg_cube.fits')
bkg = TableModel('bkg')
bkg.load(None, bkg_3d.data.value.ravel())
bkg.ampl = 1
bkg.ampl.freeze()
Example #24
        if new_line:
            output = output + new_line
        else:
            output = output + line
    return output


# check gammapy-extra
if 'GAMMAPY_EXTRA' not in os.environ:
    logging.info('GAMMAPY_EXTRA environment variable not set.')
    logging.info('Running notebook tests requires gammapy-extra.')
    logging.info('Exiting now.')
    sys.exit()

# get list of notebooks
dirnbs = Path(os.environ['GAMMAPY_EXTRA']) / 'notebooks'
yamlfile = dirnbs / 'notebooks.yaml'
with open(str(yamlfile)) as fh:
    notebooks = yaml.safe_load(fh)

# scan notebooks
for notebook in notebooks:

    if not notebook['test']:
        logging.info(
            'Skipping notebook {} because test=false.'.format(notebook['name']))
        continue

    notebookfile = notebook['name'] + '.ipynb'
    filepath = dirnbs / notebookfile
Example #25
hdu.name = "HDU_INDEX"
hdu_list.append(hdu)

hdu = fits.BinTableHDU(data_store.obs_table)
hdu_list.append(hdu)

for idx, model in enumerate(models):
    hdu = model.to_fits()
    hdu.name = "BKG{}".format(idx)
    hdu_list.append(hdu)

print([_.name for _ in hdu_list])

import os

path = (Path(os.environ["GAMMAPY_DATA"]) /
        "hess-dl3-dr1/hess-dl3-dr3-with-background.fits.gz")
hdu_list.writeto(str(path), overwrite=True)

# In[ ]:

# Let's see if it's possible to access the data
ds2 = DataStore.from_file(path)
ds2.info()
obs = ds2.obs(20136)
obs.events
obs.aeff
background2d_peek(obs.bkg)

# ## Exercises
#
# calculate the horizontal coordinates
crab_altaz = c2.transform_to(AltAz(obstime=now, location=paris))

print(crab_altaz)

# ## Table: Manipulating the 3FGL catalog
#
# Here we are going to do some selections with the 3FGL catalog. To do so we use the Table class from astropy.
#
# ### Accessing the table
# First, we need to open the catalog in a Table.

# In[ ]:

# Open Fermi 3FGL from the repo
filename = Path(os.environ["GAMMAPY_DATA"]) / "catalogs/fermi/gll_psc_v16.fit.gz"
table = Table.read(str(filename), hdu=1)
# Alternatively, one can grab it from the server.
# table = Table.read("http://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit")

# In[ ]:

# Note that a single FITS file might contain different tables in different HDUs
filename = Path(os.environ["GAMMAPY_DATA"]) / "catalogs/fermi/gll_psc_v16.fit.gz"
# You can load a `fits.HDUList` and check the extension names
print([_.name for _ in fits.open(str(filename))])
# Then you can load by name or integer index via the `hdu` option
extended_source_table = Table.read(str(filename), hdu="ExtendedSources")

# ### General information on the Table