Example #1
def load_cubes(config):
    cube_dir = Path(config['logging']['working_dir'])
    npred_cube = SkyCube.read(cube_dir / 'npred_cube.fits.gz')
    exposure_cube = SkyCube.read(cube_dir / 'exposure_cube.fits', format='fermi-exposure')
    # print(exposure_cube)
    # print('exposure sum: {}'.format(np.nansum(exposure_cube.data)))
    # Replace NaN exposure values with zeros so they don't propagate downstream
    i_nan = np.where(np.isnan(exposure_cube.data))
    exposure_cube.data[i_nan] = 0

    # npred_cube_convolved = SkyCube.read(cube_dir / 'npred_cube_convolved.fits.gz')

    return dict(counts=npred_cube, exposure=exposure_cube)
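
# Minimal usage sketch for load_cubes (hypothetical config layout, matching
# the keys the function reads; the working directory must contain the two cube files):
config = {'logging': {'working_dir': '/path/to/cubes'}}
cubes = load_cubes(config)
print(cubes['counts'].data.shape, cubes['exposure'].data.shape)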
Example #2
def prepare_images():
    # Read in data
    background_file = FermiVelaRegion.filenames()['diffuse_model']
    exposure_file = FermiVelaRegion.filenames()['exposure_cube']
    counts_file = FermiVelaRegion.filenames()['counts_cube']
    background_model = SkyCube.read(background_file)
    exposure_cube = SkyCube.read(exposure_file)

    # Add correct units
    exposure_cube.data = Quantity(exposure_cube.data.value, 'cm2 s')

    # Re-project background cube
    repro_bg_cube = background_model.reproject_to(exposure_cube)

    # Define energy band required for output
    energies = EnergyBounds([10, 500], 'GeV')

    # Compute the predicted counts cube
    npred_cube = compute_npred_cube(repro_bg_cube, exposure_cube, energies)

    # Convolve with Energy-dependent Fermi LAT PSF
    psf = EnergyDependentTablePSF.read(FermiVelaRegion.filenames()['psf'])
    convolved_npred_cube = convolve_cube(npred_cube,
                                         psf,
                                         offset_max=Angle(3, 'deg'))

    # Counts data (open the file once and reuse the HDU)
    counts_hdu = fits.open(counts_file)[0]
    counts_data = counts_hdu.data
    counts_wcs = WCS(counts_hdu.header)
    counts_cube = SkyCube(data=Quantity(counts_data, ''),
                          wcs=counts_wcs,
                          energy=energies)
    counts_cube = counts_cube.reproject_to(npred_cube,
                                           projection_type='nearest-neighbor')

    counts = counts_cube.data[0]
    model = convolved_npred_cube.data[0]

    # Load Fermi tools gtmodel background-only result
    gtmodel = fits.open(
        FermiVelaRegion.filenames()['background_image'])[0].data.astype(float)

    # Ratio for the two background images
    ratio = np.nan_to_num(model / gtmodel)

    # Header is required for plotting, so returned here
    wcs = npred_cube.wcs
    header = wcs.to_header()

    return model, gtmodel, ratio, counts, header
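
# A quick way to inspect the returned images (a sketch; assumes matplotlib is
# installed and the Fermi Vela dataset files are available):
import matplotlib.pyplot as plt
from astropy.wcs import WCS

model, gtmodel, ratio, counts, header = prepare_images()
ax = plt.subplot(projection=WCS(header))
im = ax.imshow(ratio, origin='lower')
plt.colorbar(im)
plt.show()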
Example #3
def make_ref_cube(config):
    WCS_SPEC = {
        'nxpix': config['binning']['nxpix'],
        'nypix': config['binning']['nypix'],
        'binsz': config['binning']['binsz'],
        'xref': config['pointing']['ra'],
        'yref': config['pointing']['dec'],
        'proj': config['binning']['proj'],
        'coordsys': config['binning']['coordsys'],
    }

    # define reconstructed energy binning
    ENERGY_SPEC = {
        'mode': 'edges',
        'enumbins': config['binning']['enumbins'],
        'emin': config['selection']['emin'],
        'emax': config['selection']['emax'],
        'eunit': 'TeV',
    }

    CUBE_SPEC = {}
    CUBE_SPEC.update(WCS_SPEC)
    CUBE_SPEC.update(ENERGY_SPEC)
    cube = SkyCube.empty(**CUBE_SPEC)
    return cube
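
# Usage sketch for make_ref_cube with a hypothetical config fragment that
# covers exactly the keys the function reads:
config = {
    'binning': {'nxpix': 250, 'nypix': 250, 'binsz': 0.02,
                'proj': 'TAN', 'coordsys': 'GAL', 'enumbins': 20},
    'pointing': {'ra': 83.63, 'dec': 22.01},
    'selection': {'emin': 0.5, 'emax': 40},
}
ref_cube = make_ref_cube(config)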
Example #4
def make_empty_cube(image_size, energy, center, data_unit=""):
    """
    Parameters
    ----------
    image_size : int
        Width and height of the 2D map in pixels.
    energy : `~gammapy.utils.energy.EnergyBounds`
        Energy bin edges.
    center : `~astropy.coordinates.SkyCoord`
        Position of the source.
    data_unit : str
        Data unit.
    """
    def_image = dict()
    def_image["nxpix"] = image_size
    def_image["nypix"] = image_size
    def_image["binsz"] = 0.02
    def_image["xref"] = center.galactic.l.deg
    def_image["yref"] = center.galactic.b.deg
    def_image["proj"] = 'TAN'
    def_image["coordsys"] = 'GAL'
    def_image["unit"] = data_unit
    e_min = energy[0]
    e_max = energy[-1]
    nbins = len(energy) - 1
    empty_cube = SkyCube.empty(emin=e_min.value,
                               emax=e_max.value,
                               enumbins=nbins,
                               eunit=e_min.unit,
                               mode='edges',
                               **def_image)
    return empty_cube
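
# Usage sketch for make_empty_cube (made-up binning; the Crab position used
# elsewhere in these examples):
import astropy.units as u
from astropy.coordinates import SkyCoord
from gammapy.utils.energy import EnergyBounds

energy = EnergyBounds.equal_log_spacing(0.5, 40, 20, 'TeV')
center = SkyCoord(83.6333 * u.deg, 22.0144 * u.deg, frame='icrs')
cube = make_empty_cube(image_size=250, energy=energy, center=center)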
Example #6
def prepare_images():
    # Read in data
    fermi_vela = FermiVelaRegion()  # instantiated but not used below
    background_file = FermiVelaRegion.filenames()['diffuse_model']
    exposure_file = FermiVelaRegion.filenames()['exposure_cube']
    counts_file = FermiVelaRegion.filenames()['counts_cube']
    background_model = SkyCube.read(background_file, format='fermi-background')
    exposure_cube = SkyCube.read(exposure_file, format='fermi-exposure')

    # Re-project background cube
    repro_bg_cube = background_model.reproject(exposure_cube)

    # Define energy band required for output
    energies = EnergyBounds([10, 500], 'GeV')

    # Compute the predicted counts cube
    npred_cube = compute_npred_cube(repro_bg_cube,
                                    exposure_cube,
                                    energies,
                                    integral_resolution=5)

    # Convolve with Energy-dependent Fermi LAT PSF
    psf = EnergyDependentTablePSF.read(FermiVelaRegion.filenames()['psf'])
    kernels = psf.kernels(npred_cube)
    convolved_npred_cube = npred_cube.convolve(kernels, mode='reflect')

    # Counts data
    counts_cube = SkyCube.read(counts_file, format='fermi-counts')
    counts_cube = counts_cube.reproject(npred_cube)

    counts = counts_cube.data[0]
    model = convolved_npred_cube.data[0]

    # Load Fermi tools gtmodel background-only result
    gtmodel = fits.open(
        FermiVelaRegion.filenames()['background_image'])[0].data.astype(float)

    # Ratio for the two background images
    ratio = np.nan_to_num(model / gtmodel)

    # Header is required for plotting, so returned here
    wcs = npred_cube.wcs
    header = wcs.to_header()
    return model, gtmodel, ratio, counts, header
Example #7
    def _get_ref_cube(self, enumbins=11):
        p = self.parameters

        wcs = self.reference.wcs.deepcopy()
        shape = (enumbins, ) + self.reference.data.shape
        data = np.zeros(shape)

        energy = Energy.equal_log_spacing(p['emin'], p['emax'], enumbins,
                                          'TeV')
        energy_axis = LogEnergyAxis(energy, mode='center')
        return SkyCube(data=data, wcs=wcs, energy_axis=energy_axis)
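
# Standalone sketch of the same construction, with assumed stand-ins for the
# pieces that come from `self` above (pixel counts and energy range are
# made-up; `wcs` would come from an existing reference image):
import numpy as np
from gammapy.utils.energy import Energy

enumbins = 11
energy = Energy.equal_log_spacing(0.5, 40, enumbins, 'TeV')
energy_axis = LogEnergyAxis(energy, mode='center')
data = np.zeros((enumbins, 250, 250))
# cube = SkyCube(data=data, wcs=ref_image.wcs.deepcopy(), energy_axis=energy_axis)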
Example #9
def main():
    # Read cubes
    cube_on = SkyCube.read('non_cube.fits.gz')
    cube_off = SkyCube.read('noff_cube.fits.gz')

    # Read config
    config = yaml.safe_load(open('config.yaml'))
    binsz = config['binning']['binsz']
    offset_fov = config['selection']['offset_fov']

    #countson_vals = []
    #countsoff_vals = []
    diff_vals = np.ones(int(config['binning']['enumbins']))
    sigmaslimas = np.ones(int(config['binning']['enumbins']))

    # Define PSF region
    irffile = 'irf_file.fits'
    psf_table = psf_fromfits(irffile)
    psfs = psf_table[3]

    on_sizes = np.ones(int(config['binning']['enumbins'])) * u.deg
    energarr = cube_on.energies('edges')
    for idx in range(len(cube_on.energies('center'))):
        i = np.argmin(np.abs(energarr[idx].value - psf_table[0].value))
        j = np.argmin(np.abs(offset_fov - psf_table[2].value))
        on_sizes.value[idx] = psfs[0][j][i] * 2.12

    alpha_obs = 0.2
    on_pos = SkyCoord(83.6333 * u.deg, 22.0144 * u.deg, frame='icrs')

    ##Debug
    #print(on_sizes/binsz)

    off_pos = SkyCoord(83.6333 * u.deg, 22.0144 * u.deg, frame='icrs')
    off_sizes = on_sizes / np.sqrt(alpha_obs)

    on_data = Table()
    off_data = Table()
    on_data['value'] = np.zeros(len(on_sizes))
    off_data['value'] = np.zeros(len(on_sizes))

    for i in range(cube_on.data.shape[0]):

        # Make PSF region
        on_region = CircleSkyRegion(on_pos, on_sizes[i])
        off_region = CircleSkyRegion(off_pos, off_sizes[i])

        # Take spectrum
        on_data['value'][i] = cube_on.spectrum(on_region)['value'][i]
        off_data['value'][i] = cube_off.spectrum(off_region)['value'][i]  # * alpha_obs
        non_val = on_data['value'][i]
        noff_val = off_data['value'][i]
        diff_vals[i] = non_val - noff_val

        # Li & Ma (1983) Eq. 17 significance for this energy bin
        if non_val != 0 and noff_val != 0:
            siglima = np.sqrt(2) * np.sqrt(
                non_val * np.log((1.0 + (1.0 / alpha_obs)) * non_val /
                                 (non_val + noff_val)) +
                noff_val * np.log((alpha_obs + 1.0) * noff_val /
                                  (non_val + noff_val)))
        elif non_val != 0 and noff_val == 0:
            siglima = np.sqrt(2) * np.sqrt(
                non_val * np.log(1.0 + (1.0 / alpha_obs)))
        else:
            siglima = 0
        sigmaslimas[i] = siglima

    ##Debug
    #non_val = cube_on.data.sum().value
    #noff_val = cube_off.data.sum().value

    # Indices of the energy bin edges closest to 0.4 and 3.0 in cube energy units
    lo_lim_idx = np.argmin(np.abs(cube_on.energies('edges').value - 0.4))
    max_energ_idx = np.argmin(np.abs(cube_on.energies('edges').value - 3.0))
    non_val = on_data['value'][lo_lim_idx:max_energ_idx].sum()
    noff_val = off_data['value'][lo_lim_idx:max_energ_idx].sum()

    siglima = np.sqrt(2) * np.sqrt(non_val * np.log(
        (1.0 + (1.0 / alpha_obs)) * non_val /
        (non_val + noff_val)) + noff_val * np.log(
            (alpha_obs + 1.0) * noff_val / (non_val + noff_val)))

    #print('On events: ', on_data)
    #print('Off events: ', off_data)
    diff_vals[np.isnan(diff_vals)] = 0
    sigmaslimas[np.isnan(sigmaslimas)] = 0
    print('Excess: ', diff_vals)
    print('Total positive Excess: ', diff_vals[diff_vals > 0].sum())
    print('LiMa by energy bins: ', sigmaslimas)
    print('Total LiMa: ', siglima, 'Energy range: ',
          cube_on.energies('edges')[lo_lim_idx], ' - ',
          cube_on.energies('edges')[max_energ_idx])

    # Redo from the bin edge closest to 1.0 in cube energy units
    lo_lim_idx = np.argmin(np.abs(cube_on.energies('edges').value - 1.0))
    non_val = on_data['value'][lo_lim_idx:max_energ_idx].sum()
    noff_val = off_data['value'][lo_lim_idx:max_energ_idx].sum()

    # Li & Ma significance for alpha = 1 (hence the factor 2 inside the logs)
    siglima_tves = np.sqrt(2) * np.sqrt(
        non_val * np.log(2 * non_val / (non_val + noff_val)) +
        noff_val * np.log(2 * noff_val / (non_val + noff_val)))

    print('Total LiMa: ', siglima_tves, 'Energy range: ',
          cube_on.energies('edges')[lo_lim_idx], ' - ',
          cube_on.energies('edges')[max_energ_idx])

    return [siglima, siglima_tves, on_data, off_data, diff_vals, sigmaslimas]
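
# The quantity computed inline above is the Li & Ma (1983) Eq. 17 significance;
# a minimal standalone sketch of the same formula:
import numpy as np

def li_ma_significance(n_on, n_off, alpha):
    """Li & Ma (1983) Eq. 17 detection significance."""
    term_on = n_on * np.log((1 + alpha) / alpha * n_on / (n_on + n_off))
    term_off = n_off * np.log((1 + alpha) * n_off / (n_on + n_off))
    return np.sqrt(2) * np.sqrt(term_on + term_off)

# e.g. li_ma_significance(130., 400., 0.2) is roughly 4.6 sigma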
Example #10
        "_TeV.fits")["exposure"].data * 1e4
    #exposure_data[i_E,:,:] = SkyImageList.read(outdir_data + "/fov_bg_maps" + str(E1) + "_" + str(E2) + "_TeV.fits")["exposure"].data
    bkg_data[i_E, :, :] = SkyImageList.read(outdir_data + "/fov_bg_maps" +
                                            str(E1) + "_" + str(E2) +
                                            "_TeV.fits")["bkg"].data
    psf_file = Table.read(outdir_data + "/psf_table_" + source_name + "_" +
                          str(E1) + "_" + str(E2) + ".fits")
    header = on.to_image_hdu().header
    psf_data[i_E, :, :] = fill_acceptance_image(
        header, on.center, psf_file["theta"].to("deg"),
        psf_file["psf_value"].data, psf_file["theta"].to("deg")[-1]).data

#logenergy_axis=LogEnergyAxis(energy_bins[0:imax+1],mode='edges')
logenergy_axis = LogEnergyAxis(energy_bins, mode='edges')
counts_3D = SkyCube(name="counts3D",
                    data=Quantity(counts, " "),
                    wcs=on.wcs,
                    energy_axis=logenergy_axis)
cube = counts_3D.to_sherpa_data3d(dstype='Data3DInt')
logenergy_axis = LogEnergyAxis(energy_bins[0:imax + 1], mode='edges')
psf_image_3D = SkyCube(name="psf3D",
                       data=Quantity(psf_data, " "),
                       wcs=on.wcs,
                       energy_axis=logenergy_axis)
exposure_image_3D = SkyCube(name="exposure3D",
                            data=Quantity(exposure_data, "m2 s "),
                            wcs=on.wcs,
                            energy_axis=logenergy_axis)

spatial_model = NormGauss2DInt('spatial-model')
spectral_model = PowLaw1D('spectral-model')
#spectral_model = MyPLExpCutoff('spectral-model')
Example #11
                               use_cube,
                               use_etrue=False)
outdir_result = make_outdir_filesresult(source_name,
                                        name_method_fond,
                                        len(energy_bins),
                                        config_name,
                                        image_size,
                                        for_integral_flux,
                                        use_cube,
                                        use_etrue=False)
"""
Source model paramaters initial
"""
#Dans HGPS, c est une gaussienne de 0.05deg en sigma donc *2.35 pour fwhm
#avec HESS meme une source pontuelle ne fera jamais en dessous de 0.03-0.05 degre,
counts_3D = SkyCube.read(outdir_data + "/counts_cube.fits")
cube = counts_3D.to_sherpa_data3d(dstype='Data3DInt')
bkg_3D = SkyCube.read(outdir_data + "/bkg_cube.fits")
exposure_3D = SkyCube.read(outdir_data + "/exposure_cube.fits")
i_nan = np.where(np.isnan(exposure_3D.data))
exposure_3D.data[i_nan] = 0
exposure_3D.data = exposure_3D.data * 1e4  # convert m2 to cm2
psf_3D = SkyCube.read(outdir_data + "/mean_psf_cube_" + source_name + ".fits",
                      format="fermi-counts")

# Setup combined spatial and spectral model
spatial_model = NormGauss2DInt('spatial-model')
spectral_model = PowLaw1D('spectral-model')
#spectral_model = MyPLExpCutoff('spectral-model')
source_model = CombinedModel3DInt(use_psf=True,
                                  exposure=exposure_3D,
Example #12
def main():
    
    # Low and high energy bounds of the spectral fitting range
    lo_fit_energ = 0.1 * u.Unit('TeV')
    hi_fit_energ = 10 * u.Unit('TeV')

    # If you want an internal estimate of the high-energy limit of the fitting
    # range, set est_hi_lim = 'yes'; with 'no', hi_fit_energ is used instead.
    est_hi_lim = 'yes'
    
    # Read ON and OFF cubes
    filename_on = 'non_cube.fits.gz' # non_cube_convolved.fits
    cube_on = SkyCube.read(filename_on)
    
    ann_filename_off = 'noff_withpuls_cube.fits.gz'
    ann_cube_off = SkyCube.read(ann_filename_off)
    circ_filename_off = 'noff_withneb_cube.fits.gz'
    circ_cube_off = SkyCube.read(circ_filename_off)
    
    # Read config and IRFs
    config = read_config('config.yaml')
    irfs = get_irfs(config)
    offset = Angle(config['selection']['offset_fov'] * u.deg)
    livetime = u.Quantity(config['pointing']['livetime']).to('second')
    alpha_obs = 1.
    binsz = config['binning']['binsz']
    aeff = irfs['aeff'].to_effective_area_table(offset=offset,
                                                energy=cube_on.energies('edges'))
    edisp = irfs['edisp'].to_energy_dispersion(offset=offset,
                                               e_true=aeff.energy.bins,
                                               e_reco=cube_on.energies('edges'))
    
    # Define circular on/off Regions parameters
    on_pos = SkyCoord(83.6333 * u.deg, 22.0144 * u.deg, frame='icrs')
    on_sizes = np.ones(20) * binsz * u.deg
    
    off_pos = SkyCoord(83.6333 * u.deg, 22.0144 * u.deg, frame='icrs')
    off_sizes = on_sizes * np.sqrt(1./alpha_obs)
    
    # Make Annular region
    on_rad_sizes = np.ones(len(on_sizes)) * 0.1 * binsz * u.deg
    off_rad_sizes = on_rad_sizes * np.sqrt(1./alpha_obs)
    widths = np.ones(len(on_sizes)) * 22 * binsz * u.deg
    out_rad_sizes = on_rad_sizes + widths
    
    ann_on_data, ann_off_data, ann_stats = make_annular_spectrum(on_pos, off_pos, on_rad_sizes, off_rad_sizes, out_rad_sizes, cube_on, ann_cube_off, alpha_obs)
    
    # Make circular region
    circ_on_data, circ_off_data, circ_stats = make_circular_spectrum(on_pos, off_pos, on_sizes, off_sizes, cube_on, circ_cube_off, alpha_obs)

    # Undo "holes" in circ/ann_stats
    if np.max(np.where(circ_stats == 1)) + 1 != circ_stats.sum():
        circ_stats[0:np.max(np.where(circ_stats == 1)) + 1][circ_stats[0:np.max(np.where(circ_stats == 1)) + 1] == 0] = 1.
    if np.max(np.where(ann_stats == 1)) + 1 != ann_stats.sum():
        ann_stats[0:np.max(np.where(ann_stats == 1)) + 1][ann_stats[0:np.max(np.where(ann_stats == 1)) + 1] == 0] = 1.
    
    # Make on/off vectors
    on_edges = cube_on.energies('edges')
    ann_on_vector = PHACountsSpectrum(
        energy_lo=on_edges[:-1], energy_hi=on_edges[1:],
        data=ann_on_data['value'].data * ann_stats * u.ct,
        backscal=on_sizes[0].value, meta={'EXPOSURE': livetime.value})
    circ_on_vector = PHACountsSpectrum(
        energy_lo=on_edges[:-1], energy_hi=on_edges[1:],
        data=circ_on_data['value'].data * circ_stats * u.ct,
        backscal=on_sizes[0].value, meta={'EXPOSURE': livetime.value})

    ann_off_edges = ann_cube_off.energies('edges')
    ann_off_vector = PHACountsSpectrum(
        energy_lo=ann_off_edges[:-1], energy_hi=ann_off_edges[1:],
        data=ann_off_data['value'].data * ann_stats * u.ct,
        backscal=off_sizes[0].value,
        meta={'EXPOSURE': livetime.value, 'OFFSET': 0.3 * u.deg})
    circ_off_edges = circ_cube_off.energies('edges')
    circ_off_vector = PHACountsSpectrum(
        energy_lo=circ_off_edges[:-1], energy_hi=circ_off_edges[1:],
        data=circ_off_data['value'].data * circ_stats * u.ct,
        backscal=off_sizes[0].value,
        meta={'EXPOSURE': livetime.value, 'OFFSET': 0.3 * u.deg})

    # Make SpectrumObservation
    ann_sed_table = SpectrumObservation(on_vector=ann_on_vector,
                                        off_vector=ann_off_vector,
                                        aeff=aeff, edisp=edisp)
    circ_sed_table = SpectrumObservation(on_vector=circ_on_vector,
                                         off_vector=circ_off_vector,
                                         aeff=aeff, edisp=edisp)

    ##Debug
    #print(ann_stats)
    #print(circ_stats)
    
    # Define Spectral Model

    model2fit1 = LogParabola(amplitude=1e-11 * u.Unit('cm-2 s-1 TeV-1'),
                             reference=1 * u.TeV,
                             alpha=2.5 * u.Unit(''),
                             beta=0.1 * u.Unit(''))
    model2fit2 = ExponentialCutoffPowerLaw(index=1. * u.Unit(''),
                                           amplitude=1e-11 * u.Unit('cm-2 s-1 TeV-1'),
                                           reference=1 * u.TeV,
                                           lambda_=0. * u.Unit('TeV-1'))
    model2fit3 = PowerLaw(index=2.5 * u.Unit(''),
                          amplitude=5e-11 * u.Unit('cm-2 s-1 TeV-1'),
                          reference=0.15 * u.TeV)

    model2fit3.parameters['amplitude'].parmin = 1e-12
    model2fit3.parameters['amplitude'].parmax = 1e-10
    
    model2fit3.parameters['index'].parmin = 2.0
    model2fit3.parameters['index'].parmax = 4.0

    #Models to fit the circular and annular observations
    models_ann_fit = [model2fit1, model2fit2, model2fit3]
    models_circ_fit = [model2fit1, model2fit2, model2fit3]
    
    # Fit
    if est_hi_lim == 'yes':
        hi_fit_energ = cube_on.energies('edges')[int(np.sum(ann_stats))]
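
    # The snippet ends here; a plausible continuation for this era of gammapy
    # (a sketch, not the original code -- class name and signature assumed)
    # would loop the candidate models through SpectrumFit:
    from gammapy.spectrum import SpectrumFit
    for model in models_ann_fit:
        fit = SpectrumFit(obs_list=[ann_sed_table], model=model,
                          fit_range=(lo_fit_energ, hi_fit_energ))
        fit.fit()
        print(fit.result[0])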
Example #13
    'xref': 83.63,
    'yref': 22.01,
    'proj': 'TAN',
    'coordsys': 'CEL'
}

# define reconstructed energy binning
ENERGY_SPEC = {
    'mode': 'edges',
    'enumbins': 5,
    'emin': 0.5,
    'emax': 40,
    'eunit': 'TeV'
}

REF_CUBE = SkyCube.empty(emin=0.5, emax=40.0, enumbins=5, **WCS_SPEC)
# setting up the data store
data_store = DataStore.from_dir("$GAMMAPY_EXTRA/test_datasets/cube/data")

# temporary fix: loading the PSF table for one of the runs is not implemented yet, so drop that row
data_store.hdu_table.remove_row(14)
# read in TeVCat exclusion mask
exclusion_mask = SkyImage.read(
    '$GAMMAPY_EXTRA/datasets/exclusion_masks/tevcat_exclusion.fits')

# reproject exclusion mask to reference cube
exclusion_mask = exclusion_mask.reproject(reference=REF_CUBE.sky_image_ref,
                                          order='nearest-neighbor')
exclusion_mask.show()

# Select the offset band used to select events in the FOV of each observation
# define reconstructed energy binning
ENERGY_SPEC = {
    'mode': 'edges',
    'enumbins': 5,
    'emin': 0.5,
    'emax': 40,
    'eunit': 'TeV'
}

# From this we create an empty `SkyCube` object that we will use as a reference for all other sky cube data:

# In[5]:

# instantiate reference cube
REF_CUBE = SkyCube.empty(**WCS_SPEC, **ENERGY_SPEC)

# Next we create a `DataStore` object, which allows us to access and load DL3-level data. In this case we will use four Crab runs of simulated HESS data, which are bundled in gammapy-extra:

# In[6]:

# setting up the data store
data_store = DataStore.from_dir("$GAMMAPY_EXTRA/test_datasets/cube/data")

# temporary fix: loading the PSF table for one of the runs is not implemented yet, so drop that row
data_store.hdu_table.remove_row(14)

# For the later background estimation we define an exclusion mask. The background image will be normalized to the counts map outside the exclusion region (the so-called FOV background method). For convenience we use an all-sky exclusion mask created from the TeVCat catalog and reproject it to our analysis region:

# In[7]:
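
# The notebook cell that followed is cut off; the matching code appears earlier
# in this example, so it presumably read (sketch):
exclusion_mask = SkyImage.read(
    '$GAMMAPY_EXTRA/datasets/exclusion_masks/tevcat_exclusion.fits')
exclusion_mask = exclusion_mask.reproject(reference=REF_CUBE.sky_image_ref,
                                          order='nearest-neighbor')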
Example #15
dirname = '$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2'
log.info('Reading data from {}'.format(dirname))
data_store = DataStore.from_dir(dirname)
obs = data_store.obs(23523)

events = obs.events
log.info('Number of events in event list: {}'.format(len(events)))
log.info('Max. event energy: {}'.format(events['ENERGY'].max()))
log.info('Min. event energy: {}'.format(events['ENERGY'].min()))
aeff = obs.aeff

counts = SkyCube.empty(emin=0.5,
                       emax=80,
                       enumbins=8,
                       eunit='TeV',
                       nxpix=200,
                       nypix=200,
                       xref=events.meta['RA_OBJ'],
                       yref=events.meta['DEC_OBJ'],
                       dtype='int',
                       coordsys='CEL')

log.info('Bin events into cube.')
counts.fill_events(events)
log.info('Counts cube shape: {}'.format(counts.data.shape))
log.info('Number of events in cube: {}'.format(counts.data.sum()))
counts.write('counts.fits.gz', format='fermi-counts', clobber=True)

# Exposure cube
pointing = SkyCoord(events.meta['RA_PNT'],
                    events.meta['DEC_PNT'],
                    "icrs",
                    unit="deg")
livetime = Quantity(events.meta['LIVETIME'], 's')
exposure = exposure_cube(pointing, livetime, aeff2d=aeff, ref_cube=counts,
                         offset_max=Quantity(2.5, 'deg'))
log.info('Exposure cube shape: {}'.format(exposure.data.shape))
log.info('Exposure unit: {}'.format(exposure.data.unit))
Example #16
def main():

    # Read file to fit
    #filename = 'nexcess_cube.fits.gz'
    filename = 'non_cube_convolved.fits.gz'
    cube = SkyCube.read(filename)

    # Read configuration
    config = read_config('config.yaml')
    binsz = config['binning']['binsz']
    offset_fov = config['selection']['offset_fov']

    # Take PSF data
    irffile = 'irf_file.fits'
    psf_table = psf_fromfits(irffile)

    energarr = cube.energies('edges')
    sigmas = psf_table[3]
    norms = psf_table[4]

    hdu = pyfits.open(filename)

    im_sizex = hdu[0].header['NAXIS1']
    im_sizey = hdu[0].header['NAXIS2']

    cx = 0.5 * im_sizex
    cy = 0.5 * im_sizey

    # Check the significance
    filename_on = 'non_cube.fits.gz'

    cube_on = SkyCube.read(filename_on)

    filename_off = 'noff_cube.fits.gz'

    cube_off = SkyCube.read(filename_off)
    alpha_obs = 1.

    on_pos = SkyCoord(83.6333 * u.deg, 22.0144 * u.deg, frame='icrs')
    on_sizes = np.ones(len(
        cube.energies('center'))) * 120 * binsz * u.deg  #0.167

    off_pos = SkyCoord(83.6333 * u.deg, 22.0144 * u.deg, frame='icrs')
    off_sizes = on_sizes * alpha_obs

    on_data = Table()
    off_data = Table()
    on_data['value'] = np.zeros(len(on_sizes))
    off_data['value'] = np.zeros(len(on_sizes))
    for idx in range(len(cube.energies('center'))):

        on_region = CircleSkyRegion(on_pos, on_sizes[idx])
        off_region = CircleSkyRegion(off_pos, off_sizes[idx])

        on_data['value'][idx] = cube_on.spectrum(on_region)['value'][idx]
        off_data['value'][idx] = cube_off.spectrum(off_region)['value'][idx]

        # Li & Ma (1983) Eq. 17 significance
        limasig = np.sqrt(2) * np.sqrt(
            on_data['value'][idx] * np.log(
                ((1 + alpha_obs) / alpha_obs) * on_data['value'][idx] /
                (on_data['value'][idx] + off_data['value'][idx])) +
            off_data['value'][idx] * np.log(
                (1 + alpha_obs) * off_data['value'][idx] /
                (on_data['value'][idx] + off_data['value'][idx])))

        print(limasig, 'Energy range: ',
              cube_on.energies('edges')[idx], ' - ',
              cube_on.energies('edges')[idx + 1])

        #Fit only if data is enough
        #and on_data['value'][i] - off_data['value'][i] >= 0.01 * off_data['value'][i]
        if limasig >= 3 and on_data['value'][idx] - off_data['value'][idx] >= 7:

            # Make image cube from slice excess convolved cube
            cube_sum = np.zeros(
                (cube.data.shape[1], cube.data.shape[2])) * u.ct
            cube_sum = np.add(cube_sum, cube.data[idx])

            # Zero NaNs and flip negative bins to positive values
            cube_sum.value[np.isnan(cube_sum.value)] = 0
            cube_sum.value[cube_sum.value < 0] = np.abs(
                cube_sum.value[cube_sum.value < 0])

            image_sum = SkyCube.empty_like(cube)
            image_sum.data = cube_sum

            image_sum.write('sum_image.fits.gz', overwrite=True)

            # Find nearest energy and offset (theta) entries in the PSF table
            i = np.argmin(np.abs(energarr[idx].value - psf_table[0].value))
            j = np.argmin(np.abs(offset_fov - psf_table[2].value))

            # Make PSF
            #psfname="mypsf"
            #load_user_model(PSFGauss,psfname)
            s1 = sigmas[0][j][i] / binsz
            s2 = sigmas[1][j][i] / binsz
            s3 = sigmas[2][j][i] / binsz
            print(sigmas[0][j][i], sigmas[1][j][i], sigmas[2][j][i])
            ampl = norms[0][j][i]
            ampl2 = norms[1][j][i]
            ampl3 = norms[2][j][i]

            t0 = time()

            #Morphological fitting
            load_image("sum_image.fits.gz")
            #image_data()

            #set_coord("physical")

            set_method("simplex")
            set_stat("cash")

            # Position and radius
            x0 = 125
            y0 = 125
            rad0 = 80.0

            image_getregion(coord="physical")
            'circle(x0,y0,rad0);'

            notice2d("circle(" + str(x0) + "," + str(y0) + "," + str(rad0) +
                     ")")

            load_user_model(GaussianSource, "sph2d")
            add_user_pars("sph2d", [
                "sigma1", "sigma2", "sigma3", "alpha", "beta", "ampl", "size",
                "xpos", "ypos"
            ])

            set_model(sph2d + const2d.bgnd)

            # Constant PSF
            #gpsf.fwhm = 4.2
            #gpsf.xpos = x0
            #gpsf.ypos = y0
            #gpsf.ellip = 0.2
            #gpsf.theta = 30 * np.pi / 180

            #### Set PSF
            set_par(sph2d.sigma1, val=s1, frozen=True)
            set_par(sph2d.sigma2, val=0, frozen=True)
            set_par(sph2d.sigma3, val=0, frozen=True)
            set_par(sph2d.alpha, val=0, frozen=True)
            set_par(sph2d.beta, val=0, frozen=True)

            # HESS PSF
            #set_par(sph2d.sigma1, val = 0.025369, frozen = True)
            #set_par(sph2d.alpha, val = 0.691225, frozen = True)
            #set_par(sph2d.sigma2, val = 0.0535014, frozen = True)
            #set_par(sph2d.beta, val = 0.13577, frozen = True)
            #set_par(sph2d.sigma3, val = 0.11505, frozen = True)

            set_par(sph2d.xpos, val=x0, frozen=True)
            set_par(sph2d.ypos, val=y0, frozen=True)

            set_par(sph2d.ampl, val=10000, min=1e-11, max=100000000)
            set_par(sph2d.size, val=10, min=1e-11, max=100)

            set_par(bgnd.c0, val=1, min=0, max=100)

            show_model()
            fit()
            #do_fit()
            conf()
            #do_conf()
            #image_fit()
            #save_model("model_" + str(idx) + ".fits")
            #save_resid("resid_" + str(idx) + ".fits")

            t1 = time()
            print('Simul time', t1 - t0)
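
# The user model GaussianSource referenced above is not shown in this snippet.
# A minimal stand-in with the same parameter list (hypothetical; the real model
# presumably folds the triple-Gaussian PSF parameters into the source shape):
import numpy as np

def GaussianSource(pars, x, y):
    """Sherpa 2D user model: symmetric Gaussian source; extra PSF pars unused here."""
    sigma1, sigma2, sigma3, alpha, beta, ampl, size, xpos, ypos = pars
    r2 = (x - xpos) ** 2 + (y - ypos) ** 2
    width2 = size ** 2 + sigma1 ** 2  # source size added in quadrature with first PSF sigma
    return ampl * np.exp(-0.5 * r2 / width2)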
Example #17
"""Test npred model image computation.
"""
from astropy.coordinates import Angle
from gammapy.datasets import FermiGalacticCenter
from gammapy.utils.energy import EnergyBounds
from gammapy.irf import EnergyDependentTablePSF
from gammapy.cube import SkyCube, compute_npred_cube, convolve_cube

filenames = FermiGalacticCenter.filenames()
flux_cube = SkyCube.read(filenames['diffuse_model'])
exposure_cube = SkyCube.read(filenames['exposure_cube'])
psf = EnergyDependentTablePSF.read(filenames['psf'])

flux_cube = flux_cube.reproject_to(exposure_cube)

energy_bounds = EnergyBounds([10, 30, 100, 500], 'GeV')
npred_cube = compute_npred_cube(flux_cube, exposure_cube, energy_bounds)

offset_max = Angle(1, 'deg')
npred_cube_convolved = convolve_cube(npred_cube, psf, offset_max)
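
# To persist the result, SkyCube.write (used the same way in Example #15)
# should work here as well (a sketch):
npred_cube_convolved.write('npred_cube_convolved.fits.gz', clobber=True)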
Example #18
else:
    center = SkyCoord.from_name(
        input_param["general"]["sourde_name_skycoord"]).galactic
extraction_size = input_param["param_fit_3D"]["extraction_region"]
empty_cube_reco = make_empty_cube(extraction_size,
                                  energy_bins,
                                  center,
                                  data_unit="")
empty_cube_true = make_empty_cube(extraction_size,
                                  energy_bins_true,
                                  center,
                                  data_unit="")
"""
Define SkyCube
"""
cube_mask = SkyCube.read("skycube_mask_CG_binE_" +
                         str(input_param["energy binning"]["nbin"]) + ".fits")
index_region_selected_3d = np.where(cube_mask.data.value == 1)

counts_3D = SkyCube.read(outdir_data + "/counts_cube.fits").cutout(
    center, extraction_size)
coord = counts_3D.sky_image_ref.coordinates(mode="edges")
energies = counts_3D.energies(mode='edges').to("TeV")
cube = counts_3D.to_sherpa_data3d(dstype='Data3DInt')
# apply the cube mask to the sherpa dataset
cube.mask = cube_mask.data.value.ravel()

bkg_3D = SkyCube.read(outdir_data + "/bkg_cube.fits").cutout(
    center, extraction_size)
exposure_3D = SkyCube.read(outdir_data + "/exposure_cube.fits").cutout(
    center, extraction_size)
i_nan = np.where(np.isnan(exposure_3D.data))
exposure_3D.data[i_nan] = 0
Example #19
from gammapy.cube import SkyCube
from gammapy.cube.sherpa_ import (
    CombinedModel3DInt,
    CombinedModel3DIntConvolveEdisp,
    NormGauss2DInt,
)

from sherpa.models import PowLaw1D, TableModel
from sherpa.estmethods import Covariance
from sherpa.optmethods import NelderMead
from sherpa.stats import Cash
from sherpa.fit import Fit
import sherpa
import os
import numpy as np
from pathlib import Path
cube_dir = Path(os.getcwd())
counts_3d = SkyCube.read(cube_dir / 'counts_cube.fits')
cube = counts_3d.to_sherpa_data3d(dstype='Data3DInt')

cube_dir = Path('$GAMMAPY_EXTRA/test_datasets/cube')
bkg_3d = SkyCube.read(cube_dir / 'bkg_cube.fits')

bkg = TableModel('bkg')
bkg.load(None, bkg_3d.data.value.ravel())
bkg.ampl = 1
bkg.ampl.freeze()

# 'exposure' is assumed to have been read earlier in this session,
# e.g. exposure = SkyCube.read(cube_dir / 'exposure_cube.fits')
i_nan = np.where(np.isnan(exposure.data))
exposure.data[i_nan] = 0
# In order to have the exposure in cm2 s
from gammapy.cube import exposure_cube, SkyCube
from gammapy.utils.energy import EnergyBounds

dirname = '$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2'
log.info('Reading data from {}'.format(dirname))
data_store = DataStore.from_dir(dirname)
obs = data_store.obs(23523)

events = obs.events
log.info('Number of events in event list: {}'.format(len(events)))
log.info('Max. event energy: {}'.format(events['ENERGY'].max()))
log.info('Min. event energy: {}'.format(events['ENERGY'].min()))
aeff = obs.aeff

counts = SkyCube.empty(emin=0.5, emax=80, enumbins=8, eunit='TeV',
                       nxpix=200, nypix=200, xref=events.meta['RA_OBJ'],
                       yref=events.meta['DEC_OBJ'], dtype='int',
                       coordsys='CEL')

log.info('Bin events into cube.')
counts.fill_events(events)
log.info('Counts cube shape: {}'.format(counts.data.shape))
log.info('Number of events in cube: {}'.format(counts.data.sum()))
counts.write('counts.fits.gz', format='fermi-counts', clobber=True)

# Exposure cube
pointing = SkyCoord(events.meta['RA_PNT'], events.meta['DEC_PNT'], "icrs", unit="deg")
livetime = Quantity(events.meta['LIVETIME'], 's')
exposure = exposure_cube(pointing, livetime, aeff2d=aeff, ref_cube=counts,
                         offset_max=Quantity(2.5, 'deg'))
log.info('Exposure cube shape: {}'.format(exposure.data.shape))
log.info('Exposure unit: {}'.format(exposure.data.unit))