Example #1
    def fit_point(self, model, energy_group, energy_ref):
        from gammapy.spectrum import SpectrumFit

        sherpa_model = model.to_sherpa()
        sherpa_model.gamma.freeze()
        fit = SpectrumFit(self.obs, sherpa_model)

        erange = energy_group.energy_range
        # TODO: Notice channels contained in energy_group
        fit.fit_range = erange.min, 0.9999 * erange.max

        log.debug(
            'Calling Sherpa fit for flux point '
            'in energy range:\n{}'.format(fit)
        )

        fit.fit()

        res = fit.global_result

        energy_err_hi = energy_group.energy_range.max - energy_ref
        energy_err_lo = energy_ref - energy_group.energy_range.min
        diff_flux = res.model(energy_ref).to('m-2 s-1 TeV-1')
        err = res.model_with_uncertainties(energy_ref.to('TeV').value)
        diff_flux_err = err.s * Unit('m-2 s-1 TeV-1')

        return OrderedDict(
            energy=energy_ref,
            energy_err_hi=energy_err_hi,
            energy_err_lo=energy_err_lo,
            diff_flux=diff_flux,
            diff_flux_err_hi=diff_flux_err,
            diff_flux_err_lo=diff_flux_err,
        )
Example #2
    def fit_point(self, model, energy_group, energy_ref):
        from gammapy.spectrum import SpectrumFit

        sherpa_model = model.to_sherpa()
        sherpa_model.gamma.freeze()
        fit = SpectrumFit(self.obs, sherpa_model)

        erange = energy_group.energy_range
        # TODO: Notice channels contained in energy_group
        fit.fit_range = erange.min, 0.9999 * erange.max

        log.debug("Calling Sherpa fit for flux point " " in energy range:\n{}".format(fit))

        fit.fit()

        res = fit.global_result

        energy_err_hi = energy_group.energy_range.max - energy_ref
        energy_err_lo = energy_ref - energy_group.energy_range.min
        diff_flux = res.model(energy_ref).to("m-2 s-1 TeV-1")
        err = res.model_with_uncertainties(energy_ref.to("TeV").value)
        diff_flux_err = err.s * u.Unit("m-2 s-1 TeV-1")

        return OrderedDict(
            energy=energy_ref,
            energy_err_hi=energy_err_hi,
            energy_err_lo=energy_err_lo,
            diff_flux=diff_flux,
            diff_flux_err_hi=diff_flux_err,
            diff_flux_err_lo=diff_flux_err,
        )
Example #3
    def fit_point(self, model, energy_group, energy_ref):
        from gammapy.spectrum import SpectrumFit

        fit = SpectrumFit(self.obs, model)
        erange = energy_group.energy_range

        # TODO: Notice channels contained in energy_group
        fit.fit_range = erange.min, erange.max

        log.debug('Calling Sherpa fit for flux point '
                  'in energy range:\n{}'.format(fit))

        fit.fit()
        fit.est_errors()

        # The first result contains the correct model
        res = fit.result[0]

        e_max = energy_group.energy_range.max
        e_min = energy_group.energy_range.min
        diff_flux, diff_flux_err = res.model.evaluate_error(energy_ref)
        return OrderedDict(
            e_ref=energy_ref,
            e_min=e_min,
            e_max=e_max,
            dnde=diff_flux.to('m-2 s-1 TeV-1'),
            dnde_err=diff_flux_err.to('m-2 s-1 TeV-1'),
        )
Example #4
    def fit_point(self, model, energy_group, energy_ref):
        from gammapy.spectrum import SpectrumFit

        # TODO: The code below won't work because SpectrumFit only accepts
        # gammapy models. Add Parameter class to freeze index
        # sherpa_model = model.to_sherpa()
        # sherpa_model.gamma.freeze()
        #fit = SpectrumFit(self.obs, sherpa_model)
        fit = SpectrumFit(self.obs, model)

        erange = energy_group.energy_range
        # TODO: Notice channels contained in energy_group
        fit.fit_range = erange.min, 0.9999 * erange.max

        log.debug('Calling Sherpa fit for flux point '
                  'in energy range:\n{}'.format(fit))

        fit.fit()
        fit.est_errors()

        # The first result contains the correct model
        res = fit.result[0]

        e_max = energy_group.energy_range.max
        e_min = energy_group.energy_range.min
        diff_flux, diff_flux_err = res.model.evaluate_error(energy_ref)
        return OrderedDict(
            e_ref=energy_ref,
            e_min=e_min,
            e_max=e_max,
            dnde=diff_flux.to('m-2 s-1 TeV-1'),
            dnde_err=diff_flux_err.to('m-2 s-1 TeV-1'),
        )
Example #5
def fit_gammapy():
    """
    Current results

    Parameters:

           name     value     error         unit      min max frozen
        --------- --------- --------- --------------- --- --- ------
            index 2.602e+00 1.555e-01                 nan nan  False
        amplitude 2.441e-11 3.452e-12 1 / (cm2 s TeV) nan nan  False
        reference 1.000e+00 0.000e+00             TeV nan nan   True

    Covariance:

        name/name  index   amplitude
        --------- -------- ---------
            index   0.0242  3.79e-13
        amplitude 3.79e-13  1.19e-23

    Statistic: -157.719 (cash)
    Fit Range: [1.0000000e+09 2.7825594e+10] keV


    """
    obs = SpectrumObservation.read(obs_path)
    # obs.peek()
    # plt.show()
    model = PowerLaw(
        amplitude=1.23 * 1e-11 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.Unit("TeV"),
        index=2.14 * u.Unit(""),
    )

    fit = SpectrumFit(obs_list=obs, model=model, fit_range=energy_range, stat="cash")
    fit.run()
    print(fit.result[0])
    pprint(fit.__dict__)

    obs = fit.obs_list[0]
    print(obs)
    print("This is fit_gammapy")
    obs.peek()
    import matplotlib.pyplot as plt

    plt.savefig("fit_gammapy.png")
Example #6
def test_spectral_model_absorbed_by_ebl():
    # Observation parameters
    obs_param = ObservationParameters(alpha=0.2 * u.Unit(''),
                                      livetime=5. * u.h,
                                      emin=0.08 * u.TeV,
                                      emax=12 * u.TeV)

    # Target, PKS 2155-304 from 3FHL
    name = 'test'
    absorption = Absorption.read('$GAMMAPY_EXTRA/datasets/ebl/ebl_dominguez11.fits.gz')
    pwl = PowerLaw(index=3. * u.Unit(''),
                   amplitude=1.e-12 * u.Unit('1/(cm2 s TeV)'),
                   reference=1. * u.TeV)

    input_model = AbsorbedSpectralModel(spectral_model=pwl,
                                        absorption=absorption,
                                        parameter=0.2)

    target = Target(name=name, model=input_model)

    # Performance
    filename = '$GAMMAPY_EXTRA/datasets/cta/perf_prod2/point_like_non_smoothed/South_5h.fits.gz'
    cta_perf = CTAPerf.read(filename)

    # Simulation
    simu = CTAObservationSimulation.simulate_obs(perf=cta_perf,
                                                 target=target,
                                                 obs_param=obs_param)

    # Model we want to fit
    pwl_model = PowerLaw(index=2.5 * u.Unit(''),
                         amplitude=1.e-12 * u.Unit('1/(cm2 s TeV)'),
                         reference=1. * u.TeV)
    model = AbsorbedSpectralModel(spectral_model=pwl_model,
                                  absorption=absorption,
                                  parameter=0.2)

    # fit
    fit = SpectrumFit(obs_list=SpectrumObservationList([simu]),
                      model=model,
                      stat='wstat')
    fit.fit()
    fit.est_errors()
    result = fit.result[0]
Example #7
def run_analysis(which):
    log.info(f"Fitting dataset: {which}")

    dataset = config.datasets[which]
    obs_list = dataset.get_SpectrumObservationList()
    fit_range = dataset.energy_range
    log.info(f"obs_list: {obs_list}")

    model = Log10Parabola(
        amplitude="3.8e-11 cm-2 s-1 TeV-1", reference="1 TeV", alpha=2.47, beta=0.24
    )

    fit = SpectrumFit(obs_list=obs_list, model=model, fit_range=fit_range)

    log.info("Running fit")
    fit.run()

    fit_results = make_results_dict(fit)
    utils.write_yaml(fit_results, f"results/fit/fit_{which}.yaml")

    contour_results = compute_contours(fit)
    utils.write_yaml(contour_results, f"results/fit/contours_{which}.yaml")
Example #8
def test_spectral_model_absorbed_by_ebl():
    # Observation parameters
    obs_param = ObservationParameters(alpha=0.2 * u.Unit(''),
                                      livetime=5. * u.h,
                                      emin=0.08 * u.TeV,
                                      emax=12 * u.TeV)

    # Target, PKS 2155-304 from 3FHL
    name = 'test'
    absorption = Absorption.read(
        '$GAMMAPY_EXTRA/datasets/ebl/ebl_dominguez11.fits.gz')
    pwl = PowerLaw(index=3. * u.Unit(''),
                   amplitude=1.e-12 * u.Unit('1/(cm2 s TeV)'),
                   reference=1. * u.TeV)

    input_model = AbsorbedSpectralModel(spectral_model=pwl,
                                        absorption=absorption,
                                        parameter=0.2)

    target = Target(name=name, model=input_model)

    # Performance
    filename = '$GAMMAPY_EXTRA/datasets/cta/perf_prod2/point_like_non_smoothed/South_5h.fits.gz'
    cta_perf = CTAPerf.read(filename)

    # Simulation
    simu = CTAObservationSimulation.simulate_obs(perf=cta_perf,
                                                 target=target,
                                                 obs_param=obs_param)

    # Model we want to fit
    pwl_model = PowerLaw(index=2.5 * u.Unit(''),
                         amplitude=1.e-12 * u.Unit('1/(cm2 s TeV)'),
                         reference=1. * u.TeV)
    model = AbsorbedSpectralModel(spectral_model=pwl_model,
                                  absorption=absorption,
                                  parameter=0.2)

    # fit
    fit = SpectrumFit(obs_list=SpectrumObservationList([simu]),
                      model=model,
                      stat='wstat')
    fit.fit()
    fit.est_errors()
    result = fit.result[0]
from gammapy.spectrum import (
    SpectrumObservation,
    SpectrumFit,
    DifferentialFluxPoints,
    SpectrumFitResult,
    SpectrumResult
)
import astropy.units as u
import numpy as np
import copy
import matplotlib.pyplot as plt


plt.style.use('ggplot')
obs = SpectrumObservation.read('$GAMMAPY_EXTRA/datasets/hess-crab4_pha/pha_obs23523.fits')

fit = SpectrumFit(obs)
fit.run()
best_fit = copy.deepcopy(fit.result[0].fit)

# Define Flux points binning
emin = np.log10(obs.lo_threshold.to('TeV').value)
emax = np.log10(40)
binning = np.logspace(emin, emax, 8) * u.TeV

# Fix index
fit.model.gamma.freeze()

# Fit norm in bands
diff_flux = list()
diff_flux_err = list()
e_err_hi = list()
model2fit1.parameters['beta'].parmin = 0.0
model2fit1.parameters['beta'].parmax = 10.0
model2fit1.parameters['amplitude'].parmin = 1e-14
model2fit1.parameters['amplitude'].parmax = 1e-5

model2fit2.parameters['index'].parmin = 0.1
model2fit2.parameters['index'].parmax = 5.0
model2fit2.parameters['lambda_'].parmin = 0.001
model2fit2.parameters['lambda_'].parmax = 100
model2fit2.parameters['amplitude'].parmin = 1.0e-14
model2fit2.parameters['amplitude'].parmax = 1.0e-3

model2fit3.parameters['index'].parmin = 1.0
model2fit3.parameters['index'].parmax = 7.0
model2fit3.parameters['amplitude'].parmin = 1.0e-14
model2fit3.parameters['amplitude'].parmax = 1.0e-4

models2fit = [model2fit3]  #,model2fit2,model2fit3]

for k in range(len(models2fit)):
    fit_crab = SpectrumFit(obs_list=simu,
                           model=models2fit[k],
                           fit_range=(1.0 * u.TeV, 55 * u.TeV),
                           stat="cash")
    fit_crab.fit()
    fit_crab.est_errors()
    results = fit_crab.result
    #ax0, ax1 = results[0].plot(figsize=(8,8))
    #plt.show()
    print(results[0])
Example #11
# Define spectral model
index = 3.0 * u.Unit('')
amplitude = 2.5 * 1e-11 * u.Unit('cm-2 s-1 TeV-1')
reference = 1 * u.TeV
model = PowerLaw(index=index, amplitude=amplitude, reference=reference)

# for one obs only

mylist = datastore.obs_list((obsid[0], ))
n_obs = 1
seeds = np.arange(n_obs)
sim = SimulationRealBkg(source_model=model, obsrun=mylist[0], obspos=src)
sim.run(seeds)

obs = sim.result[0]
fit = SpectrumFit(obs, model=model, stat='wstat')
fit.model.parameters['index'].value = 2
fit.run()

fit.est_errors()

print(fit.result[0])

#now run in a loop

n_obs = 200
mylist = datastore.obs_list(obsid[0:n_obs])
seeds = np.arange(n_obs)
sims = []
for i in range(n_obs):
    sim = SimulationRealBkg(source_model=model, obsrun=mylist[i], obspos=src)
sim = SpectrumSimulation(aeff=aeff, edisp=edisp, source_model=model,
                         livetime=livetime)
sim.simulate_obs(seed=42, obs_id=0)

# In[ ]:

sim.obs.peek()
print(sim.obs)

# ## Spectral analysis
#
# Now that we have some simulated CTA counts spectrum, let's analyse it.

# In[ ]:

# Fit data
fit = SpectrumFit(obs_list=sim.obs, model=model, stat="cash")
fit.run()
result = fit.result[0]

# In[ ]:

print(result)

# In[ ]:

energy_range = [0.1, 100] * u.TeV
model.plot(energy_range=energy_range, energy_power=2)
result.model.plot(energy_range=energy_range, energy_power=2)
result.model.plot_error(energy_range=energy_range, energy_power=2)

# ## Exercises
#aeff.plot(ax=axes[1])
#plt.show()

aeff.lo_threshold = lo_threshold
aeff.hi_threshold = hi_threshold
sim = SpectrumSimulation(aeff=aeff,
                         edisp=edisp,
                         source_model=model,
                         livetime=livetime)
sim.simulate_obs(seed=42, obs_id=0)

sim.obs.peek()
print(sim.obs)
plt.show()

fit = SpectrumFit(obs_list=sim.obs, model=model, stat='cash')
fit.run()
result = fit.result[0]
print(result)

energy_range = [0.1, 100] * u.TeV
model.plot(energy_range=energy_range, energy_power=2)
result.model.plot(energy_range=energy_range, energy_power=2)
result.model.plot_error(energy_range=energy_range, energy_power=2)

plt.show()

#now importing a background

#from a background model
bkg_index = 2.5 * u.Unit('')
#print(sim2.result)

sim2 = sim1

Indiv_best_fit_index = []
best_fit_index = []

i = 0

while i < n_obs:
    sim_result = (sim1.result[i], sim2.result[i])
    #FIT SPECTRA:
    pwl.parameters['index'].parmax = 10

    for obs in sim_result:
        fit = SpectrumFit(obs, pwl.copy(), stat='wstat')
        fit.model.parameters['index'].value = 2
        fit.fit()
        fit.est_errors()
        Indiv_best_fit_index.append(
            fit.result[0].model.parameters['index'].value)
        #print(fit.result[0])
        #print(' Indiv_best_fit_index = ', Indiv_best_fit_index)

    #i+=1
    #STACK SPECTRA 2 by 2
    # Add the two spectra
    obs_stacker = SpectrumObservationStacker(sim_result)
    #print('sim_result is the list = ',sim_result)
    obs_stacker.run()
extraction.spectrum_observations[0].peek()

# Now we'll fit a model to the spectrum with SpectrumFit. First we load a power law model with initial values for the index and the amplitude, and then we do a likelihood fit. The fit results are printed below.

# In[ ]:

model = PowerLaw(index=4,
                 amplitude="1.3e-9 cm-2 s-1 TeV-1",
                 reference="0.02 TeV")

fit_range = (0.04 * u.TeV, 0.4 * u.TeV)
ebounds = EnergyBounds.equal_log_spacing(0.04, 0.4, 7, u.TeV)

joint_fit = SpectrumFit(obs_list=extraction.spectrum_observations,
                        model=model,
                        fit_range=fit_range)
joint_fit.run()
joint_result = joint_fit.result

print(joint_result[0])

# Now you might want to stack the observations here, even though in our case there is only one observation, which makes it superfluous.
# We can compute flux points by fitting the norm of the global model in energy bands.

# In[ ]:

stacked_obs = extraction.spectrum_observations.stack()
seg = SpectrumEnergyGroupMaker(obs=stacked_obs)

seg.compute_groups_fixed(ebounds=ebounds)
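
# In[ ]:

# A minimal sketch of the flux-point step announced above, assuming the gammapy
# version used here provides FluxPointEstimator with this signature
# (obs, groups, model); adapt if your version differs.
from gammapy.spectrum import FluxPointEstimator

fpe = FluxPointEstimator(
    obs=stacked_obs,               # stacked observation from above
    groups=seg.groups,             # energy groups from SpectrumEnergyGroupMaker
    model=joint_result[0].model,   # best-fit global model from the joint fit
)
fpe.compute_points()
print(fpe.flux_points.table)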
print(pwl)

livetime = 2 * u.h

# In[5]:

sim = SpectrumSimulation(aeff=aeff,
                         edisp=edisp,
                         source_model=pwl,
                         livetime=livetime)
sim.simulate_obs(seed=2309, obs_id=1)
print(sim.obs)

# In[6]:

fit = SpectrumFit(obs_list=sim.obs, model=pwl.copy(), stat='cash')
fit.fit_range = [1, 10] * u.TeV
fit.fit()
fit.est_errors()
print(fit.result[0])

# ## Include background
#
# In this section we will include a background component. Furthermore, we will also simulate more than one observation and fit each one individually in order to get average fit results.

# In[7]:

bkg_index = 2.5 * u.Unit('')
bkg_amplitude = 1e-11 * u.Unit('cm-2 s-1 TeV-1')
reference = 1 * u.TeV
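
# In[ ]:

# A minimal sketch of the background simulation described above; it assumes
# aeff, edisp, livetime and alpha are defined in earlier cells, and it reuses
# only constructor arguments and calls shown elsewhere on this page.
bkg_model = PowerLaw(index=bkg_index,
                     amplitude=bkg_amplitude,
                     reference=reference)

sim = SpectrumSimulation(aeff=aeff,
                         edisp=edisp,
                         source_model=pwl,
                         livetime=livetime,
                         background_model=bkg_model,
                         alpha=alpha)

# Simulate a few observations with different seeds and fit each one with wstat
results = []
for seed in range(3):
    sim.simulate_obs(seed=seed, obs_id=seed)
    fit = SpectrumFit(obs_list=sim.obs, model=pwl.copy(), stat='wstat')
    fit.run()
    results.append(fit.result[0])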
from astropy.tests.helper import assert_quantity_allclose

from gammapy.data import ObservationTable
from gammapy.datasets import gammapy_extra
from gammapy.spectrum import SpectrumFit, SpectrumObservationGrouping, group_obs_table

obs_table_file = gammapy_extra.filename(
    'datasets/hess-crab4_pha/observation_table.fits')

obs_table = ObservationTable.read(obs_table_file)

fit = SpectrumFit.from_observation_table(obs_table)
fit.model = 'PL'
fit.energy_threshold_low = '1 TeV'
fit.energy_threshold_high = '10 TeV'
fit.run(method='sherpa')

#Use each obs in one group
obs_table1 = group_obs_table(obs_table, eff_range=[90, 95], n_eff_bin=5)
obs_table1.write('grouped_table1_debug.fits', overwrite=True)

grouping = SpectrumObservationGrouping(obs_table1)
grouping.run()

fit_band2 = SpectrumFit.from_observation_table(grouping.stacked_obs_table)
fit_band2.model = 'PL'
fit_band2.energy_threshold_low = '1 TeV'
fit_band2.energy_threshold_high = '10 TeV'
fit_band2.run(method='sherpa')

assert_quantity_allclose(fit.result.parameters["index"],
# Requires IPython widgets
# _ = extraction.spectrum_observations.peek()

extraction.spectrum_observations[0].peek()

# ## Fit spectrum
#
# Now we'll fit a global model to the spectrum. First we do a joint likelihood fit to all observations. If you want to stack the observations see below. We will also produce a debug plot in order to show how the global fit matches one of the individual observations.

# In[ ]:

model = PowerLaw(index=2,
                 amplitude=2e-11 * u.Unit("cm-2 s-1 TeV-1"),
                 reference=1 * u.TeV)

joint_fit = SpectrumFit(obs_list=extraction.spectrum_observations, model=model)
joint_fit.run()
joint_result = joint_fit.result

# In[ ]:

ax0, ax1 = joint_result[0].plot(figsize=(8, 8))
ax0.set_ylim(0, 20)
print(joint_result[0])

# ## Compute Flux Points
#
# To round up our analysis we can compute flux points by fitting the norm of the global model in energy bands. We'll use a fixed energy binning for now.

# In[ ]:
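
# A minimal sketch of the fixed binning and grouping described above, reusing
# only calls that appear in the other examples on this page (EnergyBounds,
# stack, SpectrumEnergyGroupMaker); the flux points themselves are then
# obtained by fitting the norm of the global model in each group, as in
# Example #30 below.
ebounds = EnergyBounds.equal_log_spacing(1, 40, 4, unit=u.TeV)

stacked_obs = extraction.spectrum_observations.stack()
seg = SpectrumEnergyGroupMaker(obs=stacked_obs)
seg.compute_groups_fixed(ebounds=ebounds)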
    model2fit3.parameters['amplitude'].parmin = 1e-12
    model2fit3.parameters['amplitude'].parmax = 1e-10
    
    model2fit3.parameters['index'].parmin = 2.0
    model2fit3.parameters['index'].parmax = 4.0

    #Models to fit the circular and annular observations
    models_ann_fit = [model2fit1, model2fit2, model2fit3]
    models_circ_fit = [model2fit1, model2fit2, model2fit3]
    
    # Fit
    if est_hi_lim == 'yes':
        hi_fit_energ = cube_on.energies('edges')[int(np.sum(ann_stats))]
    
    for k in range(len(models_ann_fit)):
        fit_source = SpectrumFit(obs_list=ann_sed_table, model=models_ann_fit[k],
                                 forward_folded=True, fit_range=(lo_fit_energ, hi_fit_energ))
        fit_source.fit()
        fit_source.est_errors()
        results = fit_source.result
        ax0, ax1 = results[0].plot(figsize=(8,8))
        print(results[0])
    
    if est_hi_lim == 'yes':
        hi_fit_energ = cube_on.energies('edges')[int(np.sum(circ_stats))]

    for k in range(len(models_circ_fit)):
        fit_source = SpectrumFit(obs_list=circ_sed_table, model=models_circ_fit[k],
                                 forward_folded=True, fit_range=(lo_fit_energ, hi_fit_energ))
        fit_source.fit()
        fit_source.est_errors()
        results = fit_source.result
        print(results[0])
print(extraction.observations[0])
extraction.run(obs_list=obs_list, bkg_estimate=background_estimator.result, outdir=ANALYSIS_DIR)
extraction.run(obs_list=crablist, bkg_estimate=background_estimator.result, outdir=ANALYSIS_DIR)
extraction.write(obs_list=crablist, bkg_estimate=background_estimator.result, outdir=ANALYSIS_DIR)
extraction.write(crablist)
extraction.write(crablist, bkg_estimate=background_estimator.result, outdir=ANALYSIS_DIR)
extraction.write(crablist, background_estimator.result, outdir=ANALYSIS_DIR)
get_ipython().magic(u'pinfo extraction.write')
extraction.observations[0].peek()
model = PowerLaw(
    index=2 * u.Unit(''),
    amplitude=2e-11 * u.Unit('cm-2 s-1 TeV-1'),
    reference=1 * u.TeV,
)

joint_fit = SpectrumFit(obs_list=extraction.observations, model=model)
joint_fit.fit()
joint_fit.est_errors()
#fit.run(outdir = ANALYSIS_DIR)

joint_result = joint_fit.result
ax0, ax1 = joint_result[0].plot(figsize=(8,8))
ax0.set_ylim(0, 20)
print(joint_result[0])
plt.show()
ebounds = [0.3, 1.1, 3, 10.1, 30] * u.TeV

stacked_obs = extraction.observations.stack()

seg = SpectrumEnergyGroupMaker(obs=stacked_obs)
seg.compute_range_safe()
Example #21
# In[44]:

extract.observations[0].peek()

# Now we’ll fit a global model to the spectrum. First we do a joint likelihood fit to all observations.

# In[45]:

model = models.PowerLaw(
    index=2 * u.Unit(''),
    amplitude=1e-11 * u.Unit('cm-2 s-1 TeV-1'),
    reference=1 * u.TeV,
)

fit = SpectrumFit(extract.observations,
                  model,
                  fit_range=(1 * u.TeV, 10 * u.TeV))
#probably not working
fit.fit()
fit.est_errors()
print(fit.result[0])

# We will also produce a debug plot in order to show how the global fit matches one of the individual observations.

# In[46]:

fit.result[0].plot()

# We can compute flux points by fitting the norm of the global model in energy bands. We’ll use a fixed energy binning for now.

# In[47]:
    def make_fit(self, name):
        dataset = config.datasets[name]
        observations = dataset.get_SpectrumObservationList()
        model = Log10ParabolaEnergyScale()
        fit_range = dataset.energy_range
        return SpectrumFit(obs_list=observations, model=model, fit_range=fit_range)
)
extract.run()

# ### Model fit
#
# The next step is to fit a spectral model, using all data (i.e. a "global" fit, using all energies).

# In[26]:

model = models.PowerLaw(
    index=2 * u.Unit(''),
    amplitude=1e-11 * u.Unit('cm-2 s-1 TeV-1'),
    reference=1 * u.TeV,
)

fit = SpectrumFit(extract.observations, model)
fit.fit()
fit.est_errors()
print(fit.result[0])

# ### Spectral points
#
# Finally, let's compute spectral points. The method used is to first choose an energy binning, and then to do a 1-dim likelihood fit / profile to compute the flux and flux error.

# In[27]:

# Flux points are computed on stacked observation
stacked_obs = extract.observations.stack()
print(stacked_obs)

ebounds = EnergyBounds.equal_log_spacing(1, 40, 4, unit=u.TeV)
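
# In[ ]:

# A minimal sketch of the spectral-point computation described above; the
# SpectrumEnergyGroupMaker calls appear in other examples on this page, while
# the FluxPointEstimator usage is an assumption for this gammapy version.
seg = SpectrumEnergyGroupMaker(obs=stacked_obs)
seg.compute_groups_fixed(ebounds=ebounds)

fpe = FluxPointEstimator(obs=stacked_obs, groups=seg.groups, model=fit.result[0].model)
fpe.compute_points()
print(fpe.flux_points.table)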
Example #24
def run_gammapy_fit(obs_list, fit_range, eval_contours=None):
    """Run fit with Gammapy, using iminiuit"""
    model_lp = Log10Parabola(
        amplitude=3.80 * 1e-11 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.Unit("TeV"),
        alpha=2.47 * u.Unit(""),
        beta=0.24 * u.Unit(""),
    )

    # note this step is very important to iminuit, to initialize the parameter error!
    fit = SpectrumFit(obs_list=obs_list, model=model_lp, fit_range=fit_range)

    log.info("Starting fit ...")
    t = time()
    fit.optimize(backend="minuit", pedantic=True)
    fit.run()
    t = time() - t
    log.info(f"Finished fit in {t} seconds.")

    results = extract_spectrum_results_gammapy(fit)
    print(fit.result[0])

    if eval_contours is not None:
        log.info("storing likelihood contours ...")
        # points along the contour
        like_points = 80
        sigma = 1.

        log.info(f"computing amplitude vs alpha {sigma} sigma contour")
        cont = fit.minuit.mncontour("par_000_amplitude",
                                    "par_002_alpha",
                                    numpoints=like_points,
                                    sigma=sigma)
        # the third element of mncontour's returned object is a list of tuples with the contour coordinates
        # [(x_1, y_1), ..., (x_n, y_n)]
        cont = np.asarray(cont[2])
        amplitude = cont.T[0]  # transpose and take the first row
        alpha = cont.T[1]  # transpose and take the second row
        # trick to make a close circle when plotting: just repeat the first coordinate
        amplitude = np.append(amplitude, amplitude[0])
        alpha = np.append(alpha, alpha[0])
        contour_amplitude_alpha = {"amplitude": amplitude, "alpha": alpha}

        log.info(f"computing amplitude vs beta {sigma} sigma contour")
        cont = fit.minuit.mncontour("par_000_amplitude",
                                    "par_003_beta",
                                    numpoints=like_points,
                                    sigma=sigma)
        cont = np.asarray(cont[2])
        amplitude = cont.T[0]  # transpose and take the first row
        beta = cont.T[1]  # transpose and take the second row
        # trick to make a close circle when plotting: just repeat the first coordinate
        amplitude = np.append(amplitude, amplitude[0])
        beta = np.append(beta, beta[0])
        contour_amplitude_beta = {"amplitude": amplitude, "beta": beta}

        log.info(f"computing alpha vs beta {sigma} sigma contour")
        cont = fit.minuit.mncontour("par_002_alpha",
                                    "par_003_beta",
                                    numpoints=like_points,
                                    sigma=sigma)
        cont = np.asarray(cont[2])
        alpha = cont.T[0]  # transpose and take the first row
        beta = cont.T[1]  # transpose and take the second row
        # trick to make a close circle when plotting: just repeat the first coordinate
        alpha = np.append(alpha, alpha[0])
        beta = np.append(beta, beta[0])
        contour_alpha_beta = {"alpha": alpha, "beta": beta}

        # define the general dictionary and dump it in a .npy object
        contours = {
            "contour_amplitude_alpha": contour_amplitude_alpha,
            "contour_amplitude_beta": contour_amplitude_beta,
            "contour_alpha_beta": contour_alpha_beta,
        }
        logging.info(f"storing .yaml with contours in {eval_contours}")
        path = Path(eval_contours)
        path.mkdir(parents=True, exist_ok=True)
        np.save(f"{eval_contours}/fit_{sigma}_sigma_contours_logparabola.npy",
                contours)

        return results
Example #25
sim.simulate_obs(seed=42, obs_id=0)
sim = SpectrumSimulation(aeff=aeff,
                         edisp=edisp,
                         source_model=model,
                         livetime=livetime,
                         background_model=none, alpha=alpha)
sim = SpectrumSimulation(aeff=aeff,
                         edisp=edisp,
                         source_model=model,
                         livetime=livetime,
                         background_model="none", alpha=alpha)
sim.simulate_obs(seed=42, obs_id=0)
sim = SpectrumSimulation(aeff=aeff, edisp=edisp, source_model=model, livetime=livetime)
sim.simulate_obs(seed=42, obs_id=0)
get_ipython().magic(u'pinfo SpectrumFit')
fit = SpectrumFit(obs_list=sim.obs, model=model, background_model=inbuiltbkg, stat='cash')
fit.run()
get_ipython().magic(u'pinfo SpectrumFit')
fit = SpectrumFit(obs_list=sim.obs, model=model, background_model=inbuiltbkg, stat='wstat')
bkg_model
fit = SpectrumFit(obs_list=sim.obs, model=model, background_model=bkg_model, stat='wstat')
fit = SpectrumFit(obs_list=sim.obs, model=model, background_model=background_estimator1, stat='wstat')
fit = SpectrumFit(obs_list=sim.obs, model=model, background_model=background_estimator, stat='wstat')
background_estimator1.run()
fit = SpectrumFit(obs_list=sim.obs, model=model, background_model=background_estimator1, stat='wstat')
background_estimator1.plot()
plt.show()
background_estimator.obs_list
print(background_estimator.obs_list)
print(background_estimator1.obs_list)
background_estimator.plot()
TODO: Refactor and add to FluxPointsComputation class or so
"""

from gammapy.spectrum import (SpectrumObservation, SpectrumFit, FluxPoints,
                              SpectrumResult)
import astropy.units as u
from astropy.table import Table
import numpy as np
import copy
import matplotlib.pyplot as plt

plt.style.use('ggplot')
obs = SpectrumObservation.read(
    '$GAMMAPY_EXTRA/datasets/hess-crab4_pha/pha_obs23523.fits')

fit = SpectrumFit(obs)
fit.run()
best_fit = copy.deepcopy(fit.result[0].fit)

# Define Flux points binning
emin = np.log10(obs.lo_threshold.to('TeV').value)
emax = np.log10(40)
binning = np.logspace(emin, emax, 8) * u.TeV

# Fix index
fit.model.gamma.freeze()

# Fit norm in bands
diff_flux = list()
diff_flux_err = list()
e_err_hi = list()
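
# A sketch of the per-bin norm fit that this script builds up to, following the
# same pattern as the compute() method in Example #30 below; whether this older
# SpectrumFit exposes exactly the same fit()/global_result API is an assumption.
for low, high in zip(binning[:-1], binning[1:]):
    fit.fit_range = low, 0.9999 * high
    fit.fit()
    res = fit.global_result

    bin_center = np.sqrt(low * high)
    e_err_hi.append(high - bin_center)
    diff_flux.append(res.model(bin_center).to('m-2 s-1 TeV-1'))
    err = res.model_with_uncertainties(bin_center.to('TeV').value)
    diff_flux_err.append(err.s * u.Unit('m-2 s-1 TeV-1'))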
Example #27
from gammapy.spectrum import SpectrumFit, SpectrumObservation, SpectrumFitResult
import matplotlib.pyplot as plt
import astropy.units as u

obs = SpectrumObservation.read('pha_obs31415.fits')
fit = SpectrumFit(obs)

obs.peek()
plt.savefig('observation.png')
plt.cla()

fit.run()
fit.result[0].plot_fit()
plt.savefig('debug_fit.png')

# TODO: implement properly
plt.cla()

fig = plt.figure()
ax = fig.add_subplot(111)
fit.result[0].fit.plot_butterfly(ax=ax, label='Fit result')
input_parameters = dict(index = 2.3 * u.Unit(''),
                        norm = 2.5 * 1e-12 * u.Unit('cm-2 s-1 TeV-1'),
                        reference = 1 * u.TeV)
input_parameter_errors = dict(index = 0 * u.TeV,
                              norm = 0 * u.Unit('cm-2 s-1 TeV-1'),
                              reference = 0 * u.TeV)

input_model = SpectrumFitResult(spectral_model = 'PowerLaw',
                                parameters = input_parameters,
                                parameter_errors = input_parameter_errors)
               reference=1 * u.TeV)
print(pwl)

# In[ ]:

livetime = 2 * u.h
sim = SpectrumSimulation(aeff=aeff,
                         edisp=edisp,
                         source_model=pwl,
                         livetime=livetime)
sim.simulate_obs(seed=2309, obs_id=1)
print(sim.obs)

# In[ ]:

fit = SpectrumFit(obs_list=sim.obs, model=pwl.copy(), stat="cash")
fit.fit_range = [1, 10] * u.TeV
fit.run()
print(fit.result[0])

# ## Include background
#
# In this section we will include a background component. Furthermore, we will also simulate more than one observation and fit each one individually in order to get average fit results.

# In[ ]:

bkg_model = PowerLaw(index=2.5,
                     amplitude=1e-11 * u.Unit("cm-2 s-1 TeV-1"),
                     reference=1 * u.TeV)

# In[ ]:
Example #29
def check_energy_binning_effects():
    """Check how spectral fit results change with energy binnings.

    Actually this is still using the default:

    In [14]: print(analysis.extraction.observations[0].edisp)
    EnergyDispersion
    NDDataArray summary info
    e_true         : size =   108, min =  0.010 TeV, max = 301.416 TeV
    e_reco         : size =    72, min =  0.011 TeV, max = 93.804 TeV
    Data           : size =  7776, min =  0.000, max =  1.000

    But now, the fit results from SpectrumAnalysisIACT, which is just
    driving Fitspectrum, are different, almost the same as Sherpa.


    Current results

    Parameters:

           name     value     error         unit      min max frozen
        --------- --------- --------- --------------- --- --- ------
            index 2.620e+00 1.540e-01                 nan nan  False
        amplitude 3.849e-11 5.407e-12 1 / (cm2 s TeV) nan nan  False
        reference 1.000e+00 0.000e+00             TeV nan nan   True

    Covariance:

        name/name  index   amplitude
        --------- -------- ---------
            index   0.0237  5.85e-13
        amplitude 5.85e-13  2.92e-23

    Statistic: -157.166 (cash)
    Fit Range: [ 1.         27.82559402] TeV


    ???
    """
    data_store = DataStore.from_dir("data/hess")
    obs_list = data_store.obs_list([23523])
    on_region = CircleSkyRegion(conf.crab_position, conf.on_radius["hess"])
    fp_binning = [1, 10, 30] * u.TeV
    exclusion_mask = get_exclusion_mask(conf.crab_position)
    model = PowerLaw(
        amplitude=1.23 * 1e-11 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.Unit("TeV"),
        index=2.14 * u.Unit(""),
    )
    cfg = dict(
        outdir=None,
        background=dict(
            on_region=on_region,
            exclusion_mask=exclusion_mask,
            # min_distance=0.1 * u.rad,
        ),
        extraction=dict(containment_correction=True),
        fit=dict(
            model=model,
            stat="cash",
            # forward_folded=True,
            fit_range=energy_range,
        ),
        fp_binning=fp_binning,
    )
    analysis = SpectrumAnalysisIACT(obs_list, cfg)
    analysis.run()
    analysis.fit.est_errors()
    print("BEFORE", analysis.fit.result[0])
    # print(analysis.extraction.observations[0].edisp)
    # pprint(analysis.fit.__dict__)
    # obs = analysis.fit.obs_list[0]
    # print(obs)
    # # import IPython; IPython.embed()
    # obs.peek()
    # import matplotlib.pyplot as plt
    # plt.savefig('check_energy_binning_effects.png')
    # print('This is check_energy_binning_effects')

    # Try to see if the I/O causes a change in results.
    analysis.extraction.observations.write("temp123")
    analysis.extraction.observations.write("temp123", use_sherpa=True)
    obs = SpectrumObservation.read("temp123/pha_obs23523.fits")
    model = PowerLaw(
        amplitude=1.23 * 1e-11 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.Unit("TeV"),
        index=2.14 * u.Unit(""),
    )

    fit = SpectrumFit(obs_list=obs, model=model, fit_range=energy_range, stat="cash")
    fit.run()
    print("AFTER", fit.result[0])
Example #30
    def compute(cls, model, obs_list, binning):
        """Compute differential fluxpoints

        The norm of the global model is fit to the
        `~gammapy.spectrum.SpectrumObservationList` in the provided energy
        binning and the differential flux is evaluated at the log bin center.

        TODO : Add upper limit calculation

        Parameters
        ----------
        model : `~gammapy.spectrum.models.SpectralModel`
            Global model
        obs_list : `~gammapy.spectrum.SpectrumObservationList`
            Observations
        binning : `~astropy.units.Quantity`
            Energy binning, see
            :func:`~gammapy.spectrum.utils.calculate_flux_point_binning` for a
            method to get flux points with a minimum significance.
        """
        from gammapy.spectrum import SpectrumFit

        binning = EnergyBounds(binning)
        low_bins = binning.lower_bounds
        high_bins = binning.upper_bounds

        diff_flux = list()
        diff_flux_err = list()
        e_err_hi = list()
        e_err_lo = list()
        energy = list()

        from ..spectrum import models, powerlaw
        from sherpa.models import PowLaw1D

        if isinstance(model, models.PowerLaw):
            temp = model.to_sherpa()
            temp.gamma.freeze()
            sherpa_models = [temp] * binning.nbins
        else:
            sherpa_models = [None] * binning.nbins

        for low, high, sherpa_model in zip(low_bins, high_bins, sherpa_models):
            log.info('Computing flux points in bin [{}, {}]'.format(low, high))

            # Make PowerLaw approximation for higher order models
            if sherpa_model is None:
                flux_low = model(low)
                flux_high = model(high)
                index = powerlaw.power_law_g_from_points(e1=low,
                                                         e2=high,
                                                         f1=flux_low,
                                                         f2=flux_high)

                log.debug('Approximated power law index: {}'.format(index))
                sherpa_model = PowLaw1D('powlaw1d.default')
                sherpa_model.gamma = index
                sherpa_model.gamma.freeze()
                sherpa_model.ref = model.parameters.reference.to('keV')
                sherpa_model.ampl = 1e-20

            fit = SpectrumFit(obs_list, sherpa_model)

            # If 'low' or 'high' fall onto a bin edge of the
            # SpectrumObservation binning, numerical fluctuations can lead to
            # the inclusion of unwanted bins
            correction = 1e-5
            fit.fit_range = ((1 + correction) * low, (1 - correction) * high)
            fit.fit()
            res = fit.global_result

            bin_center = np.sqrt(low * high)
            energy.append(bin_center)
            e_err_hi.append(high - bin_center)
            e_err_lo.append(bin_center - low)
            diff_flux.append(res.model(bin_center).to('m-2 s-1 TeV-1'))
            err = res.model_with_uncertainties(bin_center.to('TeV').value)
            diff_flux_err.append(err.s * Unit('m-2 s-1 TeV-1'))

        return cls.from_arrays(energy=energy,
                               diff_flux=diff_flux,
                               diff_flux_err_hi=diff_flux_err,
                               diff_flux_err_lo=diff_flux_err,
                               energy_err_hi=e_err_hi,
                               energy_err_lo=e_err_lo)
Example #31
from astropy.tests.helper import assert_quantity_allclose

from gammapy.data import ObservationTable
from gammapy.datasets import gammapy_extra
from gammapy.spectrum import SpectrumFit, SpectrumGrouping, group_obs_table

obs_table_file = gammapy_extra.filename(
    'datasets/hess-crab4_pha/observation_table.fits')

obs_table = ObservationTable.read(obs_table_file)

fit = SpectrumFit.from_observation_table(obs_table)
fit.model = 'PL'
fit.energy_threshold_low = '1 TeV'
fit.energy_threshold_high = '10 TeV'
fit.run(method='sherpa')

#Use each obs in one group
obs_table1 = group_obs_table(obs_table, eff_range=[90, 95], n_eff_bin=5)
obs_table1.write('grouped_table1_debug.fits', overwrite=True)

grouping = SpectrumGrouping(obs_table1)
grouping.run()

fit_band2 = SpectrumFit.from_observation_table(grouping.stacked_obs_table)
fit_band2.model = 'PL'
fit_band2.energy_threshold_low = '1 TeV'
fit_band2.energy_threshold_high = '10 TeV'
fit_band2.run(method='sherpa')

assert_quantity_allclose(fit.result.parameters["index"],