def fit_gammapy():
    """
    Current results

    Parameters:

       name     value     error         unit      min max frozen
    --------- --------- --------- --------------- --- --- ------
        index 2.602e+00 1.555e-01                 nan nan  False
    amplitude 2.441e-11 3.452e-12 1 / (cm2 s TeV) nan nan  False
    reference 1.000e+00 0.000e+00             TeV nan nan   True

    Covariance:

    name/name   index   amplitude
    --------- -------- ---------
        index   0.0242  3.79e-13
    amplitude 3.79e-13  1.19e-23

    Statistic: -157.719 (cash)
    Fit Range: [1.0000000e+09 2.7825594e+10] keV
    """
    obs = SpectrumObservation.read(obs_path)
    # obs.peek()
    # plt.show()
    model = PowerLaw(
        amplitude=1.23 * 1e-11 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.Unit("TeV"),
        index=2.14 * u.Unit(""),
    )
    fit = SpectrumFit(obs_list=obs, model=model, fit_range=energy_range, stat="cash")
    fit.run()
    print(fit.result[0])
    pprint(fit.__dict__)

    obs = fit.obs_list[0]
    print(obs)
    print("This is fit_gammapy")
    obs.peek()

    import matplotlib.pyplot as plt

    plt.savefig("fit_gammapy.png")
def run_analysis(which):
    log.info(f"Fitting dataset: {which}")

    dataset = config.datasets[which]
    obs_list = dataset.get_SpectrumObservationList()
    fit_range = dataset.energy_range
    log.info(f"obs_list: {obs_list}")

    model = Log10Parabola(
        amplitude="3.8e-11 cm-2 s-1 TeV-1", reference="1 TeV", alpha=2.47, beta=0.24
    )

    fit = SpectrumFit(obs_list=obs_list, model=model, fit_range=fit_range)
    log.info("Running fit")
    fit.run()

    fit_results = make_results_dict(fit)
    utils.write_yaml(fit_results, f"results/fit/fit_{which}.yaml")

    contour_results = compute_contours(fit)
    utils.write_yaml(contour_results, f"results/fit/contours_{which}.yaml")
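# The `utils.write_yaml` helper used above is not shown in this excerpt. A minimal
# sketch of what such a helper might look like, assuming it just creates the parent
# directory and dumps plain Python types with PyYAML (the name and behaviour of this
# helper are an assumption, not the project's actual implementation):
from pathlib import Path

import yaml


def write_yaml(data, path):
    """Write a dict of plain Python types to ``path`` as YAML (assumed helper)."""
    path = Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("w") as fh:
        yaml.safe_dump(data, fh, default_flow_style=False)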
# Now we'll fit a model to the spectrum with SpectrumFit. First we load a power law
# model with an initial value for the index and the amplitude, and then we do a
# likelihood fit. The fit results are printed below.

# In[ ]:


model = PowerLaw(index=4, amplitude="1.3e-9 cm-2 s-1 TeV-1", reference="0.02 TeV")
fit_range = (0.04 * u.TeV, 0.4 * u.TeV)
ebounds = EnergyBounds.equal_log_spacing(0.04, 0.4, 7, u.TeV)

joint_fit = SpectrumFit(
    obs_list=extraction.spectrum_observations, model=model, fit_range=fit_range
)
joint_fit.run()
joint_result = joint_fit.result

print(joint_result[0])


# Now you might want to do the stacking here, even if in our case there is only one
# observation, which makes it superfluous.
# We can compute flux points by fitting the norm of the global model in energy bands.

# In[ ]:


stacked_obs = extraction.spectrum_observations.stack()

seg = SpectrumEnergyGroupMaker(obs=stacked_obs)
seg.compute_groups_fixed(ebounds=ebounds)

fpe = FluxPointEstimator(obs=stacked_obs, groups=seg.groups,
# In[ ]:


livetime = 2 * u.h

sim = SpectrumSimulation(aeff=aeff, edisp=edisp, source_model=pwl, livetime=livetime)
sim.simulate_obs(seed=2309, obs_id=1)

print(sim.obs)


# In[ ]:


fit = SpectrumFit(obs_list=sim.obs, model=pwl.copy(), stat="cash")
fit.fit_range = [1, 10] * u.TeV
fit.run()
print(fit.result[0])


# ## Include background
#
# In this section we will include a background component. Furthermore, we will also
# simulate more than one observation and fit each one individually in order to get
# average fit results.

# In[ ]:


bkg_model = PowerLaw(
    index=2.5, amplitude=1e-11 * u.Unit("cm-2 s-1 TeV-1"), reference=1 * u.TeV
)


# In[ ]:


get_ipython().run_cell_magic(
# _ = extraction.spectrum_observations.peek()
extraction.spectrum_observations[0].peek()


# ## Fit spectrum
#
# Now we'll fit a global model to the spectrum. First we do a joint likelihood fit to
# all observations. If you want to stack the observations see below. We will also
# produce a debug plot in order to show how the global fit matches one of the
# individual observations.

# In[ ]:


model = PowerLaw(
    index=2, amplitude=2e-11 * u.Unit("cm-2 s-1 TeV-1"), reference=1 * u.TeV
)

joint_fit = SpectrumFit(obs_list=extraction.spectrum_observations, model=model)
joint_fit.run()

joint_result = joint_fit.result


# In[ ]:


ax0, ax1 = joint_result[0].plot(figsize=(8, 8))
ax0.set_ylim(0, 20)
print(joint_result[0])


# ## Compute Flux Points
#
# To round up our analysis we can compute flux points by fitting the norm of the global
# model in energy bands. We'll use a fixed energy binning for now.

# In[ ]:


# Define energy binning
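# The cell above stops at the "Define energy binning" comment and the actual edges are
# not preserved in this excerpt. Based on the fixed, log-spaced binnings used in the
# other snippets (np.logspace with astropy units), a sketch of what such a definition
# might look like; the bin edges below are placeholders, not the notebook's values:
import numpy as np
import astropy.units as u

# Placeholder log-spaced energy bin edges (edges and number of bins are illustrative).
ebounds = np.logspace(np.log10(0.7), np.log10(30), 11) * u.TeV
print(ebounds)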
    SpectrumFit,
    DifferentialFluxPoints,
    SpectrumFitResult,
    SpectrumResult,
)
import astropy.units as u
import numpy as np
import copy
import matplotlib.pyplot as plt

plt.style.use('ggplot')

obs = SpectrumObservation.read(
    '$GAMMAPY_EXTRA/datasets/hess-crab4_pha/pha_obs23523.fits'
)

fit = SpectrumFit(obs)
fit.run()
best_fit = copy.deepcopy(fit.result[0].fit)

# Define flux points binning
emin = np.log10(obs.lo_threshold.to('TeV').value)
emax = np.log10(40)
binning = np.logspace(emin, emax, 8) * u.TeV

# Fix index
fit.model.gamma.freeze()

# Fit norm in bands
diff_flux = list()
diff_flux_err = list()
e_err_hi = list()
e_err_lo = list()
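# The per-bin loop that fills these lists is not included in this excerpt. As an
# illustration only, the energy bookkeeping such a loop needs (geometric bin centres
# and asymmetric energy errors derived from the `binning` edges above) could look like
# the sketch below; the actual per-band norm fit itself is deliberately left out. This
# continues from the variables defined in the snippet above.
energies = list()
for e_lo, e_hi in zip(binning[:-1], binning[1:]):
    # Geometric centre of the bin and asymmetric errors towards the bin edges
    e_center = np.sqrt(e_lo * e_hi)
    energies.append(e_center)
    e_err_lo.append(e_center - e_lo)
    e_err_hi.append(e_hi - e_center)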
def check_energy_binning_effects():
    """Check how spectral fit results change with energy binnings.

    Actually this is still using the default:

    In [14]: print(analysis.extraction.observations[0].edisp)
    EnergyDispersion
    NDDataArray summary info
    e_true : size = 108, min = 0.010 TeV, max = 301.416 TeV
    e_reco : size = 72, min = 0.011 TeV, max = 93.804 TeV
    Data   : size = 7776, min = 0.000, max = 1.000

    But now, the fit results from SpectrumAnalysisIACT, which is just driving
    Fitspectrum, are different, almost the same as Sherpa.

    Current results

    Parameters:

       name     value     error         unit      min max frozen
    --------- --------- --------- --------------- --- --- ------
        index 2.620e+00 1.540e-01                 nan nan  False
    amplitude 3.849e-11 5.407e-12 1 / (cm2 s TeV) nan nan  False
    reference 1.000e+00 0.000e+00             TeV nan nan   True

    Covariance:

    name/name   index   amplitude
    --------- -------- ---------
        index   0.0237  5.85e-13
    amplitude 5.85e-13  2.92e-23

    Statistic: -157.166 (cash)
    Fit Range: [ 1.         27.82559402] TeV

    ???
    """
    data_store = DataStore.from_dir("data/hess")
    obs_list = data_store.obs_list([23523])
    on_region = CircleSkyRegion(conf.crab_position, conf.on_radius["hess"])
    fp_binning = [1, 10, 30] * u.TeV
    exclusion_mask = get_exclusion_mask(conf.crab_position)
    model = PowerLaw(
        amplitude=1.23 * 1e-11 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.Unit("TeV"),
        index=2.14 * u.Unit(""),
    )
    cfg = dict(
        outdir=None,
        background=dict(
            on_region=on_region,
            exclusion_mask=exclusion_mask,
            # min_distance=0.1 * u.rad,
        ),
        extraction=dict(containment_correction=True),
        fit=dict(
            model=model,
            stat="cash",
            # forward_folded=True,
            fit_range=energy_range,
        ),
        fp_binning=fp_binning,
    )
    analysis = SpectrumAnalysisIACT(obs_list, cfg)
    analysis.run()
    analysis.fit.est_errors()
    print("BEFORE", analysis.fit.result[0])
    # print(analysis.extraction.observations[0].edisp)
    # pprint(analysis.fit.__dict__)

    # obs = analysis.fit.obs_list[0]
    # print(obs)
    # # import IPython; IPython.embed()
    # obs.peek()
    # import matplotlib.pyplot as plt
    # plt.savefig('check_energy_binning_effects.png')
    # print('This is check_energy_binning_effects')

    # Try to see if the I/O causes a change in results.
    analysis.extraction.observations.write("temp123")
    analysis.extraction.observations.write("temp123", use_sherpa=True)
    obs = SpectrumObservation.read("temp123/pha_obs23523.fits")
    model = PowerLaw(
        amplitude=1.23 * 1e-11 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.Unit("TeV"),
        index=2.14 * u.Unit(""),
    )
    fit = SpectrumFit(obs_list=obs, model=model, fit_range=energy_range, stat="cash")
    fit.run()
    print("AFTER", fit.result[0])
def run_gammapy_fit(obs_list, fit_range, eval_contours=None):
    """Run fit with Gammapy, using iminuit."""
    model_lp = Log10Parabola(
        amplitude=3.80 * 1e-11 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.Unit("TeV"),
        alpha=2.47 * u.Unit(""),
        beta=0.24 * u.Unit(""),
    )

    # note this step is very important to iminuit, to initialize the parameter error!
    fit = SpectrumFit(obs_list=obs_list, model=model_lp, fit_range=fit_range)

    log.info("Starting fit ...")
    t = time()
    fit.optimize(backend="minuit", pedantic=True)
    fit.run()
    t = time() - t
    log.info(f"Finished fit in {t} seconds.")

    results = extract_spectrum_results_gammapy(fit)
    print(fit.result[0])

    if eval_contours is not None:
        log.info("storing likelihood contours ...")

        # number of points along each contour
        like_points = 80
        sigma = 1.0

        log.info(f"computing amplitude vs alpha {sigma} sigma contour")
        cont = fit.minuit.mncontour(
            "par_000_amplitude", "par_002_alpha", numpoints=like_points, sigma=sigma
        )
        # the third element of mncontour's returned object is a list of tuples with
        # the contour coordinates: [(x_1, y_1), ..., (x_n, y_n)]
        cont = np.asarray(cont[2])
        amplitude = cont.T[0]  # transpose and take the first row
        alpha = cont.T[1]  # transpose and take the second row
        # trick to get a closed contour when plotting: repeat the first coordinate
        amplitude = np.append(amplitude, amplitude[0])
        alpha = np.append(alpha, alpha[0])
        contour_amplitude_alpha = {"amplitude": amplitude, "alpha": alpha}

        log.info(f"computing amplitude vs beta {sigma} sigma contour")
        cont = fit.minuit.mncontour(
            "par_000_amplitude", "par_003_beta", numpoints=like_points, sigma=sigma
        )
        cont = np.asarray(cont[2])
        amplitude = cont.T[0]  # transpose and take the first row
        beta = cont.T[1]  # transpose and take the second row
        # trick to get a closed contour when plotting: repeat the first coordinate
        amplitude = np.append(amplitude, amplitude[0])
        beta = np.append(beta, beta[0])
        contour_amplitude_beta = {"amplitude": amplitude, "beta": beta}

        log.info(f"computing alpha vs beta {sigma} sigma contour")
        cont = fit.minuit.mncontour(
            "par_002_alpha", "par_003_beta", numpoints=like_points, sigma=sigma
        )
        cont = np.asarray(cont[2])
        alpha = cont.T[0]  # transpose and take the first row
        beta = cont.T[1]  # transpose and take the second row
        # trick to get a closed contour when plotting: repeat the first coordinate
        alpha = np.append(alpha, alpha[0])
        beta = np.append(beta, beta[0])
        contour_alpha_beta = {"alpha": alpha, "beta": beta}

        # collect all contours in one dictionary and dump it to a .npy file
        contours = {
            "contour_amplitude_alpha": contour_amplitude_alpha,
            "contour_amplitude_beta": contour_amplitude_beta,
            "contour_alpha_beta": contour_alpha_beta,
        }

        log.info(f"storing .npy with contours in {eval_contours}")
        path = Path(eval_contours)
        path.mkdir(parents=True, exist_ok=True)
        np.save(f"{eval_contours}/fit_{sigma}_sigma_contours_logparabola.npy", contours)

    return results
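# Reading the stored contours back is a matter of loading the pickled dictionary from
# the .npy file and plotting the already-closed polygon with matplotlib. A minimal
# sketch, assuming a placeholder output directory ("results/fit") in place of whatever
# was passed as `eval_contours`; the figure styling is illustrative only:
import numpy as np
import matplotlib.pyplot as plt

# np.save stores the dict as a 0-d object array, so allow_pickle=True and .item()
# are needed to recover the plain dictionary.
contours = np.load(
    "results/fit/fit_1.0_sigma_contours_logparabola.npy", allow_pickle=True
).item()

cont = contours["contour_alpha_beta"]
plt.plot(cont["alpha"], cont["beta"])
plt.xlabel("alpha")
plt.ylabel("beta")
plt.title("1 sigma likelihood contour")
plt.savefig("contour_alpha_beta.png")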