Example #1
    def __init__(self, roi, which, **kwargs):
        """ Compute an upper limit on the source extension, by the "PDG Method". """
        keyword_options.process(self, kwargs)

        self.roi = roi
        self.which = which

        self.init_ts = roi.TS(which, quick=False)

        if self.init_ts < 4:
            # Punt on extension upper limits for completely insignificant sources
            print 'Unable to compute extension upper limit for point-like source with too-small TS'
            self.extension_limit = None

        else:
            if not isinstance(self.spatial_model, type):
                raise Exception(
                    "The spatial model bust be a type, like Gaussian, not an instance, like Gaussian()"
                )

            # Note, since the input is the class, not the instance, the
            # position parameters have not yet been added on.
            n = self.spatial_model.param_names
            assert len(n) == 1 and n[0] == 'Sigma'

            self.saved_state = PointlikeState(roi)

            self.spatial_low_lim, self.spatial_hi_lim = self.spatial_model.default_limits[
                0]

            results = self._compute()

            self.saved_state.restore()
Example #2
    def _calculate(self):

        roi = self.roi
        name = self.name
        param_name = self.param_name

        self.init_state = PointlikeState(roi)

        self.results = dict(
            name=name,
            param_name=param_name,
            param_vals=self.param_vals,
            grid=[])

        if self.verbosity:
            print 'Performing grid over parameter %s for source %s' % (param_name, name)

        best_state = None
        self.best_ll = -np.inf

        model = roi.get_model(which=name)
        old_free = model.get_free(param_name)

        for i,p in enumerate(self.param_vals):
            if self.verbosity:
                print 'looping for param %s=%s (%d/%d)' % (param_name, p, i+1,len(self.param_vals))

            self.init_state.restore(just_spectra=True)

            model = roi.get_model(which=name)
            model[param_name]=p
            model.set_free(param_name,False)

            roi.modify(which=name, model=model, keep_old_flux=False)

            if self.verbosity:
                roi.print_summary()
            roi.fit(**self.fit_kwargs)
            if self.verbosity:
                roi.print_summary()

            d=source_dict(roi,name, energy_units=self.energy_units, flux_units=self.flux_units)
            self.results['grid'].append(d)

            ll = self.results['grid'][-1]['logLikelihood']

            if ll > self.best_ll:
                self.best_state = PointlikeState(roi)
                self.best_ll = ll
                self.best_d = d

        self.results['best'] = self.best_d

        if self.keep_best:
            self.best_state.restore(just_spectra=True)
            model = roi.get_model(which=name)
            model.set_free(param_name,old_free)
            roi.modify(which=name, model=model, keep_old_flux=False)
        else:
            self.init_state.restore(just_spectra=True)
Example #3
    def _compute(self):
        roi = self.roi
        which = self.which

        state = PointlikeState(roi)
        ll_0 = roi.logLikelihood(roi.parameters())

        source = roi.get_source(which)

        if not hasattr(source, 'model'):
            raise Exception(
                "upper_limit can only calculate upper limits of point and extended sources."
            )
        model = source.model

        if self.verbosity:
            print 'Computing upper limit for source %s with %s spectral model' % (
                source.name, model.name)

        integral_min, integral_max = self.get_integration_range(model)

        if self.verbosity:
            print 'For source %s, setting integration range:' % source.name
            print ' * integration minimum:', integral_min
            print ' * integration maximum:', integral_max

        # Unbound flux temporarily to avoid parameter limits
        model.set_mapper(0, LinearMapper)

        def like(norm):
            model.setp(0, norm)
            return np.exp(ll_0 - roi.logLikelihood(roi.parameters()))

        npoints = int(
            math.ceil(self.simps_points *
                      (np.log10(integral_max) - np.log10(integral_min))))
        points = np.logspace(np.log10(integral_min), np.log10(integral_max),
                             npoints * 2 + 1)
        y = np.array([like(x) * x for x in points])
        trapz1 = integrate.cumtrapz(y[::2])
        trapz2 = integrate.cumtrapz(y)[::2]
        cumsimps = (4 * trapz2 - trapz1) / 3.
        cumsimps /= cumsimps[-1]
        i1 = np.where(cumsimps < self.confidence)[0][-1]
        i2 = np.where(cumsimps > self.confidence)[0][0]
        x1, x2 = points[::2][i1], points[::2][i2]
        y1, y2 = cumsimps[i1], cumsimps[i2]
        #Linear interpolation should be good enough at this point
        limit = x1 + ((x2 - x1) / (y2 - y1)) * (self.confidence - y1)
        model.setp(0, limit)
        self.uflux = model.i_flux(**self.flux_kwargs)

        self.upper_limit_model = model.copy()

        state.restore(just_spectra=True)
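
The method above locates the flux upper limit as the confidence quantile of the normalized cumulative integral of the profile likelihood: the quantile is bracketed on the sampling grid and then found by linear interpolation between the two bracketing points. Below is a minimal, self-contained sketch of that quantile-by-interpolation idea on a toy likelihood; the toy function and grid are illustrative (not part of pointlike), and a plain cumulative trapezoid rule stands in for the Simpson-style combination used above.

import numpy as np
from scipy import integrate

# Toy profile likelihood in the flux normalization (illustrative only).
like = lambda norm: np.exp(-0.5 * ((norm - 3.0) / 0.7) ** 2)

confidence = 0.95
grid = np.logspace(-2, 1, 201)

# Cumulative integral of the likelihood, normalized so the last value is 1 (a CDF).
cdf = integrate.cumtrapz(like(grid), x=grid, initial=0)
cdf /= cdf[-1]

# Bracket the requested quantile and interpolate linearly between grid points.
i1 = np.where(cdf < confidence)[0][-1]
i2 = np.where(cdf > confidence)[0][0]
x1, x2 = grid[i1], grid[i2]
y1, y2 = cdf[i1], cdf[i2]
limit = x1 + (x2 - x1) / (y2 - y1) * (confidence - y1)
print 'toy 95%% upper limit on the normalization: %.3f' % limit

In the method above, the same interpolation is applied to cumsimps, and the resulting normalization is pushed back into the spectral model with model.setp(0, limit) before the integral flux is evaluated.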
Example #4
    def __init__(self, roi, which, model0, model1, **kwargs):
        keyword_options.process(self, kwargs)

        state = PointlikeState(roi)

        self.roi = roi
        self.which = which
        self.model0 = model0.copy()
        self.model1 = model1.copy()

        self.compute()
        state.restore()
Example #5
    def __init__(self, roi, which, model0, model1, **kwargs):
        keyword_options.process(self, kwargs)

        state = PointlikeState(roi)

        self.roi=roi
        self.which=which
        self.model0=model0.copy()
        self.model1=model1.copy()

        self.compute()
        state.restore()
Example #6
    def _compute(self):
        if self.verbosity: print 'calculating pointlike cutoff upper limit'

        roi = self.roi
        name = self.name

        saved_state = PointlikeState(roi)

        cutoff_model = PLSuperExpCutoff(Index=self.Index, Cutoff=self.Cutoff, b=self.b)
        roi.modify(which=name, model=cutoff_model, keep_old_flux=True)

        super(PointlikeCutoffUpperLimit,self)._compute()

        saved_state.restore(just_spectra=True)
Example #7
    def fill(self):

        roi = self.roi

        state = PointlikeState(roi)

        if not roi.quiet: print 'Calculating extension profile for %s' % self.source.name

        init_p = roi.get_parameters().copy()

        # Keep the TS function quiet
        old_quiet = roi.quiet
        roi.quiet=True

        sigma = self.spatial_model['sigma']
        sigma_err = self.spatial_model.error('sigma')

        upper_limit = min(sigma + max(3*sigma_err,sigma),3) if self.upper_limit is None else self.upper_limit

        # make the bottom point ~ 0.1x the first point
        lower_limit = float(upper_limit)/self.num_points/10.0 if self.lower_limit is None else self.lower_limit

        self.extension_list=np.linspace(lower_limit,upper_limit,self.num_points)

        self.TS_spectral=np.empty_like(self.extension_list)
        self.TS_bandfits=np.empty_like(self.extension_list)

        roi.setup_energy_bands()

        if not old_quiet: print '%20s %20s %20s' % ('sigma','TS_spectral','TS_bandfits')
        for i,sigma in enumerate(self.extension_list):
            roi.modify(which=self.which, sigma=sigma)

            roi.fit(**self.fit_kwargs)

            params=roi.parameters()
            ll_a=-1*roi.logLikelihood(roi.parameters())

            roi.update_counts(init_p)
            roi.fit(**self.fit_kwargs)
            ll_b=-1*roi.logLikelihood(roi.parameters())
            if ll_a > ll_b: roi.update_counts(params)

            self.TS_spectral[i]=roi.TS(**self.ts_kwargs)
            self.TS_bandfits[i]=roi.TS(bandfits=True,**self.ts_kwargs)

            if not old_quiet: print 'sigma=%.2f ts_spec=%.1f, ts_band=%.1f' % (sigma, self.TS_spectral[i],self.TS_bandfits[i])
        
        state.restore()
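
A possible follow-up, as a hedged sketch: once fill() has run, the stored arrays can be plotted directly. The object name profile and the output filename are illustrative; pylab is imported as P to match the plotting convention used elsewhere in these examples.

import pylab as P

# profile is assumed to be the object whose fill() method ran above.
P.plot(profile.extension_list, profile.TS_spectral, label='TS_spectral')
P.plot(profile.extension_list, profile.TS_bandfits, label='TS_bandfits')
P.xlabel('sigma')
P.ylabel('TS')
P.legend()
P.savefig('extension_profile.png')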
Example #8
    def _compute(self):

        roi = self.roi
        name = self.name
        model = roi.get_model(name)

        saved_state = PointlikeState(roi)

        try:
            ful = FluxUpperLimit(roi=roi, 
                                 which=name, 
                                 confidence=self.cl, 
                                 simps_points=self.simps_points,
                                 verbosity=self.verbosity)
            model = ful.upper_limit_model

            self.results  = pointlike_model_to_flux(model, 
                                                    emin=self.emin, emax=self.emax, 
                                                    flux_units=self.flux_units, 
                                                    energy_units=self.energy_units, 
                                                    errors=False,
                                                    include_prefactor=self.include_prefactor,
                                                    prefactor_energy=self.prefactor_energy,
                                                   )
            self.results['confidence'] = self.cl

            self.results['spectrum'] = spectrum_to_dict(model)

        except Exception, ex:
            print 'ERROR pointlike upper limit: ', ex
            traceback.print_exc(file=sys.stdout)
            self.results = None
Example #9
    def _calculate(self):

        self.results = dict()

        self.init_state = PointlikeState(self.roi)

        model = self.roi.get_model(self.name)
        init_mapper = model.get_mapper(self.param_name)
        param_val = model[self.param_name]

        assert param_val >= self.param_min and param_val <= self.param_max

        if isinstance(init_mapper, LimitMapper):
            assert init_mapper.min <= self.param_min and init_mapper.max >= self.param_max

        model.set_mapper(
            self.param_name,
            LimitMapper(self.param_min, self.param_max, scale=param_val))

        self.roi.modify(which=self.name, model=model)

        if self.verbosity:
            print 'Before fit'
            self.roi.print_summary()

        self.results['fit_before'] = source_dict(
            self.roi,
            self.name,
            energy_units=self.energy_units,
            flux_units=self.flux_units)

        self.roi.fit(**self.fit_kwargs)

        if self.verbosity:
            print 'After fit'
            self.roi.print_summary()

        self.results['fit_after'] = source_dict(self.roi,
                                                self.name,
                                                energy_units=self.energy_units,
                                                flux_units=self.flux_units)

        if self.keep_best:
            model = self.roi.get_model(self.name)
            model.set_mapper(self.param_name, init_mapper)
        else:
            self.init_state.restore(just_spectra=True)
Example #10
    def __init__(self, roi, name, *args, **kwargs):
        self.roi = roi
        keyword_options.process(self, kwargs)

        self.pointlike_fit_kwargs = dict(use_gradient=False)

        self.name = name

        self._setup_savedir()

        self._setup_time_bins()

        saved_state = PointlikeState(roi)

        self._test_variability()

        saved_state.restore()
Example #11
    def __init__(self, roi, name, *args, **kwargs):
        self.roi = roi
        keyword_options.process(self, kwargs)

        self.pointlike_fit_kwargs = dict(use_gradient=False)

        self.name = name

        self._setup_savedir()

        self._setup_time_bins()

        saved_state = PointlikeState(roi)

        self._test_variability()

        saved_state.restore()
Example #12
    def _compute(self):
        if self.verbosity: 
            print 'Calculating pointlike upper limit'

        roi = self.roi
        name = self.name

        saved_state = PointlikeState(roi)

        """ Note keep old flux, because it is important to have
            the spectral model pushed into the upper_limit
            code reasonably close to the best fit flux. This
            is because initial likelihood (ll_0) is used to scale
            the likelihood so it has to be reasonably close to 
            the best value. """
        model = PowerLaw(index=self.powerlaw_index)
        roi.modify(which=name, model=model, keep_old_flux=True)

        super(PointlikePowerLawUpperLimit,self)._compute()

        saved_state.restore(just_spectra=True)
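
The numerical point in the note above is easy to see directly: the upper-limit integrand is the exponential of a difference of log-likelihoods (see the like() helper in the earlier _compute example), so the reference value ll_0 must sit close to the values encountered during the scan or the exponential overflows or underflows. A tiny illustration with made-up numbers:

import numpy as np

ll_best = -41234.7           # hypothetical best-fit value
ll_far = ll_best - 900.0     # a reference point 900 away in log-likelihood

print np.exp(ll_best - ll_far)            # exp(900): overflows to inf
print np.exp(ll_far - ll_best)            # exp(-900): underflows to 0.0
print np.exp(ll_best - (ll_best - 2.5))   # a nearby reference stays O(1)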
Example #13
    def __init__(self, roi, bin_edges, nrows=1, grid_kwargs=dict(), **kwargs):

        default_grid_kwargs = dict(axes_pad=0.1,
                                   cbar_location="top",
                                   cbar_mode="each",
                                   cbar_size="7%",
                                   cbar_pad="2%")

        self.grid_kwargs = default_grid_kwargs.copy()
        self.grid_kwargs.update(grid_kwargs)

        self.roi = roi
        keyword_options.process(self, kwargs)
        self.nrows = nrows

        self.bin_edges = bin_edges
        self.nplots = len(self.bin_edges) - 1
        self.ncols = int(math.ceil(float(self.nplots) / self.nrows))

        for e in bin_edges:
            if not np.any(np.abs(e - roi.bin_edges) < 0.5):
                raise Exception(
                    "Energy %.1f inconsistent with ROI energy binning." % e)

        self.lower_energies = bin_edges[:-1]
        self.upper_energies = bin_edges[1:]

        state = PointlikeState(roi)

        # step 1, test consistency of each energy with binning in pointlike

        kwargs['title'] = ''  # don't title the subplots
        self.maps = []
        for i, (lower, upper) in enumerate(
                zip(self.lower_energies, self.upper_energies)):
            roi.change_binning(fit_emin=lower, fit_emax=upper)
            self.maps.append(self.object(roi, **kwargs))

        state.restore()
Example #14
    def __init__(self,roi,bin_edges,nrows=1,grid_kwargs=dict(),**kwargs):

        default_grid_kwargs = dict(axes_pad=0.1, 
                                   cbar_location="top",
                                   cbar_mode="each",
                                   cbar_size="7%",
                                   cbar_pad="2%")

        self.grid_kwargs = default_grid_kwargs.copy()
        self.grid_kwargs.update(grid_kwargs)

        self.roi = roi
        keyword_options.process(self, kwargs)
        self.nrows=nrows

        self.bin_edges = bin_edges
        self.nplots = len(self.bin_edges)-1
        self.ncols= int(math.ceil(float(self.nplots)/self.nrows))

        for e in bin_edges:
            if not np.any(np.abs(e-roi.bin_edges) < 0.5):
                raise Exception("Energy %.1f inconsistent with ROI energy binning." % e)

        self.lower_energies = bin_edges[:-1]
        self.upper_energies = bin_edges[1:]

        state = PointlikeState(roi)
 
        # step 1, test consistency of each energy with binning in pointlike

        kwargs['title'] = '' # don't title the subplots
        self.maps = []
        for i,(lower,upper) in enumerate(zip(self.lower_energies, self.upper_energies)):
            roi.change_binning(fit_emin=lower,fit_emax=upper)
            self.maps.append(self.object(roi,**kwargs))

        state.restore()
Example #15
    def _calculate(self):

        self.results=dict()

        self.init_state = PointlikeState(self.roi)

        model = self.roi.get_model(self.name)
        init_mapper = model.get_mapper(self.param_name)
        param_val = model[self.param_name]

        assert param_val >= self.param_min and param_val <= self.param_max

        if isinstance(init_mapper,LimitMapper):
            assert init_mapper.min <= self.param_min and init_mapper.max >= self.param_max


        model.set_mapper(self.param_name,LimitMapper(self.param_min, self.param_max, scale=param_val))

        self.roi.modify(which=self.name, model=model)

        if self.verbosity:
            print 'Before fit'
            self.roi.print_summary()

        self.results['fit_before']=source_dict(self.roi, self.name, energy_units=self.energy_units, flux_units=self.flux_units)

        self.roi.fit(**self.fit_kwargs)

        if self.verbosity:
            print 'After fit'
            self.roi.print_summary()

        self.results['fit_after']=source_dict(self.roi, self.name, energy_units=self.energy_units, flux_units=self.flux_units)

        if self.keep_best:
            model = self.roi.get_model(self.name)
            model.set_mapper(self.param_name,init_mapper)
        else:
            self.init_state.restore(just_spectra=True)
Example #16
    def _calculate(self):
        roi = self.roi
        name = self.name
        
        if self.verbosity: print 'Testing cutoff in pointlike'
        emin,emax=get_full_energy_range(roi)

        self.results = d = dict(
            energy = energy_dict(emin=emin, emax=emax, energy_units=self.energy_units)
        )

        saved_state = PointlikeState(roi)

        old_flux = roi.get_model(name).i_flux(emin,emax)

        if not isinstance(roi.get_model(name),PowerLaw):

            powerlaw_model=PowerLaw(norm=1e-11, index=2, e0=np.sqrt(emin*emax))
            powerlaw_model.set_mapper('Index', PowerLaw.default_limits['Index'])
            powerlaw_model.set_flux(old_flux,emin=emin,emax=emax)

            if self.verbosity: print "powerlaw_model is ",powerlaw_model

            roi.modify(which=name, model=powerlaw_model, keep_old_flux=False)

        fit = lambda: roi.fit(**self.fit_kwargs)
        def ts():
            old_quiet = roi.quiet; roi.quiet=True
            ts = roi.TS(name,quick=False)
            roi.quiet = old_quiet
            return ts

        spectrum = lambda: spectrum_to_dict(roi.get_model(name), errors=True)

        if self.verbosity: 
            print 'About to fit powerlaw_model'
            roi.print_summary()

        fit()
        
        if self.verbosity:
            print 'Done fitting powerlaw_model'
            roi.print_summary()

        d['hypothesis_0'] = source_dict(roi, name, emin=emin, emax=emax,
                                        flux_units=self.flux_units,
                                        energy_units=self.energy_units,
                                        verbosity=self.verbosity)

        if self.cutoff_model is not None:
            pass
        else:
            self.cutoff_model=PLSuperExpCutoff(norm=1e-9, index=1, cutoff=1000, e0=1000, b=1)
            # Note, don't limit the normalization parameter
            for p in ['Index', 'Cutoff', 'b']:
                self.cutoff_model.set_mapper(p, PLSuperExpCutoff.default_limits[p])
            self.cutoff_model.set_free('b', False)
            self.cutoff_model.set_flux(old_flux,emin=emin,emax=emax)

        if self.verbosity: print "cutoff_model is ",self.cutoff_model

        roi.modify(which=name, model=self.cutoff_model, keep_old_flux=False)

        if self.verbosity: 
            print 'About to fit cutoff_model'
            roi.print_summary()

        fit()

        ll = -roi.logLikelihood(roi.parameters())

        if ll < d['hypothesis_0']['logLikelihood']:
            # if fit is worse than PowerLaw fit, then
            # restart fit with parameters almost
            # equal to best fit powerlaw
            self.cutoff_plaw=PLSuperExpCutoff(b=1)
            self.cutoff_plaw.set_free('b', False)
            self.cutoff_plaw.setp('norm', d['hypothesis_0']['spectrum']['Norm'])
            self.cutoff_plaw.setp('index', d['hypothesis_0']['spectrum']['Index'])
            self.cutoff_plaw.setp('e0', d['hypothesis_0']['spectrum']['e0'])
            self.cutoff_plaw.setp('cutoff', 1e6)

            roi.modify(which=name, model=self.cutoff_plaw, keep_old_flux=False)
            fit()

            if self.verbosity: 
                print 'Redoing fit with cutoff same as plaw'
                print 'Before:'
                roi.print_summary()
                print fit()

        if self.verbosity:
            print 'Done fitting cutoff_model'
            roi.print_summary()

        d['hypothesis_1'] = source_dict(roi, name, emin=emin, emax=emax,
                                        flux_units=self.flux_units,
                                        energy_units=self.energy_units,
                                        verbosity=self.verbosity)


        d['TS_cutoff']=d['hypothesis_1']['TS']['noquick']-d['hypothesis_0']['TS']['noquick']

        saved_state.restore()
Example #17
                      emax       = 1e6, # MeV  
                      irf        = 'P7SOURCE_V6',
                      roi_dir    = roi_dir,  
                      maxROI     = 10,
                      event_class= 0,
                      minROI     = 10,
                      use_weighted_livetime=True)

roi = sa.roi_from_xml(
    xmlfile=expandvars(join(simdir,"gtlike_model.xml")),
    roi_dir=roi_dir,
    fit_emin=10**1.75, # 56 MeV
    fit_emax=10**3.25, # 1778 MeV
)

state=PointlikeState(roi)

results=dict()

print 'bins',roi.bin_edges

roi.print_summary(galactic=True)

results['pointlike'] = dict()
results['pointlike']['mc'] = sourcedict(roi,which,errors=False)

roi.fit(use_gradient=False, fit_bg_first = True)
results['pointlike']['fit'] = sourcedict(roi,which)

roi.print_summary(galactic=True)
print roi
Example #18
class SpectralFitLimited(BaseFitter):

    defaults = BaseFitter.defaults + (
        ('energy_units', 'MeV', 'default units to plot energy (x axis) in.'),
        ('flux_units',  'erg', 'default units to plot energy flux (y axis) in'),
        ('keep_best', True, "keep the best fit"),
        ('fit_kwargs', dict(use_gradient=False), 'kwargs passed into roi.fit'),
    )

    @keyword_options.decorate(defaults)
    def __init__(self, roi, name, param_name, param_min, param_max, **kwargs):

        self.roi = roi
        self.name = name
        self.param_name = param_name
        self.param_min = param_min
        self.param_max = param_max

        keyword_options.process(self, kwargs)

        self._calculate()

    def _calculate(self):

        self.results=dict()

        self.init_state = PointlikeState(self.roi)

        model = self.roi.get_model(self.name)
        init_mapper = model.get_mapper(self.param_name)
        param_val = model[self.param_name]

        assert param_val >= self.param_min and param_val <= self.param_max

        if isinstance(init_mapper,LimitMapper):
            assert init_mapper.min <= self.param_min and init_mapper.max >= self.param_max


        model.set_mapper(self.param_name,LimitMapper(self.param_min, self.param_max, scale=param_val))

        self.roi.modify(which=self.name, model=model)

        if self.verbosity:
            print 'Before fit'
            self.roi.print_summary()

        self.results['fit_before']=source_dict(self.roi, self.name, energy_units=self.energy_units, flux_units=self.flux_units)

        self.roi.fit(**self.fit_kwargs)

        if self.verbosity:
            print 'After fit'
            self.roi.print_summary()

        self.results['fit_after']=source_dict(self.roi, self.name, energy_units=self.energy_units, flux_units=self.flux_units)

        if self.keep_best:
            model = self.roi.get_model(self.name)
            model.set_mapper(self.param_name,init_mapper)
        else:
            self.init_state.restore(just_spectra=True)
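
A hedged usage sketch for the class above; the roi object, the source name, and the parameter limits are illustrative, and the 'logLikelihood' entries come from the source_dict results stored by _calculate.

# Hypothetical usage: fit the 'Index' parameter of an existing source while
# temporarily constraining it to [1.0, 3.0] via a LimitMapper.
limited_fit = SpectralFitLimited(roi, 'my_source', 'Index', 1.0, 3.0)

before = limited_fit.results['fit_before']['logLikelihood']
after = limited_fit.results['fit_after']['logLikelihood']
print 'logLikelihood: %.2f -> %.2f' % (before, after)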
Example #19
class SpectralGrid(BaseFitter):
    """ Perform a grid over parameters. """

    defaults = BaseFitter.defaults + (
        ('energy_units', 'MeV',
         'default units to plot energy (x axis) in.'),
        ('flux_units', 'erg', 'default units to plot energy flux (y axis) in'),
        ('param_vals', None,
         "List of parameters to grid over. If specified, don't set param_min, param_max, or nparams"
         ),
        ('param_min', None, "min parameter. If set, don't specify param_vals"),
        ('param_max', None, "max parameter. If set, don't specify param_vals"),
        ('nparams', None,
         "Number of params in grid. If set, don't specify param_vals"),
        ('keep_best', True, "keep the best fit"),
        ('fit_kwargs', dict(use_gradient=False), 'kwargs passed into roi.fit'),
    )

    @keyword_options.decorate(defaults)
    def __init__(self, roi, name, param_name, **kwargs):

        self.roi = roi
        self.name = name
        self.param_name = param_name

        keyword_options.process(self, kwargs)

        if self.param_vals is not None:
            assert self.param_min is None and self.param_max is None and self.nparams is None
        else:
            assert self.param_min is not None and self.param_max is not None and self.nparams is not None
            self.param_vals = np.linspace(self.param_min, self.param_max,
                                          self.nparams)

        self._calculate()

    def _calculate(self):

        roi = self.roi
        name = self.name
        param_name = self.param_name

        self.init_state = PointlikeState(roi)

        self.results = dict(name=name,
                            param_name=param_name,
                            param_vals=self.param_vals,
                            grid=[])

        if self.verbosity:
            print 'Performing grid over parameter %s for source %s' % (
                param_name, name)

        best_state = None
        self.best_ll = -np.inf

        model = roi.get_model(which=name)
        old_free = model.get_free(param_name)

        for i, p in enumerate(self.param_vals):
            if self.verbosity:
                print 'looping for param %s=%s (%d/%d)' % (
                    param_name, p, i + 1, len(self.param_vals))

            self.init_state.restore(just_spectra=True)

            model = roi.get_model(which=name)
            model[param_name] = p
            model.set_free(param_name, False)

            roi.modify(which=name, model=model, keep_old_flux=False)

            if self.verbosity:
                roi.print_summary()
            roi.fit(**self.fit_kwargs)
            if self.verbosity:
                roi.print_summary()

            d = source_dict(roi,
                            name,
                            energy_units=self.energy_units,
                            flux_units=self.flux_units)
            self.results['grid'].append(d)

            ll = self.results['grid'][-1]['logLikelihood']

            if ll > self.best_ll:
                self.best_state = PointlikeState(roi)
                self.best_ll = ll
                self.best_d = d

        self.results['best'] = self.best_d

        if self.keep_best:
            self.best_state.restore(just_spectra=True)
            model = roi.get_model(which=name)
            model.set_free(param_name, old_free)
            roi.modify(which=name, model=model, keep_old_flux=False)
        else:
            self.init_state.restore(just_spectra=True)

    def plot(self, filename):

        param_vals = self.results['param_vals']
        ll = [i['logLikelihood'] for i in self.results['grid']]

        P.plot(param_vals, ll)
        P.ylabel('logLikelihood')
        P.xlabel(self.results['param_name'])
        P.savefig(filename)
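
Similarly, a hedged usage sketch for SpectralGrid; again roi, the source name, and the grid values are illustrative.

# Hypothetical usage: profile the log-likelihood over a grid of spectral
# indices for one source, then plot the profile.
grid = SpectralGrid(roi, 'my_source', 'Index',
                    param_min=1.0, param_max=3.5, nparams=11)
print grid.results['best']['logLikelihood']
grid.plot('index_grid.png')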
Example #20
    def _calculate(self):

        roi = self.roi
        name = self.name
        param_name = self.param_name

        self.init_state = PointlikeState(roi)

        self.results = dict(name=name,
                            param_name=param_name,
                            param_vals=self.param_vals,
                            grid=[])

        if self.verbosity:
            print 'Performing grid over parameter %s for source %s' % (
                param_name, name)

        best_state = None
        self.best_ll = -np.inf

        model = roi.get_model(which=name)
        old_free = model.get_free(param_name)

        for i, p in enumerate(self.param_vals):
            if self.verbosity:
                print 'looping for param %s=%s (%d/%d)' % (
                    param_name, p, i + 1, len(self.param_vals))

            self.init_state.restore(just_spectra=True)

            model = roi.get_model(which=name)
            model[param_name] = p
            model.set_free(param_name, False)

            roi.modify(which=name, model=model, keep_old_flux=False)

            if self.verbosity:
                roi.print_summary()
            roi.fit(**self.fit_kwargs)
            if self.verbosity:
                roi.print_summary()

            d = source_dict(roi,
                            name,
                            energy_units=self.energy_units,
                            flux_units=self.flux_units)
            self.results['grid'].append(d)

            ll = self.results['grid'][-1]['logLikelihood']

            if ll > self.best_ll:
                self.best_state = PointlikeState(roi)
                self.best_ll = ll
                self.best_d = d

        self.results['best'] = self.best_d

        if self.keep_best:
            self.best_state.restore(just_spectra=True)
            model = roi.get_model(which=name)
            model.set_free(param_name, old_free)
            roi.modify(which=name, model=model, keep_old_flux=False)
        else:
            self.init_state.restore(just_spectra=True)
Example #21
class SpectralFitLimited(BaseFitter):

    defaults = BaseFitter.defaults + (
        ('energy_units', 'MeV',
         'default units to plot energy (x axis) in.'),
        ('flux_units', 'erg', 'default units to plot energy flux (y axis) in'),
        ('keep_best', True, "keep the best fit"),
        ('fit_kwargs', dict(use_gradient=False), 'kwargs passed into roi.fit'),
    )

    @keyword_options.decorate(defaults)
    def __init__(self, roi, name, param_name, param_min, param_max, **kwargs):

        self.roi = roi
        self.name = name
        self.param_name = param_name
        self.param_min = param_min
        self.param_max = param_max

        keyword_options.process(self, kwargs)

        self._calculate()

    def _calculate(self):

        self.results = dict()

        self.init_state = PointlikeState(self.roi)

        model = self.roi.get_model(self.name)
        init_mapper = model.get_mapper(self.param_name)
        param_val = model[self.param_name]

        assert param_val >= self.param_min and param_val <= self.param_max

        if isinstance(init_mapper, LimitMapper):
            assert init_mapper.min <= self.param_min and init_mapper.max >= self.param_max

        model.set_mapper(
            self.param_name,
            LimitMapper(self.param_min, self.param_max, scale=param_val))

        self.roi.modify(which=self.name, model=model)

        if self.verbosity:
            print 'Before fit'
            self.roi.print_summary()

        self.results['fit_before'] = source_dict(
            self.roi,
            self.name,
            energy_units=self.energy_units,
            flux_units=self.flux_units)

        self.roi.fit(**self.fit_kwargs)

        if self.verbosity:
            print 'After fit'
            self.roi.print_summary()

        self.results['fit_after'] = source_dict(self.roi,
                                                self.name,
                                                energy_units=self.energy_units,
                                                flux_units=self.flux_units)

        if self.keep_best:
            model = self.roi.get_model(self.name)
            model.set_mapper(self.param_name, init_mapper)
        else:
            self.init_state.restore(just_spectra=True)
Example #22
class SpectralGrid(BaseFitter):
    """ Perform a grid over parameters. """

    defaults = BaseFitter.defaults + (
        ('energy_units', 'MeV', 'default units to plot energy (x axis) in.'),
        ('flux_units',  'erg', 'default units to plot energy flux (y axis) in'),
        ('param_vals', None, "List of parameters to grid over. If specified, don't set param_min, param_max, or nparams"),
        ('param_min', None, "min parameter. If set, don't specify param_vals"),
        ('param_max', None, "max parameter. If set, don't specify param_vals"),
        ('nparams', None, "Number of params in grid. If set, don't specify param_vals"),
        ('keep_best', True, "keep the best fit"),
        ('fit_kwargs', dict(use_gradient=False), 'kwargs passed into roi.fit'),
    )

    @keyword_options.decorate(defaults)
    def __init__(self, roi, name, param_name, **kwargs):

        self.roi = roi
        self.name = name
        self.param_name = param_name

        keyword_options.process(self, kwargs)

        if self.param_vals is not None:
            assert self.param_min is None and self.param_max is None and self.nparams is None
        else:
            assert self.param_min is not None and self.param_max is not None and self.nparams is not None
            self.param_vals = np.linspace(self.param_min, self.param_max, self.nparams)

        self._calculate()

    def _calculate(self):

        roi = self.roi
        name = self.name
        param_name = self.param_name

        self.init_state = PointlikeState(roi)

        self.results = dict(
            name=name,
            param_name=param_name,
            param_vals=self.param_vals,
            grid=[])

        if self.verbosity:
            print 'Performing grid over parameter %s for source %s' % (param_name, name)

        best_state = None
        self.best_ll = -np.inf

        model = roi.get_model(which=name)
        old_free = model.get_free(param_name)

        for i,p in enumerate(self.param_vals):
            if self.verbosity:
                print 'looping for param %s=%s (%d/%d)' % (param_name, p, i+1,len(self.param_vals))

            self.init_state.restore(just_spectra=True)

            model = roi.get_model(which=name)
            model[param_name]=p
            model.set_free(param_name,False)

            roi.modify(which=name, model=model, keep_old_flux=False)

            if self.verbosity:
                roi.print_summary()
            roi.fit(**self.fit_kwargs)
            if self.verbosity:
                roi.print_summary()

            d=source_dict(roi,name, energy_units=self.energy_units, flux_units=self.flux_units)
            self.results['grid'].append(d)

            ll = self.results['grid'][-1]['logLikelihood']

            if ll > self.best_ll:
                self.best_state = PointlikeState(roi)
                self.best_ll = ll
                self.best_d = d

        self.results['best'] = self.best_d

        if self.keep_best:
            self.best_state.restore(just_spectra=True)
            model = roi.get_model(which=name)
            model.set_free(param_name,old_free)
            roi.modify(which=name, model=model, keep_old_flux=False)
        else:
            self.init_state.restore(just_spectra=True)

    def plot(self, filename):

        param_vals = self.results['param_vals']
        ll = [i['logLikelihood'] for i in self.results['grid']]

        P.plot(param_vals,ll)
        P.ylabel('logLikelihood')
        P.xlabel(self.results['param_name'])
        P.savefig(filename)
Example #23
    def _calculate(self):
        roi = self.roi
        name = self.name

        if self.verbosity: print 'Testing cutoff in pointlike'
        emin, emax = get_full_energy_range(roi)

        self.results = d = dict(energy=energy_dict(
            emin=emin, emax=emax, energy_units=self.energy_units))

        saved_state = PointlikeState(roi)

        old_flux = roi.get_model(name).i_flux(emin, emax)

        if not isinstance(roi.get_model(name), PowerLaw):

            powerlaw_model = PowerLaw(norm=1e-11,
                                      index=2,
                                      e0=np.sqrt(emin * emax))
            powerlaw_model.set_mapper('Index',
                                      PowerLaw.default_limits['Index'])
            powerlaw_model.set_flux(old_flux, emin=emin, emax=emax)

            if self.verbosity: print "powerlaw_model is ", powerlaw_model

            roi.modify(which=name, model=powerlaw_model, keep_old_flux=False)

        fit = lambda: roi.fit(**self.fit_kwargs)

        def ts():
            old_quiet = roi.quiet
            roi.quiet = True
            ts = roi.TS(name, quick=False)
            roi.quiet = old_quiet
            return ts

        spectrum = lambda: spectrum_to_dict(roi.get_model(name), errors=True)

        if self.verbosity:
            print 'About to fit powerlaw_model'
            roi.print_summary()

        fit()

        if self.verbosity:
            print 'Done fitting powerlaw_model'
            roi.print_summary()

        d['hypothesis_0'] = source_dict(roi,
                                        name,
                                        emin=emin,
                                        emax=emax,
                                        flux_units=self.flux_units,
                                        energy_units=self.energy_units,
                                        verbosity=self.verbosity)

        if self.cutoff_model is not None:
            pass
        else:
            self.cutoff_model = PLSuperExpCutoff(norm=1e-9,
                                                 index=1,
                                                 cutoff=1000,
                                                 e0=1000,
                                                 b=1)
            # Note, don't limit the normalization parameter
            for p in ['Index', 'Cutoff', 'b']:
                self.cutoff_model.set_mapper(
                    p, PLSuperExpCutoff.default_limits[p])
            self.cutoff_model.set_free('b', False)
            self.cutoff_model.set_flux(old_flux, emin=emin, emax=emax)

        if self.verbosity: print "cutoff_model is ", self.cutoff_model

        roi.modify(which=name, model=self.cutoff_model, keep_old_flux=False)

        if self.verbosity:
            print 'About to fit cutoff_model'
            roi.print_summary()

        fit()

        ll = -roi.logLikelihood(roi.parameters())

        if ll < d['hypothesis_0']['logLikelihood']:
            # if fit is worse than PowerLaw fit, then
            # restart fit with parameters almost
            # equal to best fit powerlaw
            self.cutoff_plaw = PLSuperExpCutoff(b=1)
            self.cutoff_plaw.set_free('b', False)
            self.cutoff_plaw.setp('norm',
                                  d['hypothesis_0']['spectrum']['Norm'])
            self.cutoff_plaw.setp('index',
                                  d['hypothesis_0']['spectrum']['Index'])
            self.cutoff_plaw.setp('e0', d['hypothesis_0']['spectrum']['e0'])
            self.cutoff_plaw.setp('cutoff', 1e6)

            roi.modify(which=name, model=self.cutoff_plaw, keep_old_flux=False)
            fit()

            if self.verbosity:
                print 'Redoing fit with cutoff same as plaw'
                print 'Before:'
                roi.print_summary()
                print fit()

        if self.verbosity:
            print 'Done fitting cutoff_model'
            roi.print_summary()

        d['hypothesis_1'] = source_dict(roi,
                                        name,
                                        emin=emin,
                                        emax=emax,
                                        flux_units=self.flux_units,
                                        energy_units=self.energy_units,
                                        verbosity=self.verbosity)

        d['TS_cutoff'] = d['hypothesis_1']['TS']['noquick'] - d[
            'hypothesis_0']['TS']['noquick']

        saved_state.restore()
Example #24
class ExtensionUpperLimit(object):
    defaults = (
        ("refit_position", False,
         "Refit position of source for each extension"),
        ("confidence", 0.95, "Convidence level of bayesian upper limit"),
        ("spatial_model", None,
         "Spatial model to use for extnesion upper limit. Default is Disk"),
        ("delta_log_like_limits", 10,
         """ delta_log_like_limits has same defintion as the parameter in
                                              pyLikelihood.IntegralUpperLimit.py function calc_int.
                                              Note, this corresponds to a change in the acutal likelihood by
                                              exp(10) ~ 20,000 which is sufficiently large that this is
                                              a pefectly fine threshold for stoping the integral."""
         ),
        ("fit_kwargs", dict(),
         "These kwargs are passed into ROIAnalysis.fit()"),
        ("spatial_model", Disk, " Spatial model to assume during upper limit"),
    )

    @keyword_options.decorate(defaults)
    def __init__(self, roi, which, **kwargs):
        """ Compute an upper limit on the source extension, by the "PDG Method". """
        keyword_options.process(self, kwargs)

        self.roi = roi
        self.which = which

        self.init_ts = roi.TS(which, quick=False)

        if self.init_ts < 4:
            # Punt on extension upper limits for completely insignificant sources
            print 'Unable to compute extension upper limit for point-like source with too-small TS'
            self.extension_limit = None

        else:
            if not isinstance(self.spatial_model, type):
                raise Exception(
                    "The spatial model bust be a type, like Gaussian, not an instance, like Gaussian()"
                )

            # Note, since the input is the class, not the instance, the
            # position parameters have not yet been added on.
            n = self.spatial_model.param_names
            assert len(n) == 1 and n[0] == 'Sigma'

            self.saved_state = PointlikeState(roi)

            self.spatial_low_lim, self.spatial_hi_lim = self.spatial_model.default_limits[
                0]

            results = self._compute()

            self.saved_state.restore()

    def loglike(self, extension):
        """ Perform a pointlike spectral fit for a
            given extension and return the logLikelihood.  Note, it is
            most robust to start with the initial spectral parameters to
            avoid situations where the previous fit totally failed to
            converge. """

        roi = self.roi
        which = self.which

        if extension < self.spatial_low_lim: extension = self.spatial_low_lim
        roi.modify(which,
                   spatial_model=self.spatial_model(sigma=extension,
                                                    center=self.init_position,
                                                    free=np.asarray(
                                                        [True, True, False])),
                   keep_old_center=False)

        self.saved_state.restore(just_spectra=True)

        roi.fit(estimate_errors=False, **self.fit_kwargs)

        if self.refit_position:
            roi.fit_extension_fast(which=which, estimate_errors=False)
            roi.fit(estimate_errors=False, **self.fit_kwargs)

        ll = -roi.logLikelihood(roi.parameters())

        if not self.old_quiet and hasattr(self, 'll_0'):
            if self.refit_position:
                fit_position = roi.get_source(which).skydir
                position_string = ' (l,b)=(%.2f,%.2f), dist=%.2f,' % (
                    fit_position.l(), fit_position.b(),
                    np.degrees(fit_position.difference(self.init_position)))
            else:
                position_string = ''
            print '... sigma = %.2f,%s ll=%.2f, ll-ll_0=%.2f' % (
                extension, position_string, ll, ll - self.ll_0)

        return ll

    def _compute_integration_range(self):
        """ Estimate when the likelihood has fallen
            from the likelihood at Sigma=0 by an amount delta_log_like_limits. """
        roi = self.roi

        if not self.old_quiet:
            print "Computing Integration range, delta_log_like_limits=%s:" % self.delta_log_like_limits

        self.ll_0 = ll_0 = self.loglike(extension=0)

        f = lambda e: self.loglike(e) - (ll_0 - self.delta_log_like_limits)

        self.int_min = 0

        # unreasonable to have a source larger than half the ROI size.
        hi = roi.sa.maxROI / 2.0
        try:
            self.int_max = brentq(f, 0, hi, rtol=1e-4, xtol=1e-3)
        except:
            # Finding this intersect does not always work.
            print 'WARNING: Unable to find an acceptable upper limit for the integration range so defaulting to %s. Extension upper limit could be unreliable' % hi
            self.int_max = hi

        if not self.old_quiet:
            print "Integrating range is between %s and %s" % (self.int_min,
                                                              self.int_max)

    def _compute_max_loglikelihood(self):
        """ Note, it is important to evalulate the maximum loglikelihood
            so that the overall likelihood can be defined as
            exp(ll-ll_max) to avoid computing the exponential of a very
            large number in the case where ll_0 is much different from
            ll_max. Also, quad as an overall easier time since the maximum
            function value is (by defintion) equal to 1.  Since the overall
            function is normalized later, this is the most numerically
            stable normalization without causing any future trouble. """
        roi = self.roi

        if not self.old_quiet: print "Computing maximum loglikelihood:"

        # Note, maximum loglikelihood =  minimum -1*logLikelihood
        # Note, this does not have to be very precise. The purpose of
        # this function is just to get a reasonable guess at the best extension
        # to avoid floating point issues in the likelihood l=exp(ll-ll_max).
        self.sigma_max = fminbound(lambda e: -1 * self.loglike(e),
                                   self.int_min,
                                   self.int_max,
                                   disp=0,
                                   xtol=1e-3)
        self.ll_max = self.loglike(self.sigma_max)

        if not self.old_quiet:
            print "Maximum logLikelihood is %.2f" % self.ll_max

    def _compute_extension_limit(self):
        """ Compute the extnesion upper limit by

                (a) Sampling the function
                (b) Computing the CDF and normalizing
                (c) Finding when the normalized CDF is equal to the desired confidence
        
            Note, the quad accuracy parameters are roughly taken to be the same
            as in the pyLikelihood.IntegralUpperLimit.py calc_int function
            """
        roi = self.roi

        if not self.old_quiet:
            print "Finding the %s quantile of the likelihood" % self.confidence

        ll_to_l = lambda ll: np.exp(ll - self.ll_max)
        like = lambda e: ll_to_l(self.loglike(e))

        quantile = Quantile(like,
                            self.int_min,
                            self.int_max,
                            quad_kwargs=dict(epsrel=1e-3, epsabs=1))
        self.extension_limit = quantile(self.confidence)

        if not self.old_quiet:
            print "Extension upper limit is %.2f" % self.extension_limit

    def _compute(self):

        roi = self.roi
        which = self.which

        self.old_quiet = roi.quiet
        roi.quiet = True
        self.init_position = roi.get_source(which).skydir

        self._compute_integration_range()
        self._compute_max_loglikelihood()
        self._compute_extension_limit()

    def results(self):
        return dict(extension=self.extension_limit,
                    spatial_model=self.spatial_model.__name__,
                    confidence=self.confidence,
                    emin=self.roi.bin_edges[0],
                    emax=self.roi.bin_edges[-1],
                    delta_log_like_limits=self.delta_log_like_limits,
                    extension_units='degrees')
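
The three _compute_* helpers above implement a generic recipe: bracket the integration range where the profile log-likelihood has dropped by delta_log_like_limits, locate the maximum so the likelihood can be written as exp(ll - ll_max), and take the requested quantile of the normalized integral. Below is a self-contained sketch of that recipe on a toy one-dimensional log-likelihood; all numbers are illustrative, and plain scipy calls stand in for the pointlike fitting machinery and the Quantile helper.

import numpy as np
from scipy import integrate
from scipy.optimize import brentq, fminbound

# Toy profile log-likelihood in the extension parameter sigma (degrees).
loglike = lambda sigma: -0.5 * ((sigma - 0.3) / 0.15) ** 2

confidence = 0.95
delta_log_like_limits = 10

# (a) Integration range: from 0 out to where the log-likelihood has fallen
#     by delta_log_like_limits relative to its value at sigma=0.
ll_0 = loglike(0)
f = lambda e: loglike(e) - (ll_0 - delta_log_like_limits)
int_min, int_max = 0.0, brentq(f, 0.0, 5.0, rtol=1e-4, xtol=1e-3)

# (b) Maximum, so the likelihood exp(ll - ll_max) stays of order unity.
sigma_max = fminbound(lambda e: -loglike(e), int_min, int_max, xtol=1e-3, disp=0)
ll_max = loglike(sigma_max)
like = lambda e: np.exp(loglike(e) - ll_max)

# (c) Confidence quantile of the normalized integral of the likelihood.
norm = integrate.quad(like, int_min, int_max)[0]
cdf = lambda e: integrate.quad(like, int_min, e)[0] / norm - confidence
extension_limit = brentq(cdf, int_min, int_max, xtol=1e-3)
print 'toy extension upper limit: %.3f deg' % extension_limit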
Example #25
    roi_dir=roi_dir,
    minROI=10 * np.sqrt(2),
    maxROI=10 * np.sqrt(2),
    seed=i,
    tstart=tstart,
    tstop=tstop,
    ltfrac=ltfrac,
    use_weighted_livetime=True,
    mc_energy=True,
    zenithcut=100,
    savedir=tempdir,
)

roi = sa.roi(roi_dir=roi_dir, diffuse_sources=diffuse_sources)

state = PointlikeState(roi)

results = dict(
    time=time, i=i, istr=istr, difftype=difftype, position=position, roi_dir=skydirdict(roi_dir), emin=emin, emax=emax
)

mc = diffusedict(roi)
ll_0 = logLikelihood(roi)

roi.print_summary()
roi.fit(use_gradient=False)
roi.print_summary()
ll_1 = logLikelihood(roi)

fit = diffusedict(roi)
results["pointlike"] = dict(mc=mc, fit=fit, ll_0=ll_0, ll_1=ll_1)