Example #1
    def Ts2(self, srcName, reoptimize=False, approx=True,
            tol=None, MaxIterations=10, verbosity=0):
        """Computes the TS value for a source indicated by "srcName."

        If "reoptimize=True" is selected this function will reoptimize
        the model up to "MaxIterations" given the tolerance "tol"
        (default is the tolerance selected for the overall fit).  If
        "appox=True" is selected (the default) it will renormalize the
        model (see _renorm).
        """

        saved_state = LikelihoodState(self)
        if verbosity > 0:
            print("*** Start Ts_dl ***")
        source_attributes = self.getExtraSourceAttributes()
        self.logLike.syncParams()
        src = self.logLike.getSource(srcName)
        self._ts_src = src
        freeParams = pyLike.DoubleVector()
        self.logLike.getFreeParamValues(freeParams)
        logLike1 = self.logLike.value()
        self.scaleSource(srcName, 1E-10)
        logLike0 = self.logLike.value()
        if tol is None:
            tol = self.tol
        if reoptimize:
            if verbosity > 0:
                print("** Do reoptimize")
            optFactory = pyLike.OptimizerFactory_instance()
            myOpt = optFactory.create(self.optimizer, self.logLike)
            Niter = 1
            while Niter <= MaxIterations:
                try:
                    myOpt.find_min(0, tol)
                    break
                except RuntimeError as e:
                    print(e)
                if verbosity > 0:
                    print("** Iteration :", Niter)
                Niter += 1
        else:
            if approx:
                try:
                    self._renorm()
                except ZeroDivisionError:
                    pass
        self.logLike.syncParams()
        logLike0 = max(self.logLike.value(), logLike0)
        Ts_value = 2 * (logLike1 - logLike0)
        self.scaleSource(srcName, 1E10)

        self.logLike.setFreeParamValues(freeParams)
        self.model = SourceModel(self.logLike)
        for src in source_attributes:
            self.model[src].__dict__.update(source_attributes[src])
        saved_state.restore()
        self.logLike.value()
        return Ts_value
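
Ts2 shows the pattern every example in this collection relies on: snapshot the fit with LikelihoodState, perturb the model (here the source normalization is scaled down by 1e-10), difference the log-likelihoods, and restore. Below is a minimal, self-contained sketch of that pattern with a toy Gaussian likelihood standing in for pyLike; ToyState and ToyModel are illustrative, not part of the pyLikelihood API.

import numpy as np

class ToyState:
    """Snapshot/restore in the spirit of LikelihoodState (illustrative only)."""
    def __init__(self, model):
        self.model = model
        self.params = dict(model.params)   # copy the current parameter values
    def restore(self):
        self.model.params.update(self.params)

class ToyModel:
    def __init__(self):
        self.params = {'norm': 1.0}
        self.data = np.random.default_rng(0).normal(1.0, 0.1, 100)
    def loglike(self):
        mu = self.params['norm']
        return -0.5 * np.sum((self.data - mu) ** 2 / 0.1 ** 2)

model = ToyModel()
saved = ToyState(model)
logLike1 = model.loglike()         # full model, as in Ts2
model.params['norm'] *= 1e-10      # analogue of scaleSource(srcName, 1E-10)
logLike0 = model.loglike()
Ts_value = 2 * (logLike1 - logLike0)
saved.restore()                    # put the parameters back, as Ts2 does
print('TS = %.1f' % Ts_value)
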
Example #2
    def _scan_extension(self, name, **kwargs):

        saved_state = LikelihoodState(self.like)

        if not hasattr(self.components[0].like.logLike, 'setSourceMapImage'):
            loglike = self._scan_extension_pylike(name, **kwargs)
        else:
            loglike = self._scan_extension_fast(name, **kwargs)

        saved_state.restore()

        return loglike
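
The dispatch here is feature detection: the fast scan is used only when the installed logLike binding exposes setSourceMapImage, with the pylike path as a fallback for older builds. The same idiom in standalone form; all class and function names below are placeholders.

def scan_fast(like, **kwargs):
    return 'fast path'

def scan_pylike(like, **kwargs):
    return 'pylike fallback'

def scan_extension(like, **kwargs):
    # Capability check mirrors the hasattr test on logLike above.
    if hasattr(like, 'setSourceMapImage'):
        return scan_fast(like, **kwargs)
    return scan_pylike(like, **kwargs)

class OldBinding(object):
    pass

class NewBinding(object):
    def setSourceMapImage(self, *args):
        pass

print(scan_extension(OldBinding()))   # 'pylike fallback'
print(scan_extension(NewBinding()))   # 'fast path'
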
Example #3
    def _scan_position(self, name, **kwargs):

        saved_state = LikelihoodState(self.like)

        skydir = kwargs.pop('skydir', self.roi[name].skydir)
        scan_cdelt = kwargs.pop('scan_cdelt', 0.02)
        nstep = kwargs.pop('nstep', 5)
        use_cache = kwargs.get('use_cache', True)
        use_pylike = kwargs.get('use_pylike', False)
        optimizer = kwargs.get('optimizer', {})

        # Fit without source
        self.zero_source(name, loglevel=logging.DEBUG)
        fit_output_nosrc = self._fit(loglevel=logging.DEBUG,
                                     **optimizer)
        self.unzero_source(name, loglevel=logging.DEBUG)
        saved_state.restore()
        self.free_norm(name, loglevel=logging.DEBUG)

        lnlmap = WcsNDMap.create(skydir=skydir, binsz=scan_cdelt, npix=(nstep, nstep),
                                 coordsys=wcs_utils.get_coordsys(self.geom.wcs))

        src = self.roi.copy_source(name)

        if use_cache and not use_pylike:
            self._create_srcmap_cache(src.name, src)

        coord = MapCoord.create(lnlmap.geom.get_coord(flat=True),
                                coordsys=lnlmap.geom.coordsys)
        scan_skydir = coord.skycoord.icrs
        for lon, lat, ra, dec in zip(coord.lon, coord.lat,
                                     scan_skydir.ra.deg, scan_skydir.dec.deg):

            spatial_pars = {'ra': ra, 'dec': dec}
            self.set_source_morphology(name,
                                       spatial_pars=spatial_pars,
                                       use_pylike=use_pylike)
            fit_output = self._fit(loglevel=logging.DEBUG,
                                   **optimizer)
            lnlmap.set_by_coord((lon, lat), fit_output['loglike'])

        self.set_source_morphology(name, spatial_pars=src.spatial_pars,
                                   use_pylike=use_pylike)
        saved_state.restore()

        lnlmap.data -= fit_output_nosrc['loglike']
        tsmap = WcsNDMap(lnlmap.geom, 2.0 * lnlmap.data)

        self._clear_srcmap_cache()
        return tsmap, fit_output_nosrc['loglike']
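
The scan refits the source at each pixel, stores the conditional log-likelihood, subtracts the no-source fit, and doubles the result to get TS. A numpy-only sketch of that bookkeeping, with a quadratic surface standing in for the per-pixel fits (all values are illustrative):

import numpy as np

nstep, scan_cdelt = 5, 0.02
offsets = (np.arange(nstep) - (nstep - 1) / 2.0) * scan_cdelt
loglike_nosrc = -100.0                       # stand-in for fit_output_nosrc['loglike']

def fit_loglike(dx, dy):
    # Toy likelihood surface peaked at the nominal position.
    return loglike_nosrc + 12.5 - 500.0 * (dx ** 2 + dy ** 2)

lnlmap = np.array([[fit_loglike(dx, dy) for dx in offsets] for dy in offsets])
tsmap = 2.0 * (lnlmap - loglike_nosrc)       # same subtract-and-double as above
print(tsmap.max())                           # peak TS at the nominal position
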
Example #4
    def _scan_position(self, name, **kwargs):

        saved_state = LikelihoodState(self.like)

        skydir = kwargs.pop('skydir', self.roi[name].skydir)
        scan_cdelt = kwargs.pop('scan_cdelt', 0.02)
        nstep = kwargs.pop('nstep', 5)
        use_cache = kwargs.get('use_cache', True)
        use_pylike = kwargs.get('use_pylike', False)
        optimizer = kwargs.get('optimizer', {})

        # Fit without source
        self.zero_source(name, loglevel=logging.DEBUG)
        fit_output_nosrc = self._fit(loglevel=logging.DEBUG,
                                     **optimizer)
        self.unzero_source(name, loglevel=logging.DEBUG)
        saved_state.restore()
        self.free_norm(name, loglevel=logging.DEBUG)

        lnlmap = Map.create(skydir, scan_cdelt, (nstep, nstep),
                            coordsys=wcs_utils.get_coordsys(self._skywcs))

        src = self.roi.copy_source(name)

        if use_cache and not use_pylike:
            self._create_srcmap_cache(src.name, src)

        scan_skydir = lnlmap.get_pixel_skydirs().transform_to('icrs')
        loglike = []
        for ra, dec in zip(scan_skydir.ra.deg, scan_skydir.dec.deg):

            spatial_pars = {'ra': ra, 'dec': dec}
            self.set_source_morphology(name,
                                       spatial_pars=spatial_pars,
                                       use_pylike=use_pylike)
            fit_output = self._fit(loglevel=logging.DEBUG,
                                   **optimizer)
            loglike += [fit_output['loglike']]

        self.set_source_morphology(name, spatial_pars=src.spatial_pars,
                                   use_pylike=use_pylike)
        saved_state.restore()

        lnlmap.data = np.array(loglike).reshape((nstep, nstep)).T
        lnlmap.data -= fit_output_nosrc['loglike']
        tsmap = Map(2.0 * lnlmap.data, lnlmap.wcs)

        self._clear_srcmap_cache()
        return tsmap, fit_output_nosrc['loglike']
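
This older variant of _scan_position does the same scan with the previous fermipy Map API: it collects the log-likelihoods in a flat list and reshapes them into the map, where the trailing .T converts between the scan ordering and the map's axis ordering (assuming, as the transpose suggests, one axis varies fastest in the returned pixel directions). A two-line check of that reshape with dummy values:

import numpy as np

nstep = 3
loglike = np.arange(nstep * nstep)           # dummy scan values in scan order
lnl = loglike.reshape((nstep, nstep)).T      # transpose into map axis order
print(lnl)
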
Example #5
    def __init__(self, like, sourcesOfInterest=None, optimizer="Minuit",
                 tol=1e-8, chatter=3, 
                 freeze_nuisance_sources_immediately = False,
                 freeze_nuisance_sources_after_optimize = False,
                 nuisance_npred_frac_max = 0.05,
                 nuisance_soi_sep_min_deg = 5.0,
                 calculate_full_ts_for_all = False):
        self.ver = "$Id: ROILikelihoodOptimizer.py 2795 2011-05-06 14:48:45Z sfegan $"
        self.res = {}
        self._like = like
        self._original_state = LikelihoodState(self._like)
        self._chatter = 0
        self._SOI = []
        self._freeze_nuisance_immediately = freeze_nuisance_sources_immediately
        self._freeze_nuisance_after_optimize = \
            freeze_nuisance_sources_after_optimize
        self._nuisance_npred_frac_max = nuisance_npred_frac_max
        self._nuisance_soi_sep_min_deg = nuisance_soi_sep_min_deg
        self._calculate_full_ts_for_all = calculate_full_ts_for_all

        if sourcesOfInterest is not None:
            if not isinstance(sourcesOfInterest, list):
                sourcesOfInterest = [ sourcesOfInterest ]
            for s in sourcesOfInterest:
                if s in like.sourceNames():
                    self._SOI.append(s)
                else:
                    raise RuntimeError("Invalid source of interest: " + s)
        if optimizer:
            self._like.optimizer = optimizer
        if tol:
            self._like.tol = tol
        if chatter:
            self._chatter = chatter
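
The constructor normalizes a scalar-or-list argument and validates each name against the likelihood's source list, a small reusable idiom. The same logic in standalone form, with a plain set replacing like.sourceNames() (names are illustrative):

def normalize_soi(sources_of_interest, known_names):
    """Return a validated list, accepting a single name or a list of names."""
    if sources_of_interest is None:
        return []
    if not isinstance(sources_of_interest, list):
        sources_of_interest = [sources_of_interest]
    for s in sources_of_interest:
        if s not in known_names:
            raise RuntimeError('Invalid source of interest: ' + s)
    return sources_of_interest

print(normalize_soi('PSR J0000+0000', {'PSR J0000+0000', '3C 279'}))
print(normalize_soi(None, {'3C 279'}))   # -> []
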
Example #6
def Likelihood(like1,
               modelout,
               optimizer,
               statistic,
               specfile,
               results,
               plot,
               slist,
               optmodel=(False, ),
               SkipUL=False,
               Bayes=False,
               binedge=None,
               sedN=None,
               dellist=None):

    if statistic != 'BINNED':
        print('%s method is not implemented in this script' % statistic)
        return None

    if dellist is not None:
        for sdel in dellist:
            if sdel in like1.model.srcNames:
                like1.deleteSource(sdel)
                print('%s is deleted' % sdel)
        like1.syncSrcParams()
    if optmodel[0]:
        OptimizeModel(like1, slist=slist, TSmax=optmodel[1])

    like1.optObject = GetOptimizer(like1, optimizer)
    if like1.optimizer != optimizer:
        like1.optimizer = optimizer
    if isinstance(like1,
                  SummedLikelihood):  # not needed, results aren't changed
        for l in like1.components:
            l.optimizer = optimizer

    nloop = 0
    fitxml = modelout + '_fit%d' % (nloop + 1)
    while nloop < 3 and not Fit(like1, fitxml):
        print()
        print()
        nloop += 1
        fitxml = modelout + '_fit%d' % (nloop + 1)
        like1.optObject = GetOptimizer(like1, optimizer)
        gc.collect()

    if nloop == 3:
        print('could not converge')
        fitxml = modelout + '_fit3'
    # pkl=results+'.pkl'
    # pkl=pkl.replace('.dat.pkl','.pkl')
    # with open(pkl, mode='wb') as f:
    #     pickle.dump(like, f)
    #     pickle.dump(likeobj, f)

    if isinstance(like1, SummedLikelihood):
        is_esame = True
        for l in like1.components[1:]:
            is_esame = (is_esame and np.allclose(like1.components[0].energies,
                                                 l.energies))
        if is_esame:
            E = like1.components[0].energies
        else:
            E = (min([l.energies[0] for l in like1.components]),
                 max([l.energies[-1] for l in like1.components]))
    else:
        E = like1.energies
    emin = E[0]
    emax = E[-1]

    print('\nComputing TS values for each extended source\n')
    print('Photon fluxes are computed for the energy range ' + repr(
        emin) + ' to ' + repr(emax) + ' MeV\n')

    dic = OrderedDict()
    free = []
    for sname in like1.model.srcNames:
        flag = False
        src = OrderedDict()
        func = like1.model[sname].funcs['Spectrum']
        for param in func.paramNames:
            flag = (flag or func.params[param].isFree())
            if func.params[param].isFree():
                free.append('%s:%s' % (sname, param))
                src[param] = (func[param], func.params[param].error())
            else:
                src[param] = func[param]

        flux = like1.flux(sname, emin=emin, emax=emax)
        enflux = like1.energyFlux(sname, emin=emin, emax=emax)
        if flag:
            if sname.find('gll') < 0 and sname.find('iso') < 0:
                for param in func.paramNames:
                    src['scale ' + param] = func.params[param].getScale()
                src['Spectrum'] = func.genericName()
                src['TS value'] = like1.Ts(sname)
            eflx = like1.fluxError(sname, emin=emin, emax=emax)
            eenflx = like1.energyFluxError(sname, emin=emin, emax=emax)
            src['Flux'] = (flux, eflx)
            src['EnFlux'] = (enflux, eenflx)
        else:
            src['Flux'] = flux
            src['EnFlux'] = enflux

        if like1.model[sname].getType() == 'Diffuse':
            tmplist = []
            nbin = len(E) - 1
            for ie in range(nbin):
                tmplist.append(like1.flux(sname, emin=E[ie], emax=E[ie + 1]))
            src['Diff Flux'] = tmplist
        dic[sname] = src
    dic['Energies'] = E
    if len(free) > 1:
        dic['Free Parameters'] = free
        dic['Covariance'] = like1.covariance
    dic['-LogLike'] = like1()

    WriteResult(dic, results)
    PrintResult(dic)
    '''
      Calculate Upper Limit. You can choose bayesian or frequentist algorithm.

    '''
    if SkipUL or len(slist) == 0:
        print('skip UL calculation')
    elif Bayes:
        ul = {}
        ullist.append(ul)  # ullist is assumed to be defined at module scope

        for sname in slist:
            print(sname)
            flux_ul, ulresults = calc_int(like1, sname, emin=emin, emax=emax)
            try_ul_calc = 1
            par = like1.normPar(sname)
            while flux_ul == -1 and try_ul_calc <= 10:
                par.setBounds(par.getBounds()[0], par.getBounds()[1] * 10)
                like1.syncSrcParams(sname)
                flux_ul, ulresults = calc_int(like1,
                                              sname,
                                              emin=emin,
                                              emax=emax)
                try_ul_calc += 1
            if flux_ul == -1:
                print('could not compute upper limit')
            else:
                print('%lg ph/cm^2/s for emin=%.1f, emax=%.1f (Bayesian UL)' % (
                    flux_ul, ulresults['flux_emin'], ulresults['flux_emax']))
                ul[sname] = ulresults
                dic[sname]['dNdE UL'] = ulresults['ul_value'] * par.getScale()
                ## dN/dE UL at a reference enrergy (ph/cm^2/s)
                dic[sname]['Flux UL'] = flux_ul
                saved_state = LikelihoodState(like1)
                par.setValue(ulresults['ul_value'])
                dic[sname]['EnFlux UL'] = like1.energyFlux(sname, emin, emax)
                saved_state.restore()
                dic[sname]['UL algo'] = 'bayesian'
                WriteResult(dic, results)

    else:
        ul = UpperLimits(like1)
        ullist.append(ul)  # ullist is assumed to be defined at module scope

        for sname in slist:
            print(sname)
            try:
                flux_ul, pref_ul = ul[sname].compute(emin=emin, emax=emax)
                print(ul[sname].results[-1])
                dic[sname]['dNdE UL'] = pref_ul * like1.normPar(
                    sname).getScale()
                dic[sname]['Flux UL'] = flux_ul
                saved_state = LikelihoodState(like1)
                like1.normPar(sname).setValue(pref_ul)
                dic[sname]['EnFlux UL'] = like1.energyFlux(sname, emin, emax)
                saved_state.restore()
                dic[sname]['UL algo'] = 'frequentist'
                dic[sname]['UL dlogL'] = ul[sname].results[-1].delta
                WriteResult(dic, results)
            except RuntimeError:
                print(traceback.format_exc())
                print('could not compute upper limit')

    # with open(pkl, mode='ab') as f:
    #     pickle.dump(ul, f)

    try:
        if specfile != 'None' and specfile != '' and specfile is not None:
            like1.writeCountsSpectra(specfile, len(E) - 1)
    except NotImplementedError as e:
        print('NotImplementedError:', e)
    except RuntimeError:
        print(traceback.format_exc())

    cnt = 0.
    for name in like1.model.srcNames:
        cnt += like1.NpredValue(name)
    print('\nTotal number of observed counts:', int(like1.total_nobs()))
    print('Total number of model events:', cnt)
    print('\n-log(Likelihood):', like1())

    print('\nWriting fitted model to', modelout)
    # like1.logLike.writeXml(modelout)
    os.rename(fitxml, modelout)
    '''
      Calculate Fermi SED data points.

    '''
    if binedge is not None:
        ul_alg = 'bayesian' if Bayes else 'frequentist'
        print('\n%s Upper Limit will be calculated' % ul_alg)
        print('calculating SED...')
        for sname in slist:
            print(sname)
            func = like1.model[sname].funcs['Spectrum']
            if func.genericName() == 'PowerLaw':
                idx = func.params['Index'].getTrueValue()
            elif flagsed2 and func.genericName() == 'LogParabola':  # flagsed2: module-level flag assumed defined elsewhere
                print('LogParabola')
                idx = []
                ebin = np.asarray(binedge)
                ebin = np.sqrt(ebin[:-1] * ebin[1:])
                Alpha = func.params['alpha'].getTrueValue()
                Beta = func.params['beta'].getTrueValue()
                Eb = func.params['Eb'].getTrueValue()
                for valE in ebin:
                    idx.append(-(Alpha + 2 * Beta * np.log(valE / Eb)))
                print('approximate PL indices:', idx)
            elif flagsed2 and func.genericName() == 'PLSuperExpCutoff':
                print('PLSuperExpCutoff')
                idx = []
                ebin = np.asarray(binedge)
                ebin = np.sqrt(ebin[:-1] * ebin[1:])
                Index1 = func.params['Index1'].getTrueValue()
                Index2 = func.params['Index2'].getTrueValue()
                Cutoff = func.params['Cutoff'].getTrueValue()
                for valE in ebin:
                    idx.append(Index1 - Index2 * pow(valE / Cutoff, Index2))
                print('approximate PL indices:', idx)
            else:
                idx = -2.
            GetSED(like1,
                   sname,
                   index=idx,
                   be=binedge,
                   ul_alg=ul_alg,
                   fnheader=sedN)
        print('Done!')
    '''
      Plot count graph.

    '''
    if plot:
        try:
            print('')
            like1.setPlotter('mpl')
            like1.plot()
            print('')
        except Exception:
            print('could not plot')

    return dic
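
Two retry idioms in this function are worth isolating: the fit loop that rebuilds the optimizer up to three times, and the Bayesian upper-limit loop that widens the normalization bound by a factor of ten for up to ten attempts while calc_int returns -1. A hedged sketch of the bound-widening retry, with the likelihood machinery mocked out (everything below is illustrative):

def widen_until_success(compute, par, max_tries=10, factor=10.0):
    """Retry a bounded computation, widening the upper bound each time.

    compute() returns -1 when the limit runs into the parameter bound,
    mirroring the calc_int retry loop in the function above.
    """
    result = compute(par)
    tries = 1
    while result == -1 and tries < max_tries:
        lo, hi = par['bounds']
        par['bounds'] = (lo, hi * factor)
        result = compute(par)
        tries += 1
    return result

par = {'bounds': (0.0, 1.0)}
# Toy computation: succeeds once the upper bound can contain the answer.
compute = lambda p: 42.0 if p['bounds'][1] >= 100.0 else -1
print(widen_until_success(compute, par))     # 42.0 after two widenings
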
Example #7
    def _localize(self, name, **kwargs):

        nstep = kwargs.get('nstep')
        dtheta_max = kwargs.get('dtheta_max')
        update = kwargs.get('update', True)
        prefix = kwargs.get('prefix', '')
        use_cache = kwargs.get('use_cache', False)
        free_background = kwargs.get('free_background', False)
        free_radius = kwargs.get('free_radius', None)

        saved_state = LikelihoodState(self.like)

        if not free_background:
            self.free_sources(free=False, loglevel=logging.DEBUG)

        if free_radius is not None:
            diff_sources = [s.name for s in self.roi.sources if s.diffuse]
            skydir = self.roi[name].skydir
            free_srcs = [s.name for s in
                         self.roi.get_sources(skydir=skydir,
                                              distance=free_radius,
                                              exclude=diff_sources)]
            self.free_sources_by_name(free_srcs, pars='norm',
                                      loglevel=logging.DEBUG)

        src = self.roi.copy_source(name)
        skydir = src.skydir
        skywcs = self._skywcs
        src_pix = skydir.to_pixel(skywcs)

        fit0 = self._fit_position_tsmap(name, prefix=prefix,
                                        dtheta_max=dtheta_max,
                                        zmin=-3.0,
                                        use_pylike=False)

        self.logger.debug('Completed localization with TS Map.\n'
                          '(ra,dec) = (%10.4f,%10.4f) '
                          '(glon,glat) = (%10.4f,%10.4f)',
                          fit0['ra'], fit0['dec'],
                          fit0['glon'], fit0['glat'])

        # Fit baseline (point-source) model
        self.free_norm(name)
        fit_output = self._fit(loglevel=logging.DEBUG,
                               **kwargs.get('optimizer', {}))

        # Save likelihood value for baseline fit
        loglike0 = fit_output['loglike']
        self.logger.debug('Baseline Model Likelihood: %f', loglike0)

        o = defaults.make_default_tuple(defaults.localize_output)
        o.name = name
        o.config = kwargs
        o.fit_success = True
        o.loglike_base = loglike0
        o.loglike_loc = np.nan
        o.dloglike_loc = np.nan

        if fit0['fit_success']:
            scan_cdelt = 2.0 * fit0['pos_r95'] / (nstep - 1.0)
        else:
            scan_cdelt = np.abs(skywcs.wcs.cdelt[0])

        self.logger.debug('Refining localization search to '
                          'region of width: %.4f deg',
                          scan_cdelt * nstep)

        fit1 = self._fit_position_scan(name,
                                       skydir=fit0['skydir'],
                                       scan_cdelt=scan_cdelt,
                                       **kwargs)

        o.loglike_loc = fit1['loglike']
        o.dloglike_loc = o.loglike_loc - o.loglike_base
        o.tsmap = fit0.pop('tsmap')
        o.tsmap_peak = fit1.pop('tsmap')
        # o.update(fit1)

        # Best fit position and uncertainty from fit to TS map
        o.fit_init = fit0

        # Best fit position and uncertainty from pylike scan
        o.fit_scan = fit1
        o.update(fit1)

        cdelt0 = np.abs(skywcs.wcs.cdelt[0])
        cdelt1 = np.abs(skywcs.wcs.cdelt[1])
        pix = fit1['skydir'].to_pixel(skywcs)
        o.pos_offset = skydir.separation(fit1['skydir']).deg
        o.xpix = float(pix[0])
        o.ypix = float(pix[1])
        o.deltax = (o.xpix - src_pix[0]) * cdelt0
        o.deltay = (o.ypix - src_pix[1]) * cdelt1

        o.ra_preloc = skydir.ra.deg
        o.dec_preloc = skydir.dec.deg
        o.glon_preloc = skydir.galactic.l.deg
        o.glat_preloc = skydir.galactic.b.deg

        if o.pos_offset > dtheta_max:
            o.fit_success = False

        self.logger.info('Localization completed with new position:\n'
                         '(  ra, dec) = (%10.4f +/- %8.4f,%10.4f +/- %8.4f)\n'
                         '(glon,glat) = (%10.4f +/- %8.4f,%10.4f +/- %8.4f)\n'
                         'offset = %8.4f r68 = %8.4f r95 = %8.4f r99 = %8.4f',
                         o.ra, o.ra_err, o.dec, o.dec_err,
                         o.glon, o.glon_err, o.glat, o.glat_err,
                         o.pos_offset, o.pos_r68, o.pos_r95, o.pos_r99)

        if not o.fit_success:
            self.logger.warning('Fit to localization contour failed.')
        elif not o.fit_inbounds:
            self.logger.warning('Best-fit position outside of search region.')
        else:
            self.logger.info('Localization succeeded.')

        if update and ((not o.fit_success) or (not o.fit_inbounds)):
            self.logger.warning(
                'Localization failed.  Keeping existing position.')

        if update and o.fit_success and o.fit_inbounds:
            self.logger.info('Updating source %s '
                             'to localized position.', name)
            src = self.delete_source(name)
            src.set_position(fit1['skydir'])
            self.add_source(name, src, free=True)
            fit_output = self.fit(loglevel=logging.DEBUG)
            o.loglike_loc = fit_output['loglike']
            o.dloglike_loc = o.loglike_loc - o.loglike_base
            src = self.roi.get_source_by_name(name)
            self.logger.info('LogLike: %12.3f DeltaLogLike: %12.3f',
                             o.loglike_loc, o.dloglike_loc)

            src['glon_err'] = o.glon_err
            src['glat_err'] = o.glat_err
            src['ra_err'] = o.ra_err
            src['dec_err'] = o.dec_err
            src['pos_err'] = o.pos_err
            src['pos_err_semimajor'] = o.pos_err_semimajor
            src['pos_err_semiminor'] = o.pos_err_semiminor
            src['pos_r68'] = o.pos_r68
            src['pos_r95'] = o.pos_r95
            src['pos_r99'] = o.pos_r99
            src['pos_angle'] = o.pos_angle
            src['pos_gal_cov'] = o.pos_gal_cov
            src['pos_gal_corr'] = o.pos_gal_corr
            src['pos_cel_cov'] = o.pos_cel_cov
            src['pos_cel_corr'] = o.pos_cel_corr
        else:
            saved_state.restore()
            self._sync_params(name)
            self._update_roi()

        return o
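
_localize either commits the new position (delete the source, move it, re-add it, refit) or falls back to saved_state.restore(). The commit-or-rollback shape reduced to its skeleton, with a dict standing in for the likelihood state (illustrative only):

class Rollback:
    """Minimal commit-or-rollback guard echoing LikelihoodState usage."""

    def __init__(self, state):
        self.state = state
        self.saved = dict(state)       # snapshot taken before any changes

    def restore(self):
        self.state.clear()
        self.state.update(self.saved)

state = {'ra': 10.0, 'dec': -5.0}
guard = Rollback(state)
state.update(ra=10.3, dec=-5.1)        # tentative localized position
fit_success = False                    # outcome of the localization fit
if not fit_success:
    guard.restore()                    # keep the existing position
print(state)                           # {'ra': 10.0, 'dec': -5.0}
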
Example #8
    def _make_sed(self, name, **config):

        bin_index = config['bin_index']
        use_local_index = config['use_local_index']
        free_background = config['free_background']
        free_radius = config['free_radius']
        ul_confidence = config['ul_confidence']
        cov_scale = config['cov_scale']
        loge_bins = config['loge_bins']

        if not loge_bins:
            loge_bins = self.log_energies
        else:
            loge_bins = np.array(loge_bins)

        nbins = len(loge_bins) - 1
        max_index = 5.0
        min_flux = 1E-30
        npts = self.config['gtlike']['llscan_npts']
        loge_bounds = self.loge_bounds

        # Output Dictionary
        o = {'name': name,
             'loge_min': loge_bins[:-1],
             'loge_max': loge_bins[1:],
             'loge_ctr': 0.5 * (loge_bins[:-1] + loge_bins[1:]),
             'loge_ref': 0.5 * (loge_bins[:-1] + loge_bins[1:]),
             'e_min': 10 ** loge_bins[:-1],
             'e_max': 10 ** loge_bins[1:],
             'e_ctr': 10 ** (0.5 * (loge_bins[:-1] + loge_bins[1:])),
             'e_ref': 10 ** (0.5 * (loge_bins[:-1] + loge_bins[1:])),
             'ref_flux': np.zeros(nbins),
             'ref_eflux': np.zeros(nbins),
             'ref_dnde': np.zeros(nbins),
             'ref_dnde_e_min': np.zeros(nbins),
             'ref_dnde_e_max': np.zeros(nbins),
             'ref_e2dnde': np.zeros(nbins),
             'ref_npred': np.zeros(nbins),
             'norm': np.zeros(nbins),
             'flux': np.zeros(nbins),
             'eflux': np.zeros(nbins),
             'dnde': np.zeros(nbins),
             'e2dnde': np.zeros(nbins),
             'index': np.zeros(nbins),
             'npred': np.zeros(nbins),
             'ts': np.zeros(nbins),
             'loglike': np.zeros(nbins),
             'norm_scan': np.zeros((nbins, npts)),
             'dloglike_scan': np.zeros((nbins, npts)),
             'loglike_scan': np.zeros((nbins, npts)),
             'fit_quality': np.zeros(nbins),
             'fit_status': np.zeros(nbins),
             'correlation': {},
             'model_flux': {},
             'config': config
             }

        for t in ['norm', 'flux', 'eflux', 'dnde', 'e2dnde']:
            o['%s_err' % t] = np.zeros(nbins) * np.nan
            o['%s_err_hi' % t] = np.zeros(nbins) * np.nan
            o['%s_err_lo' % t] = np.zeros(nbins) * np.nan
            o['%s_ul95' % t] = np.zeros(nbins) * np.nan
            o['%s_ul' % t] = np.zeros(nbins) * np.nan

        saved_state = LikelihoodState(self.like)
        source = self.components[0].like.logLike.getSource(str(name))

        # Perform global spectral fit
        self._latch_free_params()
        self.free_sources(False, pars='shape', loglevel=logging.DEBUG)
        self.free_source(name, pars=config.get('free_pars', None),
                         loglevel=logging.DEBUG)
        fit_output = self.fit(loglevel=logging.DEBUG, update=False,
                              min_fit_quality=2)
        o['model_flux'] = self.bowtie(name)
        spectral_pars = gtutils.get_function_pars_dict(source.spectrum())
        o['SpectrumType'] = self.roi[name]['SpectrumType']
        o.update(model_utils.pars_dict_to_vectors(o['SpectrumType'],
                                                  spectral_pars))

        param_names = gtutils.get_function_par_names(o['SpectrumType'])
        npar = len(param_names)
        o['param_covariance'] = np.empty((npar, npar), dtype=float) * np.nan

        pmask0 = np.empty(len(fit_output['par_names']), dtype=bool)
        pmask0.fill(False)
        pmask1 = np.empty(npar, dtype=bool)
        pmask1.fill(False)
        for i, pname in enumerate(param_names):

            for j, pname2 in enumerate(fit_output['par_names']):
                if name != fit_output['src_names'][j]:
                    continue
                if pname != pname2:
                    continue
                pmask0[j] = True
                pmask1[i] = True

        src_cov = fit_output['covariance'][pmask0, :][:, pmask0]
        o['param_covariance'][np.ix_(pmask1, pmask1)] = src_cov
        o['param_correlation'] = utils.cov_to_correlation(
            o['param_covariance'])

        for i, pname in enumerate(param_names):
            o['param_covariance'][i, :] *= spectral_pars[pname]['scale']
            o['param_covariance'][:, i] *= spectral_pars[pname]['scale']

        self._restore_free_params()

        self.logger.info('Fitting SED')

        # Setup background parameters for SED
        self.free_sources(False, pars='shape')
        self.free_norm(name)

        if not free_background:
            self.free_sources(free=False, loglevel=logging.DEBUG)

        if free_radius is not None:
            diff_sources = [s.name for s in self.roi.sources if s.diffuse]
            skydir = self.roi[name].skydir
            free_srcs = [s.name for s in
                         self.roi.get_sources(skydir=skydir,
                                              distance=free_radius,
                                              exclude=diff_sources)]
            self.free_sources_by_name(free_srcs, pars='norm',
                                      loglevel=logging.DEBUG)

        if cov_scale is not None:
            self._latch_free_params()
            self.zero_source(name)
            self.fit(loglevel=logging.DEBUG, update=False)
            srcNames = list(self.like.sourceNames())
            srcNames.remove(name)
            self.constrain_norms(srcNames, cov_scale)
            self.unzero_source(name)
            self._restore_free_params()

        # Precompute fluxes in each bin from global fit
        gf_bin_flux = []
        gf_bin_index = []
        for i, (logemin, logemax) in enumerate(zip(loge_bins[:-1],
                                                   loge_bins[1:])):

            emin = 10 ** logemin
            emax = 10 ** logemax
            delta = 1E-5
            f = self.like[name].flux(emin, emax)
            f0 = self.like[name].flux(emin * (1 - delta), emin * (1 + delta))
            f1 = self.like[name].flux(emax * (1 - delta), emax * (1 + delta))

            if f0 > min_flux and f1 > min_flux:
                g = 1 - np.log10(f0 / f1) / np.log10(emin / emax)
                gf_bin_index += [g]
                gf_bin_flux += [f]
            else:
                gf_bin_index += [max_index]
                gf_bin_flux += [min_flux]

        old_spectrum = source.spectrum()
        old_pars = copy.deepcopy(self.roi[name].spectral_pars)
        old_type = self.roi[name]['SpectrumType']

        spectrum_pars = {
            'Prefactor':
                {'value': 1.0, 'scale': 1E-13, 'min': 1E-10,
                    'max': 1E10, 'free': True},
            'Index':
                {'value': 2.0, 'scale': -1.0, 'min': 0.0, 'max': 5.0, 'free': False},
            'Scale':
                {'value': 1E3, 'scale': 1.0, 'min': 1., 'max': 1E6, 'free': False},
        }

        self.set_source_spectrum(str(name), 'PowerLaw',
                                 spectrum_pars=spectrum_pars,
                                 update_source=False)

        src_norm_idx = -1
        free_params = self.get_params(True)
        for j, p in enumerate(free_params):
            if not p['is_norm']:
                continue
            if p['is_norm'] and p['src_name'] == name:
                src_norm_idx = j

            o['correlation'][p['src_name']] = np.zeros(nbins) * np.nan

        self._fitcache = None

        for i, (logemin, logemax) in enumerate(zip(loge_bins[:-1],
                                                   loge_bins[1:])):

            logectr = 0.5 * (logemin + logemax)
            emin = 10 ** logemin
            emax = 10 ** logemax
            ectr = 10 ** logectr
            ectr2 = ectr**2

            saved_state_bin = LikelihoodState(self.like)
            if use_local_index:
                o['index'][i] = -min(gf_bin_index[i], max_index)
            else:
                o['index'][i] = -bin_index

            self.set_norm(name, 1.0, update_source=False)
            self.set_parameter(name, 'Index', o['index'][i], scale=1.0,
                               update_source=False)
            self.like.syncSrcParams(str(name))

            ref_flux = self.like[name].flux(emin, emax)

            o['ref_flux'][i] = self.like[name].flux(emin, emax)
            o['ref_eflux'][i] = self.like[name].energyFlux(emin, emax)
            o['ref_dnde'][i] = self.like[name].spectrum()(pyLike.dArg(ectr))
            o['ref_dnde_e_min'][i] = self.like[
                name].spectrum()(pyLike.dArg(emin))
            o['ref_dnde_e_max'][i] = self.like[
                name].spectrum()(pyLike.dArg(emax))
            o['ref_e2dnde'][i] = o['ref_dnde'][i] * ectr2
            cs = self.model_counts_spectrum(
                name, logemin, logemax, summed=True)
            o['ref_npred'][i] = np.sum(cs)

            normVal = self.like.normPar(name).getValue()
            flux_ratio = gf_bin_flux[i] / ref_flux
            newVal = max(normVal * flux_ratio, 1E-10)
            self.set_norm(name, newVal, update_source=False)
            self.set_norm_bounds(name, [newVal * 1E-6, newVal * 1E4])

            self.like.syncSrcParams(str(name))
            self.free_norm(name)
            self.logger.debug('Fitting %s SED from %.0f MeV to %.0f MeV' %
                              (name, emin, emax))
            self.set_energy_range(logemin, logemax)

            fit_output = self._fit(**config['optimizer'])
            free_params = self.get_params(True)
            for j, p in enumerate(free_params):

                if not p['is_norm']:
                    continue

                o['correlation'][p['src_name']][i] = \
                    fit_output['correlation'][src_norm_idx, j]

            o['fit_quality'][i] = fit_output['fit_quality']
            o['fit_status'][i] = fit_output['fit_status']

            flux = self.like[name].flux(emin, emax)
            eflux = self.like[name].energyFlux(emin, emax)
            dnde = self.like[name].spectrum()(pyLike.dArg(ectr))

            o['norm'][i] = flux / o['ref_flux'][i]
            o['flux'][i] = flux
            o['eflux'][i] = eflux
            o['dnde'][i] = dnde
            o['e2dnde'][i] = dnde * ectr2

            cs = self.model_counts_spectrum(name, logemin,
                                            logemax, summed=True)
            o['npred'][i] = np.sum(cs)
            o['loglike'][i] = fit_output['loglike']

            lnlp = self.profile_norm(name, logemin=logemin, logemax=logemax,
                                     savestate=True, reoptimize=True,
                                     npts=npts, optimizer=config['optimizer'])

            o['ts'][i] = max(
                2.0 * (fit_output['loglike'] - lnlp['loglike'][0]), 0.0)
            o['loglike_scan'][i] = lnlp['loglike']
            o['dloglike_scan'][i] = lnlp['dloglike']
            o['norm_scan'][i] = lnlp['flux'] / ref_flux

            ul_data = utils.get_parameter_limits(
                lnlp['flux'], lnlp['dloglike'])

            o['norm_err_hi'][i] = ul_data['err_hi'] / ref_flux
            o['norm_err_lo'][i] = ul_data['err_lo'] / ref_flux

            if np.isfinite(ul_data['err_lo']):
                o['norm_err'][i] = 0.5 * (ul_data['err_lo'] +
                                          ul_data['err_hi']) / ref_flux
            else:
                o['norm_err'][i] = ul_data['err_hi'] / ref_flux

            o['norm_ul95'][i] = ul_data['ul'] / ref_flux

            ul_data = utils.get_parameter_limits(lnlp['flux'],
                                                 lnlp['dloglike'],
                                                 cl_limit=ul_confidence)
            o['norm_ul'][i] = ul_data['ul'] / ref_flux

            saved_state_bin.restore()

        for t in ['flux', 'eflux', 'dnde', 'e2dnde']:

            o['%s_err' % t] = o['norm_err'] * o['ref_%s' % t]
            o['%s_err_hi' % t] = o['norm_err_hi'] * o['ref_%s' % t]
            o['%s_err_lo' % t] = o['norm_err_lo'] * o['ref_%s' % t]
            o['%s_ul95' % t] = o['norm_ul95'] * o['ref_%s' % t]
            o['%s_ul' % t] = o['norm_ul'] * o['ref_%s' % t]

        self.set_energy_range(loge_bounds[0], loge_bounds[1])
        self.set_source_spectrum(str(name), old_type,
                                 spectrum_pars=old_pars,
                                 update_source=False)

        saved_state.restore()
        self._sync_params(name)

        if cov_scale is not None:
            self.remove_priors()

        return o
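
_make_sed estimates a local power-law index per bin from fluxes in narrow slices at the bin edges, g = 1 - log10(f0/f1)/log10(emin/emax); for a pure power law dN/dE proportional to E**-g this recovers g exactly. A quick numerical check (the flux helper is a stand-in for self.like[name].flux):

import numpy as np

def local_index(f0, f1, emin, emax):
    # Same expression as the gf_bin_index computation above.
    return 1.0 - np.log10(f0 / f1) / np.log10(emin / emax)

gamma = 2.3
emin, emax, delta = 1e3, 1e4, 1e-5

def flux(e1, e2):
    # Integral of E**-gamma between e1 and e2 (unnormalized power law).
    return (e2 ** (1.0 - gamma) - e1 ** (1.0 - gamma)) / (1.0 - gamma)

f0 = flux(emin * (1 - delta), emin * (1 + delta))
f1 = flux(emax * (1 - delta), emax * (1 + delta))
print(local_index(f0, f1, emin, emax))       # recovers ~2.3
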
Example #9
def compute_curve(name, pwndata, phimin, phimax, fit_emin):
    """Function to compute the points TS vs (phi range)"""

    TS=np.zeros_like(phimin)


    print('First, analyzing unphased data')
    roi = setup_pwn(name, pwndata, phase=[0, 1], quiet=True, fit_emin=fit_emin)
    print('bin edges:', roi.bin_edges)

    print(roi)

    gtlike = roi_gtlike.Gtlike(roi)
    like=gtlike.like

#    for i in range(len(like.model.params)):
#        like.freeze(i)
#    like[like.par_index(name, 'Prefactor')].setFree(1)
    like.fit()
    like[like.par_index(name, 'Prefactor')].setBounds(1.0e-5,1.0e5)
    print(like.model)

    # set the PWN spectral index to a starting value of -2 and leave it free
    index = like[like.par_index(name, 'Index')]
    index.setTrueValue(-2)

    index.setFree(1)

    # fix everything in the ROI except prefactor
    #for i in range(len(like.model.params)):
    #    like.freeze(i)
    #like[like.par_index(name, 'Prefactor')].setFree(1)

    print(like.model)

    # now, freeze everything else in the ROI:
    saved_state = LikelihoodState(gtlike.like)

    print("----------------------------------Begin Loop---------------------------------------")

    for i, phase in enumerate(zip(phimin, phimax)):
        print('Loop %4d/%4d: phase min=%.2f, max=%.2f'
              % (i + 1, len(phimin), phase[0], phase[1]))

        roi = setup_pwn(name, pwndata, phase, quiet=True, fit_emin=fit_emin)
        gtlike = roi_gtlike.Gtlike(roi)
        print(gtlike.like.model)

        # give model the same parameters as global fit.
        saved_state.like = gtlike.like
        saved_state.restore()
        
        for j in range(len(gtlike.like.model.params)):  # use j: i indexes TS below
            gtlike.like.freeze(j)
        gtlike.like[gtlike.like.par_index(name, 'Prefactor')].setFree(1)
        gtlike.like[gtlike.like.par_index(name, 'Prefactor')].setBounds(1.0e-5, 1.0e5)

        gtlike.like.fit()
        # n.b. no need to reoptimize since only one parameter
        # is fit.
        TS[i] = gtlike.like.Ts(name,reoptimize=False)

        print('phase=%s, TS=%s' % (phase, TS[i]))

        f=open("results_%s.yaml" % name,"w")
        yaml.dump(dict(TS=TS[0:i+1].tolist(), 
                       phimin=phimin[0:i+1].tolist(), 
                       phimax=phimax[0:i+1].tolist()),
                  f)
        f.close()

    return TS, phimin, phimax
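
compute_curve rewrites the results file on every loop iteration so a long phase scan can be interrupted and still leave usable output. The same checkpoint idiom with only the standard library (json stands in for yaml to keep the sketch dependency-free; values are illustrative):

import json

import numpy as np

phimin = np.linspace(0.0, 0.8, 5)
phimax = phimin + 0.2
TS = np.zeros_like(phimin)

for i, (lo, hi) in enumerate(zip(phimin, phimax)):
    TS[i] = 10.0 * hi                        # stand-in for the per-phase fit
    # Rewrite the checkpoint after every iteration, as compute_curve does.
    with open("results_demo.json", "w") as f:
        json.dump(dict(TS=TS[:i + 1].tolist(),
                       phimin=phimin[:i + 1].tolist(),
                       phimax=phimax[:i + 1].tolist()), f)
print(open("results_demo.json").read())
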
def calc_chi2(like, srcName, cl=0.95, verbosity=0,
              skip_global_opt=False, freeze_all=False,
              profile_optimizer = None, emin=100, emax=3e5, poi_values = []):
    """Calculate an integral upper limit by the profile likelihood (chi2) method.

  Description:

    Calculate an upper limit using the likelihood ratio test, i.e. by
    supposing the Likelihood is distributed as chi-squared of one degree of
    freedom and finding the point at which it decreases by the
    required amount to get an upper limit at a certain confidence level.

    This function first uses the optimizer to find the global minimum,
    then uses the new root finding algorithm to find the point at which
    the Likelihood decreases by the required amount. The background
    parameters can be frozen at their values found in the global minimum
    or optimized freely at each point.

  Inputs:

    like -- a binned or unbinned likelihood object which has the
        desired model. Be careful to freeze the index of the source for
        which the upper limit is being calculated if you want to quote a
        limit with a fixed index.

    srcName -- the name of the source for which to compute the limit.

    cl -- probability level for the upper limit.

    verbosity -- verbosity level. A value of zero means no output will
        be written. With a value of one the function writes some values
        describing its progress, but the optimizers don't write
        anything. Values larger than one direct the optimizer to produce
        verbose output.

    skip_global_opt -- if the model is already at the global minimum
        value then you can direct the integrator to skip the initial step
        to find the minimum. If you specify this option and the model is
        NOT at the global minimum your results will likely be wrong.

    freeze_all -- freeze all other parameters at the values of the
        global minimum.

    profile_optimizer -- Alternative optimizer to use when computing
        the profile, after the global minimum has been found. Only set
        this if you want to use a different optimizer for calculating the
        profile than for calculating the global minimum.

    emin, emax -- Bounds on energy range over which the flux should be
        integrated.

    poi_values -- Points of interest: values of the normalization
        parameter corresponding to fluxes of interest to the user. The
        profile likelihood will be evaluated at each of these values and the
        equivalent probability under the LRT returned in the vector
        "results.poi_probs". This parameter must be a vector, and can be
        empty.

  Outputs: (limit, results)

    limit -- the flux limit found.

    results -- a dictionary of additional results from the calculation,
        such as the value at the peak, etc.
  """

    saved_state = LikelihoodState(like)

    ###########################################################################
    #
    # This function has 2 main components:
    #
    # 1) Find the global maximum of the likelihood function using ST
    # 2) Find the point at which it falls by the appropriate amount
    #
    ###########################################################################

    # Optimizer uses verbosity level one smaller than given here
    optverbosity = max(verbosity-1, 0)

    ###########################################################################
    #
    # 1) Find the global maximum of the likelihood function using ST
    #
    ###########################################################################

    par = like.normPar(srcName)

    fitstat = None
    if not skip_global_opt:
        # Make sure desired parameter is free during global optimization
        par.setFree(1)
        like.syncSrcParams(srcName)

        # Perform global optimization
        if verbosity:
            print("Finding global maximum")
        try:
            like.fit(optverbosity)
            fitstat = like.optObject.getRetCode()
            if verbosity and fitstat != 0:
                print("Minimizer returned with non-zero code:", fitstat)
        except RuntimeError:
            print("Failed to find global maximum, results may be wrong")
    
    original_optimizer = like.optimizer
    if profile_optimizer is not None:
        like.optimizer = profile_optimizer

    # Store values of global fit
    maxval = -like()
    fitval = par.getValue()
    fiterr = par.error()
    limlo, limhi = par.getBounds()
    if verbosity:
        print("Maximum of %g with %s = %g +/- %g"
              % (-maxval, srcName, fitval, fiterr))

    # Freeze all other model parameters if requested (much faster!)
    if freeze_all:
        for i in range(len(like.model.params)):
            like.model[i].setFree(0)
            like.syncSrcParams(like[i].srcName)

    # Freeze the parameter of interest
    par.setFree(0)
    like.syncSrcParams(srcName)

    # Set up the caches for the optimum values and nuisance parameters
    optvalue_cache = dict()
    nuisance_cache = dict()
    optvalue_cache[fitval] = maxval
    _cache_nuisance(fitval, like, nuisance_cache)

    # Test if all parameters are frozen (could be true if we froze
    # them above or if they were frozen in the user's model)
    all_frozen = True
    for i in range(len(like.model.params)):
        if like.model[i].isFree():
            all_frozen = False
            break

    ###########################################################################
    #
    # 2) Find the point at which the likelihood has fallen by the
    #    appropriate amount
    #
    ###########################################################################

    delta_log_like = 0.5*scipy.stats.chi2.isf(1-2*(cl-0.5), 1)

    if verbosity:
        print("Finding limit (delta log Like=%g)" % delta_log_like)

    [xunused, xlim, yunused, ylim, exact_root_evals, approx_root_evals] = \
    _find_interval(like, par, srcName, all_frozen,
                   maxval, fitval, limlo, limhi,
                   delta_log_like, verbosity, like.tol,
                   True, 5, optvalue_cache, nuisance_cache)

    if verbosity:
        print("Limit: %g (%d full fcn evals and %d approx)"
              % (xlim, exact_root_evals, approx_root_evals))

    ###########################################################################
    #
    # Evaluate the probabilities of the "points of interest" using the LRT
    #
    ###########################################################################
    
    poi_dlogL = []
    poi_probs = []
    for xval in poi_values:
        if xval >= limhi:
            dlogL = None
            pval = 1.0
        elif xval <= limlo:
            dlogL = None
            pval = 0.0
        else:
            dlogL = _loglike(xval, like, par, srcName, maxval, verbosity,
                             all_frozen, optvalue_cache, nuisance_cache)
            if xval < fitval:
                pval = 0.5*(1 - scipy.stats.chi2.cdf(-2*dlogL, 1))
            else:
                pval = 0.5*(1 + scipy.stats.chi2.cdf(-2*dlogL, 1))
            if verbosity:
                print("POI %g: Delta log Like = %g (Pr=%g)" % (xval, dlogL, pval))

        poi_probs.append(pval)
        poi_dlogL.append(dlogL)
    
    like.optimizer = original_optimizer

    ###########################################################################
    #        
    # Calculate the integral flux at the upper limit parameter value
    #
    ###########################################################################
    
    # Set the parameter value that corresponds to the desired C.L.
    par.setValue(xlim)

    # Evaluate the flux corresponding to this upper limit.
    ul_flux = like[srcName].flux(emin, emax)

    saved_state.restore()

    # Pack up all the results
    results = dict(all_frozen     = all_frozen,
                   ul_frac        = cl,
                   ul_flux        = ul_flux,
                   ul_value       = xlim,
                   ul_loglike     = maxval+ylim-delta_log_like,
                   ul_dloglike    = ylim-delta_log_like,
                   peak_fitstatus = fitstat,
                   peak_value     = fitval,
                   peak_dvalue    = fiterr,
                   peak_loglike   = maxval,
                   poi_values     = poi_values,
                   poi_probs      = poi_probs,
                   poi_dlogL      = poi_dlogL,
                   flux_emin      = emin,
                   flux_emax      = emax)

    return ul_flux, results
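
The threshold used above, delta_log_like = 0.5*chi2.isf(1 - 2*(cl - 0.5), 1), is about 1.35 for cl = 0.95; the limit is the parameter value where the profile log-likelihood has fallen by that amount. A runnable check on a toy parabolic profile (the profile shape is illustrative, not from the source):

import scipy.stats
import scipy.optimize

cl = 0.95
delta_log_like = 0.5 * scipy.stats.chi2.isf(1 - 2 * (cl - 0.5), 1)
print(delta_log_like)                        # ~1.353

def dloglike(x):
    # Toy profile log-likelihood -(x - 2)**2, shifted so the root is the limit.
    return -(x - 2.0) ** 2 + delta_log_like

xlim = scipy.optimize.brentq(dloglike, 2.0, 10.0)
print(xlim)                                  # 2 + sqrt(delta_log_like) ~ 3.16
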
def calc_int(like, srcName, cl=0.95, verbosity=0,
             skip_global_opt=False, be_very_careful=False, freeze_all=False,
             delta_log_like_limits = 10.0, profile_optimizer = None,
             emin=100, emax=3e5, poi_values = []):
    """Calculate an integral upper limit by direct integration.

  Description:

    Calculate an integral upper limit by integrating the likelihood
    function up to a point which contains a given fraction of the total
    probability. This is a fairly standard Bayesian approach to
    calculating upper limits, which assumes a uniform prior probability.
    The likelihood function is not assumed to be distributed as
    chi-squared.

    This function first uses the optimizer to find the global minimum,
    then uses the scipy.integrate.quad function to integrate the
    likelihood function with respect to one of the parameters. During the
    integration, the other parameters can be frozen at their values found
    in the global minimum or optimized freely at each point.

  Inputs:

    like -- a binned or unbinned likelihood object which has the
        desired model. Be careful to freeze the index of the source for
        which the upper limit is being calculated if you want to quote a
        limit with a fixed index.

    srcName -- the name of the source for which to compute the limit.

    cl -- probability level for the upper limit.

    verbosity -- verbosity level. A value of zero means no output will
        be written. With a value of one the function writes some values
        describing its progress, but the optimizers don't write
        anything. Values larger than one direct the optimizer to produce
        verbose output.

    skip_global_opt -- if the model is already at the global minimum
        value then you can direct the integrator to skip the initial step
        to find the minimum. If you specify this option and the model is
        NOT at the global minimum your results will likely be wrong.

    be_very_careful -- direct the integrator to be even more careful
        in integrating the function, by telling it to use a higher
        tolerance and to specifically pay attention to the peak in the
        likelihood function.  More evaluations of the integrand will be
        made, which WILL be slower and MAY result in a more accurate
        limit. NOT RECOMMENDED

    freeze_all -- freeze all other parameters at the values of the
        global minimum.

    delta_log_like_limits -- the limits on integration is defined by
        the region around the global maximum in which the log likelihood
        is close enough to the peak value. Too small a value will mean the
        integral does not include a significant amount of the likelihood
        function.  Too large a value may make the integrator miss the peak
        completely and get a bogus answer (although the
        "be_very_careful" option will help here).

    profile_optimizer -- Alternative optimizer to use when computing
        the profile, after the global minimum has been found.  Only set
        this if you want to use a different optimizer for calculating the
        profile than for calculating the global minimum.
         
    emin, emax -- Bounds on energy range over which the flux should be
        integrated.

    poi_values -- Points of interest: values of the normalization
        parameter corresponding to fluxes of interest to the user. The
        integrator will calculate the integral of the probability
        distribution to each of these values and return them in the vector
        "results.poi_probs". This parameter must be a vector, and can be
        empty.

  Outputs: (limit, results)

    limit -- the flux limit found.

    results -- a dictionary of additional results from the
        calculation, such as the value of the peak, the profile of the
        likelihood and two profile-likelihood upper-limits.
  """  
    saved_state = LikelihoodState(like)

    ###########################################################################
    #
    # This function has 4 main components:
    #
    # 1) Find the global maximum of the likelihood function using ST
    # 2) Define the integration limits by finding the points at which the
    #    log likelihood has fallen by a certain amount
    # 3) Integrate the function using the QUADPACK adaptive integrator
    # 4) Calculate the upper limit by re-integrating the function using
    #    the evaluations made by the adaptive integrator. Two schemes are
    #    tried, splines to the function points and trapezoidal quadrature.
    #
    ###########################################################################

    # Optimizer uses verbosity level one smaller than given here
    optverbosity = max(verbosity-1, 0)

    ###########################################################################
    #
    # 1) Find the global maximum of the likelihood function using ST
    #
    ###########################################################################

    par = like.normPar(srcName)

    fitstat = None
    if not skip_global_opt:
        # Make sure desired parameter is free during global optimization
        par.setFree(1)
        like.syncSrcParams(srcName)

        # Perform global optimization
        if verbosity:
            print("Finding global maximum")
        try:
            like.fit(optverbosity)
            fitstat = like.optObject.getRetCode()
            if verbosity and fitstat != 0:
                print("Minimizer returned with non-zero code:", fitstat)
        except RuntimeError:
            print("Failed to find global maximum, results may be wrong")
    
    original_optimizer = like.optimizer
    if profile_optimizer is not None:
        like.optimizer = profile_optimizer

    # Store values of global fit
    maxval = -like()
    fitval = par.getValue()
    fiterr = par.error()
    limlo, limhi = par.getBounds()
    if verbosity:
        print("Maximum of %g with %s = %g +/- %g"
              % (-maxval, srcName, fitval, fiterr))

    # Freeze all other model parameters if requested (much faster!)
    if freeze_all:
        for i in range(len(like.model.params)):
            like.model[i].setFree(0)
            like.syncSrcParams(like[i].srcName)

    # Freeze the parameter of interest
    par.setFree(0)
    like.syncSrcParams(srcName)

    # Set up the caches for the optimum values and nuisance parameters
    optvalue_cache = dict()
    nuisance_cache = dict()
    optvalue_cache[fitval] = maxval
    _cache_nuisance(fitval, like, nuisance_cache)

    # Test if all parameters are frozen (could be true if we froze
    # them above or if they were frozen in the user's model)
    all_frozen = True
    for i in range(len(like.model.params)):
        if like.model[i].isFree():
            all_frozen = False
            break

    ###########################################################################
    #
    # 2) Define the integration limits by finding the points at which the
    #    log likelihood has fallen by a certain amount
    #
    ###########################################################################

    if verbosity:
        print("Finding integration bounds (delta log Like=%g)"
              % delta_log_like_limits)

    [xlo, xhi, ylo, yhi, exact_root_evals, approx_root_evals] = \
    _find_interval(like, par, srcName, all_frozen,
                   maxval, fitval, limlo, limhi,
                   delta_log_like_limits, verbosity, like.tol,
                   False, 5, optvalue_cache, nuisance_cache)

    if poi_values is not None and len(poi_values) > 0:
        xlo = max(min(xlo, min(poi_values)/2.0), limlo)
        xhi = min(max(xhi, max(poi_values)*2.0), limhi)

    if verbosity:
        print("Integration bounds: %g to %g (%d full fcn evals and %d approx)"
              % (xlo, xhi, exact_root_evals, approx_root_evals))

    ###########################################################################
    #
    # 3) Integrate the function using the QUADPACK adaptive integrator
    #
    ###########################################################################

    #
    # Do integration using QUADPACK routine from SciPy -- the "quad"
    # routine uses adaptive quadrature, which *should* spend more time
    # evaluating the function where it counts the most.
    #
    points = []
    epsrel = (1.0-cl)*1e-3
    if be_very_careful:
        # In "be very careful" mode we explicitly tell "quad" that it
        # should examine more carefully the point at x=fitval, which
        # is the peak of the likelihood. We also use a tighter
        # tolerance value, but that seems to have a secondary effect.
        points = [ fitval ]
        epsrel = (1.0-cl)*1e-8

    if verbosity:
        print("Integrating probability distribution")

    nfneval = -len(optvalue_cache)
    f_of_x = dict()
    quad_ival, quad_ierr = \
          scipy.integrate.quad(_integrand, xlo, xhi,\
                               args = (f_of_x, like, par, srcName, maxval,\
                                       verbosity, all_frozen,
                                       optvalue_cache, nuisance_cache),\
                               points=points, epsrel=epsrel, epsabs=1)
    nfneval += len(optvalue_cache)

    if verbosity:
        print("Total integral: %g +/- %g (%d fcn evals)"
              % (quad_ival, quad_ierr, nfneval))

    ###########################################################################
    #
    # 4) Calculate the upper limit by re-integrating the function using
    #    the evaluations made by the adaptive integrator. Two schemes are
    #    tried, splines to the function points and trapezoidal quadrature.
    #
    ###########################################################################

    # Calculation of the upper limit requires integrating up to
    # various test points, and finding the one that contains the
    # prescribed fraction of the probability. Using the "quad"
    # function to do this by evaluating the likelihood function
    # directly would be computationally prohibitive, it is preferable
    # to use the function evaluations that have been saved in the
    # "f_of_x" variable.

    # We try 2 different integration approaches on this data:
    # trapezoidal quadrature and integration of a fitted spline, with
    # the expectation that the spline will be better, but that perhaps
    # the trapezoidal might be more robust if the spline fit goes
    # crazy. The method whose results are closest to those from "quad"
    # is picked to do the search.
    
    # Organize the computed values into two vectors x & y.  Note that
    # in Python 3 dict.keys() returns a view, so sort via sorted().
    x = sorted(f_of_x.keys())
    y = []
    logy = []
    for xi in x:
        y.append(f_of_x[xi])
        logy.append(math.log(f_of_x[xi]))

    # Evaluate upper limit using trapezoidal rule
    trapz_ival = scipy.integrate.trapz(y,x)
    cint = 0
    Cint = [ 0 ]
    for i in range(len(x)-1):
        cint += 0.5*(f_of_x[x[i+1]]+f_of_x[x[i]])*(x[i+1]-x[i])
        Cint.append(cint)
    int_irep = scipy.interpolate.interp1d(x, Cint)
    xlim_trapz = scipy.optimize.brentq(_int1droot, x[0], x[-1],
                                       args = (cl*cint, int_irep))
    ylim_trapz = int_irep(xlim_trapz).item()/cint

    # Evaluate upper limit using spline
    spl_irep = scipy.interpolate.splrep(x,y,xb=xlo,xe=xhi)
    spl_ival = scipy.interpolate.splint(xlo,xhi,spl_irep)
    xlim_spl = scipy.optimize.brentq(_splintroot, xlo, xhi, 
                                     args = (cl*spl_ival, xlo, spl_irep))
    ylim_spl = scipy.interpolate.splint(xlo,xlim_spl,spl_irep)/spl_ival

    # Test which is closest to QUADPACK adaptive method: TRAPZ or SPLINE
    if abs(spl_ival - quad_ival) < abs(trapz_ival - quad_ival):
        # Evaluate upper limit using spline
        if verbosity:
            print("Using spline integral: %g (delta=%g)"
                  % (spl_ival, abs(spl_ival / quad_ival - 1)))
        xlim = xlim_spl
        ylim = ylim_spl
        if verbosity:
            print("Spline search: %g (P=%g)" % (xlim, ylim))
    else:
        # Evaluate upper limit using trapezoidal rule
        if verbosity:
            print("Using trapezoidal integral: %g (delta=%g)"
                  % (trapz_ival, abs(trapz_ival / quad_ival - 1)))
        xlim = xlim_trapz
        ylim = ylim_trapz
        if verbosity:
            print("Trapezoidal search: %g (P=%g)" % (xlim, ylim))

    like.optimizer = original_optimizer

    ###########################################################################
    #
    # Since we have computed the profile likelihood, calculate the
    # right side of the 2-sided confidence region at the CL% and
    # 2*(CL-50)% levels under the assumption that the likelihood is
    # distributed as chi^2 of 1 DOF. Again, use the root finder on a
    # spline and linear representation of logL.
    #
    ###########################################################################

    profile_dlogL1 = -0.5*scipy.stats.chi2.isf(1-cl, 1)
    profile_dlogL2 = -0.5*scipy.stats.chi2.isf(1-2*(cl-0.5), 1)

    # The spline algorithm is prone to noise in the fitted logL,
    # especially in "be_very_careful" mode, so fall back to a linear
    # interpolation if necessary

    spl_drep = scipy.interpolate.splrep(x,logy,xb=xlo,xe=xhi)
    spl_pflux1 = scipy.optimize.brentq(_splevroot, fitval, xhi, 
                                       args = (profile_dlogL1, spl_drep))
    spl_pflux2 = scipy.optimize.brentq(_splevroot, fitval, xhi, 
                                       args = (profile_dlogL2, spl_drep))

    int_drep = scipy.interpolate.interp1d(x,logy)
    int_pflux1 = scipy.optimize.brentq(_int1droot, max(min(x),fitval), max(x), 
                                       args = (profile_dlogL1, int_drep))
    int_pflux2 = scipy.optimize.brentq(_int1droot, max(min(x),fitval), max(x), 
                                       args = (profile_dlogL2, int_drep))

    if (2.0 * abs(int_pflux1 - spl_pflux1) / abs(int_pflux1 + spl_pflux1) > 0.05 or
            2.0 * abs(int_pflux2 - spl_pflux2) / abs(int_pflux2 + spl_pflux2) > 0.05):
        if verbosity:
            print("Using linear interpolation for profile UL estimate")
        profile_flux1 = int_pflux1
        profile_flux2 = int_pflux2
    else:
        if verbosity:
            print("Using spline interpolation for profile UL estimate")
        profile_flux1 = spl_pflux1
        profile_flux2 = spl_pflux2

    ###########################################################################
    #
    # Evaluate the probabilities of the "points of interest" using the integral
    #
    ###########################################################################

    poi_probs = []
    poi_dlogL_interp = []
    poi_chi2_equiv = []

    for xval in poi_values:
        dlogL = None
        if xval >= xhi:
            pval = 1.0
        elif xval <= xlo:
            pval = 0.0
        # Same test as above to decide between TRAPZ and SPLINE
        elif abs(spl_ival - quad_ival) < abs(trapz_ival - quad_ival):
            pval = scipy.interpolate.splint(xlo, xval, spl_irep) / spl_ival
            dlogL = scipy.interpolate.splev(xval, spl_drep)
        else:
            pval = int_irep(xval).item() / cint
            dlogL = int_drep(xval).item()
        poi_probs.append(pval)
        poi_dlogL_interp.append(dlogL)
        poi_chi2_equiv.append(scipy.stats.chi2.isf(1 - pval, 1))

    ###########################################################################
    #        
    # Calculate the integral flux at the upper limit parameter value
    #
    ###########################################################################
    
    # Set the parameter value that corresponds to the desired C.L.
    par.setValue(xlim)

    # Evaluate the flux corresponding to this upper limit.
    ul_flux = like[srcName].flux(emin, emax)

    saved_state.restore()

    # Pack up all the results
    results = dict(all_frozen       = all_frozen,
                   ul_frac          = cl,
                   ul_flux          = ul_flux,
                   ul_value         = xlim,
                   ul_trapz         = xlim_trapz,
                   ul_spl           = xlim_spl,
                   int_limits       = [xlo, xhi],
                   profile_x        = x,
                   profile_y        = y,
                   peak_fitstatus   = fitstat,
                   peak_value       = fitval,
                   peak_dvalue      = fiterr,
                   peak_loglike     = maxval,
                   prof_ul_frac1    = cl,
                   prof_ul_dlogL1   = profile_dlogL1,
                   prof_ul_value1   = profile_flux1,
                   prof_ul_frac2    = 2*(cl-0.5),
                   prof_ul_dlogL2   = profile_dlogL2,
                   prof_ul_value2   = profile_flux2,
                   poi_values       = poi_values,
                   poi_probs        = poi_probs,
                   poi_dlogL_interp = poi_dlogL_interp,
                   poi_chi2_equiv   = poi_chi2_equiv,
                   flux_emin        = emin,
                   flux_emax        = emax)

    return ul_flux, results
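
The trapezoid/spline re-integration used above can be seen in isolation in the following toy sketch. Everything here is hypothetical (a Gaussian stands in for the cached likelihood evaluations; only the control flow mirrors the code above): build the cumulative integral from the saved points, then root-find for the value containing a fraction cl of the probability.

import numpy as np
import scipy.interpolate
import scipy.optimize

# Toy stand-in for the cached evaluations left behind by "quad":
# an irregular grid x and posterior-like values y.
x = np.sort(np.concatenate([np.linspace(0.0, 5.0, 41),
                            [0.3, 0.9, 1.1, 1.7, 2.6]]))
y = np.exp(-0.5 * (x - 1.0) ** 2)

cl = 0.95

# Cumulative trapezoidal integral, as in the loop above
cint = 0.0
Cint = [0.0]
for i in range(len(x) - 1):
    cint += 0.5 * (y[i + 1] + y[i]) * (x[i + 1] - x[i])
    Cint.append(cint)
int_irep = scipy.interpolate.interp1d(x, Cint)

# The upper limit is where the cumulative integral reaches cl*cint
xlim_trapz = scipy.optimize.brentq(lambda t: int_irep(t) - cl * cint,
                                   x[0], x[-1])

# Same search with a fitted spline (splrep/splint), as above
spl_irep = scipy.interpolate.splrep(x, y)
spl_ival = scipy.interpolate.splint(x[0], x[-1], spl_irep)
xlim_spl = scipy.optimize.brentq(
    lambda t: scipy.interpolate.splint(x[0], t, spl_irep) - cl * spl_ival,
    x[0], x[-1])

print("UL (trapz) = %g, UL (spline) = %g" % (xlim_trapz, xlim_spl))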

def _find_interval(like, par, srcName, no_optimizer,
                   maxval, fitval, limlo, limhi,
                   delta_log_like_limits=2.71/2, verbosity=0, tol=0.01,
                   no_lo_bound_search=False, nloopmax=5,
                   optvalue_cache=None, nuisance_cache=None):
    """Internal function to search for the interval of the normalization
    parameter in which the log likelihood is larger than a predefined
    value. Used to find the upper limit in the profile method and to
    find sensible limits of integration in the Bayesian method. Uses
    the SciPy Brent-method root finder to do the search. Uses the new
    fast method for up to nloopmax iterations, then falls back to the
    old method."""

    # Avoid the shared-mutable-default pitfall: create fresh caches
    # when the caller does not supply them.
    if optvalue_cache is None:
        optvalue_cache = dict()
    if nuisance_cache is None:
        nuisance_cache = dict()

    subval = maxval - delta_log_like_limits
    search_xtol = limlo*0.1
    search_ytol = tol

    # 2010-06-11: NEW and FASTER algorithm to find integration
    # limits. Instead of evaluating the real function while searching
    # for the root (which requires calling the optimizer) we now
    # evaluate an approximate function, in which all the background
    # parameters are kept constant. When we find the root (flux) of
    # the approximate function then optimize at that flux to evaluate
    # how close the real function is there. Then repeat this up to
    # "nloopmax" times, after which revert to old method if we haven't
    # converged. Each time the real function is evaluated at the root
    # of the approximate it forces the approximate function in the
    # next iteration to equal the real function at that point (since
    # the background parameters values are changed to those optimized
    # at that point) and so the real and approximate functions get
    # closer and closer around the region of the roots.

    # 2009-04-16: modified to do logarithmic search before calling
    # Brent because the minimizer does not converge very well when it
    # is called alternatively at extreme ends of the flux range,
    # because the "nuisance" parameters are very far from their
    # optimal values from call to call. THIS COMMENT IS OBSOLETED
    # BY PREVIOUS COMMENT EXCEPT IF/WHEN NEW METHOD FAILS.

    exact_root_evals = -len(optvalue_cache)
    approx_root_evals = 0
    
    temp_saved_state = LikelihoodState(like)

    # HI BOUND

    xlft = fitval
    xrgt = limhi
    xtst = fitval
    ytst = delta_log_like_limits
    iloop = 0

    while (iloop<nloopmax) and (xrgt>xlft) and (abs(ytst)>search_ytol):
        approx_cache = dict()
        approx_cache[xtst] = ytst
        if _approxroot(xrgt,approx_cache,like,par,srcName,subval,verbosity)<0:
            xtst = scipy.optimize.brentq(_approxroot, xlft, xrgt,
                                         xtol=search_xtol, 
                                    args = (approx_cache,like,par,
                                            srcName,subval,verbosity))
        else:
            xtst = xrgt
        ytst = _root(xtst, like, par,srcName, subval, verbosity,
                     no_optimizer, optvalue_cache, nuisance_cache)
        if ytst<=0: xrgt=xtst
        else: xlft=xtst
        iloop += 1
        approx_root_evals += len(approx_cache)-1
        pass
    xhi = xtst
    yhi = ytst

    if (xrgt>xlft) and (abs(ytst)>search_ytol):
        xlft = fitval
        for ix in optvalue_cache:
            if(optvalue_cache[ix]-subval>0 and ix>xlft):
                xlft = ix
        xrgt = limhi
        for ix in optvalue_cache:
            if(optvalue_cache[ix]-subval<0 and ix<xrgt):
                xrgt = ix
        if(xrgt > max(xlft*10.0, xlft+(limhi-limlo)*1e-4)):
            xtst = max(xlft*10.0, xlft+(limhi-limlo)*1e-4)            
            while(xtst<xrgt and\
                  _root(xtst, like,par, srcName, subval, verbosity,
                        no_optimizer, optvalue_cache, nuisance_cache)>=0):
                xtst *= 10.0
            if(xtst<xrgt):
                xrgt = xtst
        if xrgt>limhi: xrgt=limhi
        if xrgt<limhi or \
               _root(xrgt, like, par, srcName, subval, verbosity,
                     no_optimizer, optvalue_cache, nuisance_cache)<0:
            xhi = scipy.optimize.brentq(_root, xlft, xrgt, xtol=search_xtol,
                                        args = (like,par,srcName,\
                                                subval,verbosity,no_optimizer,
                                                optvalue_cache,nuisance_cache))
            pass
        yhi = _root(xhi, like, par, srcName, subval, verbosity,
                    no_optimizer, optvalue_cache, nuisance_cache)
        pass

    temp_saved_state.restore()

    # LO BOUND

    if(no_lo_bound_search):
        xlo = fitval
        ylo = maxval
        exact_root_evals += len(optvalue_cache)
        return [xlo, xhi, ylo, yhi, exact_root_evals, approx_root_evals]
    
    xlft = limlo
    xrgt = fitval
    xtst = fitval
    ytst = delta_log_like_limits
    iloop = 0

    while (iloop<nloopmax) and (xrgt>xlft) and (abs(ytst)>search_ytol):
        approx_cache = dict()        
        approx_cache[xtst] = ytst
        if _approxroot(xlft,approx_cache,like,par,srcName,subval,verbosity)<0:
            xtst = scipy.optimize.brentq(_approxroot, xlft, xrgt,
                                         xtol=search_xtol, 
                                         args = (approx_cache,like,par,
                                                 srcName,subval,verbosity))
        else:
            xtst = xlft
        ytst = _root(xtst, like, par, srcName, subval, verbosity,
                     no_optimizer, optvalue_cache, nuisance_cache)
        if ytst<=0: xlft=xtst
        else: xrgt=xtst
        approx_root_evals += len(approx_cache)-1
        iloop += 1
        pass
    xlo = xtst
    ylo = ytst

    if (xrgt>xlft) and (abs(ytst)>search_ytol):
        xrgt = fitval
        for ix in optvalue_cache:
            if(optvalue_cache[ix]-subval>0 and ix<xrgt):
                xrgt = ix
        xlft = limlo
        for ix in optvalue_cache:
            if(optvalue_cache[ix]-subval<0 and ix<xlft):
                xlft = ix
        if(xlft < min(xrgt*0.1, xrgt-(limhi-limlo)*1e-4)):
            xtst = min(xrgt*0.1, xrgt-(limhi-limlo)*1e-4)            
            while(xtst>xlft and\
                  _root(xtst, like,par, srcName, subval, verbosity,
                        no_optimizer, optvalue_cache, nuisance_cache)>=0):
                xtst *= 0.1
            if(xtst>xlft):
                xlft = xtst
        if xlft<limlo: xlft=limlo
        if xlft>limlo or \
               _root(xlft, like, par, srcName, subval, verbosity,
                     no_optimizer, optvalue_cache, nuisance_cache)<0:
            xlo = scipy.optimize.brentq(_root, xlft, xrgt, xtol=search_xtol,
                                        args = (like,par,srcName,\
                                                subval,verbosity,no_optimizer,
                                                optvalue_cache,nuisance_cache))
            pass
        ylo = _root(xlo, like, par, srcName, subval, verbosity,
                    no_optimizer, optvalue_cache, nuisance_cache)
        pass

    temp_saved_state.restore()

    exact_root_evals += len(optvalue_cache)
    return [xlo, xhi, ylo, yhi, exact_root_evals, approx_root_evals]
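
The 2010-06-11 scheme documented inside _find_interval alternates between a cheap approximate function and the expensive exact one, re-anchoring the approximation at each exact evaluation. The toy sketch below mirrors only that control flow; the quadratic exact curve and the sinusoidal bias are hypothetical stand-ins for the profile likelihood and the frozen-background mismatch.

import math
import scipy.optimize

def exact(x):
    # Stand-in for the expensive exact curve (log-likelihood minus
    # the target level): in _find_interval every such call triggers
    # a full re-optimization of the nuisance parameters.
    return 4.0 - (x - 1.0) ** 2          # true root at x = 3

def bias(x):
    # Smooth mismatch playing the role of the frozen background
    # parameters; the approximation is wrong by bias(x) - bias(x0).
    return 0.5 * math.sin(x)

def make_approx(x0):
    # Cheap approximation anchored so approx(x0) == exact(x0), which
    # is what refitting the nuisance parameters at x0 achieves above.
    return lambda x: exact(x) + bias(x) - bias(x0)

xlft, xrgt = 1.0, 10.0                   # bracket [fitval, limhi]
xtst = xlft
ytst = exact(xtst)
n_exact = 1
ytol, nloopmax, iloop = 1e-3, 8, 0

while iloop < nloopmax and xrgt > xlft and abs(ytst) > ytol:
    approx = make_approx(xtst)
    if approx(xrgt) < 0.0:
        xtst = scipy.optimize.brentq(approx, xlft, xrgt)
    else:
        xtst = xrgt
    ytst = exact(xtst)                   # one expensive evaluation
    n_exact += 1
    if ytst <= 0.0:
        xrgt = xtst                      # shrink the bracket from above
    else:
        xlft = xtst                      # shrink the bracket from below
    iloop += 1

print("root ~ %g after %d exact evaluations (true root: 3)" % (xtst, n_exact))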
Example #13
0
    def saveCurrentFit(self, negLogLike=None):
        self.saved_state = LikelihoodState(self, negLogLike)
Example #14
0
    def _extension(self, name, **kwargs):

        spatial_model = kwargs['spatial_model']
        width_min = kwargs['width_min']
        width_max = kwargs['width_max']
        width_nstep = kwargs['width_nstep']
        width = kwargs['width']
        free_background = kwargs['free_background']
        free_radius = kwargs.get('free_radius', None)
        fix_shape = kwargs.get('fix_shape', False)
        make_tsmap = kwargs.get('make_tsmap', False)
        update = kwargs['update']
        sqrt_ts_threshold = kwargs['sqrt_ts_threshold']

        if kwargs['psf_scale_fn']:
            def psf_scale_fn(t): return 1.0 + np.interp(np.log10(t),
                                                        kwargs['psf_scale_fn'][0],
                                                        kwargs['psf_scale_fn'][1])
        else:
            psf_scale_fn = None

        saved_state = LikelihoodState(self.like)
        loglike_init = -self.like()
        self.logger.debug('Initial Model Log-Likelihood: %f', loglike_init)

        if not free_background:
            self.free_sources(free=False, loglevel=logging.DEBUG)

        if free_radius is not None:
            diff_sources = [s.name for s in self.roi.sources if s.diffuse]
            skydir = self.roi[name].skydir
            free_srcs = [s.name for s in
                         self.roi.get_sources(skydir=skydir,
                                              distance=free_radius,
                                              exclude=diff_sources)]
            self.free_sources_by_name(free_srcs, pars='norm',
                                      loglevel=logging.DEBUG)

        # Fit baseline model
        self.free_source(name, loglevel=logging.DEBUG)
        if fix_shape:
            self.free_source(name, free=False, pars='shape',
                             loglevel=logging.DEBUG)

        fit_output = self._fit(loglevel=logging.DEBUG, **kwargs['optimizer'])
        src = self.roi.copy_source(name)

        # Save likelihood value for baseline fit
        saved_state_base = LikelihoodState(self.like)
        loglike_base = fit_output['loglike']
        self.logger.debug('Baseline Model Log-Likelihood: %f', loglike_base)

        if not width:
            width = np.logspace(np.log10(width_min), np.log10(width_max),
                                width_nstep)

        width = np.array(width)
        width = width[width > 0]
        width = np.concatenate(([0.0], np.array(width)))

        o = defaults.make_default_tuple(defaults.extension_output)
        o.name = name
        o.width = width
        o.dloglike = np.zeros(len(width) + 1)
        o.loglike = np.zeros(len(width) + 1)
        o.loglike_base = loglike_base
        o.loglike_init = loglike_init
        o.config = kwargs
        o.ebin_ext = np.ones(self.enumbins) * np.nan
        o.ebin_ext_err = np.ones(self.enumbins) * np.nan
        o.ebin_ext_err_lo = np.ones(self.enumbins) * np.nan
        o.ebin_ext_err_hi = np.ones(self.enumbins) * np.nan
        o.ebin_ext_ul95 = np.ones(self.enumbins) * np.nan
        o.ebin_ts_ext = np.ones(self.enumbins) * np.nan
        o.ebin_loglike = np.ones((self.enumbins, len(width))) * np.nan
        o.ebin_dloglike = np.ones((self.enumbins, len(width))) * np.nan
        o.ebin_loglike_ptsrc = np.ones(self.enumbins) * np.nan
        o.ebin_loglike_ext = np.ones(self.enumbins) * np.nan
        o.ebin_e_min = self.energies[:-1]
        o.ebin_e_max = self.energies[1:]
        o.ebin_e_ctr = np.sqrt(o.ebin_e_min * o.ebin_e_max)

        self.logger.debug('Width scan vector:\n %s', width)

        if kwargs['fit_position']:
            ext_fit = self._fit_extension_full(name,
                                               spatial_model=spatial_model,
                                               optimizer=kwargs['optimizer'])
        else:
            ext_fit = self._fit_extension(name,
                                          spatial_model=spatial_model,
                                          optimizer=kwargs['optimizer'],
                                          psf_scale_fn=psf_scale_fn)

        o.update(ext_fit)

        # Fit with the best-fit extension model
        self.logger.info('Fitting extended-source model.')

        self.set_source_morphology(name, spatial_model=spatial_model,
                                   spatial_pars={'ra': o['ra'], 'dec': o['dec'],
                                                 'SpatialWidth': o['ext']},
                                   use_pylike=False,
                                   psf_scale_fn=psf_scale_fn)

        # Perform scan over width parameter
        o.loglike = self._scan_extension(name,
                                         spatial_model=spatial_model,
                                         width=width,
                                         optimizer=kwargs['optimizer'],
                                         psf_scale_fn=psf_scale_fn)

        self.set_source_morphology(name, spatial_model=spatial_model,
                                   spatial_pars={'ra': o['ra'], 'dec': o['dec'],
                                                 'SpatialWidth': o['ext']},
                                   use_pylike=False,
                                   psf_scale_fn=psf_scale_fn)

        fit_output = self._fit(loglevel=logging.DEBUG, update=False,
                               **kwargs['optimizer'])

        o.source_fit = self.get_src_model(name, reoptimize=True,
                                          optimizer=kwargs['optimizer'])
        o.loglike_ext = fit_output['loglike']

        if kwargs['fit_ebin']:
            self._fit_extension_ebin(name, o, **kwargs)

        if kwargs['save_model_map']:
            o.ext_tot_map = self.model_counts_map()
            o.ext_src_map = self.model_counts_map(name)
            o.ext_bkg_map = self.model_counts_map(exclude=[name])

        if make_tsmap:
            tsmap_model = {'SpatialModel': 'RadialDisk',
                           'SpatialWidth': 0.1 * 0.8246211251235321}
            tsmap_model.update(src.spectral_pars)
            self.logger.info('Generating TS map.')
            tsmap = self.tsmap(model=tsmap_model,
                               map_skydir=SkyCoord(
                                   o['ra'], o['dec'], unit='deg'),
                               map_size=max(1.0, 4.0 * o['ext']),
                               exclude=[name],
                               write_fits=False,
                               write_npy=False,
                               use_pylike=False,
                               make_plots=False,
                               loglevel=logging.DEBUG)
            o.tsmap = tsmap['ts']

        self.logger.info('Testing point-source model.')
        # Test point-source hypothesis
        self.set_source_morphology(name, spatial_model='PointSource',
                                   use_pylike=False,
                                   psf_scale_fn=psf_scale_fn)

        # Fit a point-source
        saved_state_base.restore()
        self.logger.debug('Fitting point-source model.')
        fit_output = self._fit(loglevel=logging.DEBUG, **kwargs['optimizer'])

        if src['SpatialModel'] == 'PointSource' and kwargs['fit_position']:
            loc = self.localize(name, update=False)
            o.loglike_ptsrc = loc['loglike_loc']
        else:
            o.loglike_ptsrc = fit_output['loglike']

        o.dloglike = o.loglike - o.loglike_ptsrc
        o.ts_ext = 2 * (o.loglike_ext - o.loglike_ptsrc)
        self.logger.debug('Point-Source Model Likelihood: %f', o.loglike_ptsrc)

        if kwargs['save_model_map']:
            o.ptsrc_tot_map = self.model_counts_map()
            o.ptsrc_src_map = self.model_counts_map(name)
            o.ptsrc_bkg_map = self.model_counts_map(exclude=[name])

        if update and (sqrt_ts_threshold is None or
                       np.sqrt(o['ts_ext']) > sqrt_ts_threshold):
            src = self.delete_source(name, loglevel=logging.DEBUG)
            src.set_spatial_model(spatial_model,
                                  {'ra': o.ra, 'dec': o.dec,
                                   'SpatialWidth': o.ext})
            # FIXME: Issue with source map cache when source is
            # initialized as fixed.
            self.add_source(name, src, free=True, loglevel=logging.DEBUG)
            self.free_source(name, loglevel=logging.DEBUG)
            if fix_shape:
                self.free_source(name, free=False, pars='shape',
                                 loglevel=logging.DEBUG)
            fit_output = self.fit(loglevel=logging.DEBUG,
                                  **kwargs['optimizer'])
            o.loglike_ext = fit_output['loglike']

            src = self.roi.get_source_by_name(name)
            if kwargs['fit_position']:
                for k in ['ra_err', 'dec_err', 'glon_err', 'glat_err',
                          'pos_err', 'pos_err_semimajor', 'pos_err_semiminor',
                          'pos_r68', 'pos_r95', 'pos_r99', 'pos_angle']:
                    src[k] = o[k]

        else:
            self.set_source_morphology(name, spatial_model=src['SpatialModel'],
                                       spatial_pars=src.spatial_pars,
                                       update_source=False)
            # Restore ROI to previous state
            saved_state.restore()
            self._sync_params(name)
            self._update_roi()

        self.logger.info('Best-fit extension: %6.4f + %6.4f - %6.4f'
                         % (o['ext'], o['ext_err_hi'], o['ext_err_lo']))
        self.logger.info('TS_ext:        %.3f' % o['ts_ext'])
        self.logger.info('Extension UL: %6.4f' % o['ext_ul95'])
        self.logger.info('LogLike: %12.3f DeltaLogLike: %12.3f',
                         o.loglike_ext, o.loglike_ext - o.loglike_init)

        if kwargs['fit_position']:
            self.logger.info('Position:\n'
                             '(  ra, dec) = (%10.4f +/- %8.4f,%10.4f +/- %8.4f)\n'
                             '(glon,glat) = (%10.4f +/- %8.4f,%10.4f +/- %8.4f)\n'
                             'offset = %8.4f r68 = %8.4f r95 = %8.4f r99 = %8.4f',
                             o.ra, o.ra_err, o.dec, o.dec_err,
                             o.glon, o.glon_err, o.glat, o.glat_err,
                             o.pos_offset, o.pos_r68, o.pos_r95, o.pos_r99)

        return o
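
The headline numbers logged at the end of _extension follow directly from the stored profiles. Below is a toy sketch (invented numbers, not fermipy output) of how ts_ext and a 95% width upper limit can be read off a scan, using the same 2.71/2 delta-log-likelihood convention that appears in the upper-limit code earlier in this document.

import numpy as np

# Toy width scan: log-likelihood versus source width (deg)
width = np.array([0.0, 0.01, 0.02, 0.05, 0.1, 0.2, 0.4])
loglike = np.array([-1000.0, -999.0, -997.5, -995.0, -996.0, -1005.0, -1030.0])

loglike_ptsrc = loglike[0]          # width = 0 is the point-source case
imax = np.argmax(loglike)
loglike_ext = loglike[imax]

# TS for extension: twice the log-likelihood ratio
ts_ext = 2.0 * (loglike_ext - loglike_ptsrc)

# 95% upper limit on the width: where loglike drops by 2.71/2 from
# its maximum, interpolated on the falling side of the profile.
dll = loglike_ext - 2.71 / 2.0
falling = slice(imax, len(width))
ext_ul95 = np.interp(-dll, -loglike[falling], width[falling])

print("ts_ext = %.2f, ext_ul95 = %.3f deg" % (ts_ext, ext_ul95))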
Example #15
0
    def _calculate(self, like):
        """ Compute the flux data points for each energy. """

        name = self.name
        verbosity = self.verbosity
        init_energies = like.energies[[0, -1]]

        # Freeze all sources except one to make sed of.
        all_sources = like.sourceNames()

        if name not in all_sources:
            raise Exception("Cannot find source %s in list of sources" % name)

        # make copy of parameter values + free parameters

        saved_state = LikelihoodState(like)

        if self.freeze_background:
            if verbosity: print('Freezing all parameters')
            # freeze all other sources
            for i in range(len(like.model.params)):
                like.freeze(i)

        # convert source to a PowerLaw of frozen index

        source = like.logLike.getSource(name)
        old_spectrum = source.spectrum()
        like.setSpectrum(name, 'PowerLaw')

        index = like[like.par_index(name, 'Index')]
        index.setTrueValue(self.powerlaw_index)
        index.setFree(0)
        like.syncSrcParams(name)

        # assume a canonical dnde=1e-11 at 1GeV index 2 starting value
        dnde_start = 1e-11 * (self.energy / 1e3)**(-2)

        optverbosity = max(verbosity - 1, 0)  # see IntegralUpperLimit.py

        for i, (e, lower, upper) in enumerate(
                zip(self.energy, self.lower_energy, self.upper_energy)):

            if verbosity:
                print('Calculating spectrum from %.0dMeV to %.0dMeV'
                      % (lower, upper))

            # good starting guess for source
            prefactor = like[like.par_index(name, 'Prefactor')]
            prefactor.setScale(dnde_start[i])
            prefactor.setValue(1)
            prefactor.setBounds(1e-10, 1e10)

            scale = like[like.par_index(name, 'Scale')]
            scale.setScale(1)
            scale.setValue(e)
            like.syncSrcParams(name)

            like.setEnergyRange(float(lower) + 1, float(upper) - 1)

            try:
                like.fit(optverbosity, covar=True)
            except Exception as ex:
                if verbosity: print('ERROR gtlike fit: ', ex)

            self.ts[i] = like.Ts(name, reoptimize=self.reoptimize_ts)

            prefactor = like[like.par_index(name, 'Prefactor')]
            self.dnde[i] = prefactor.getTrueValue()

            if self.do_minos:
                if verbosity:
                    print('Calculating minos errors from %.0dMeV to %.0dMeV'
                          % (lower, upper))
                self.dnde_lower_err[i], self.dnde_upper_err[i] = \
                    like.minosError(name, 'Prefactor')
                # make lower errors positive
                self.dnde_lower_err[i] *= -1 * prefactor.getScale()
                self.dnde_upper_err[i] *= prefactor.getScale()
                self.dnde_err[i] = (self.dnde_upper_err[i] +
                                    self.dnde_lower_err[i]) / 2
            else:
                self.dnde_err[i] = (prefactor.parameter.error() *
                                    prefactor.parameter.getScale())

            self.flux[i] = like.flux(name, lower, upper)
            self.flux_err[i] = like.fluxError(name, lower, upper)

            self.eflux[i] = like.energyFlux(name, lower, upper)
            self.eflux_err[i] = like.energyFluxError(name, lower, upper)

            if self.ts[i] < self.min_ts or self.always_upper_limit:
                if verbosity:
                    print('Calculating upper limit from %.0dMeV to %.0dMeV'
                          % (lower, upper))
                self.dnde_ul[i], self.flux_ul[i], self.eflux_ul[i] = \
                    SED.upper_limit(like, name, self.ul_algorithm,
                                    lower, upper,
                                    confidence=self.ul_confidence,
                                    verbosity=verbosity)

            if verbosity:
                print(lower, upper, self.dnde[i], self.dnde_err[i],
                      self.ts[i], self.dnde_ul[i])

            self.npred[i] = like.NpredValue(name)
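
The band-by-band loop above leaves dN/dE points and errors in per-bin arrays. A minimal sketch of the surrounding bookkeeping follows, with invented numbers; it also shows why the dnde_start scale trick keeps the fitted Prefactor value of order unity.

import numpy as np

# Hypothetical per-bin results mimicking the attributes filled above
energy = np.array([200.0, 2000.0, 20000.0])       # MeV, bin centers
dnde = np.array([3.2e-10, 2.8e-12, 2.5e-14])      # ph / (cm^2 s MeV)
dnde_err = np.array([4.0e-11, 5.0e-13, 9.0e-15])

# The same per-bin scale trick as above: with the Prefactor scale set
# to dnde_start[i], the fitted "value" stays of order unity.
dnde_start = 1e-11 * (energy / 1e3) ** -2
print("fitted prefactor values:", dnde / dnde_start)

# Conventional SED points: E^2 dN/dE in MeV / (cm^2 s)
e2dnde = energy ** 2 * dnde
e2dnde_err = energy ** 2 * dnde_err
for e, f, df in zip(energy, e2dnde, e2dnde_err):
    print("%8.0f MeV  E2dNdE = %.3e +/- %.3e MeV/cm^2/s" % (e, f, df))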
Example #16
0
class SummedLikelihood(AnalysisBase):
    def __init__(self, optimizer='Minuit'):
        self.composite = pyLike.SummedLikelihood()
        #the C++ SummedLikelihood has many if not all the properties
        #of a logLike object
        self.logLike = self.composite
        self.components = []
        self.covariance = None
        self.covar_is_current = False
        self.optObject = None
        self.optimizer = optimizer
        self.tolType = pyLike.ABSOLUTE
        self.tol = 1e-2
        self.saved_state = None

    def sourceNames(self):
        return self.components[0].sourceNames()

    def addComponent(self, like):
        self.composite.addComponent(like.logLike)
        self.components.append(like)
        if len(self.components) == 1:
            self.model = self.components[0].model

    def syncSrcParams(self, src=None):
        if src is not None:
            for comp in self.components:
                comp.logLike.syncSrcParams(src)
        else:
            for comp in self.components:
                for src in self.sourceNames():
                    comp.logLike.syncSrcParams(src)

    def fit(self,
            verbosity=3,
            tol=None,
            optimizer=None,
            covar=False,
            optObject=None):
        if tol is None:
            tol = self.tol
        errors = self._errors(optimizer,
                              verbosity,
                              tol,
                              covar=covar,
                              optObject=optObject)
        negLogLike = -self.composite.value()
        self.saveBestFit(negLogLike)
        return negLogLike

    def optimize(self, verbosity=3, tol=None, optimizer=None):
        self._syncParams()
        if optimizer is None:
            optimizer = self.optimizer
        if tol is None:
            tol = self.tol
        optFactory = pyLike.OptimizerFactory_instance()
        myOpt = optFactory.create(optimizer, self.composite)
        myOpt.find_min_only(verbosity, tol, self.tolType)
        self.saveBestFit()

    def normPar(self, source):
        return Parameter([like.normPar(source) for like in self.components])

    def __call__(self):
        negLogLike = -self.composite.value()
        self.saveBestFit(negLogLike)
        return negLogLike

    def params(self):
        my_params = []
        for i, like in enumerate(self.components):
            for j, par in enumerate(like.params()):
                if i == 0:
                    my_params.append(Parameter([par]))
                else:
                    my_params[j].addParam(par)
        return my_params

    def nFreeParams(self):
        '''Count the number of free parameters in the active model.'''
        nF = 0
        pars = self.params()
        for par in pars:
            if par.isFree():
                nF += 1
        return nF

    def saveBestFit(self, negLogLike=None):
        if negLogLike is None:
            negLogLike = -self.composite.value()
        if (self.saved_state is None
                or negLogLike <= self.saved_state.negLogLike):
            self.saveCurrentFit(negLogLike)

    def saveCurrentFit(self, negLogLike=None):
        self.saved_state = LikelihoodState(self, negLogLike)

    def restoreBestFit(self):
        if (self.saved_state is not None
                and self() > self.saved_state.negLogLike):
            self.saved_state.restore()
        else:
            self.saveCurrentFit()

    def NpredValue(self, src, weighted=False):
        return self.composite.NpredValue(src, weighted)

    def total_nobs(self, weighted=False):
        return sum([x.total_nobs(weighted) for x in self.components])

    def __repr__(self):
        return str(self.components[0].model)

    def _syncParams(self):
        for component in self.components:
            component.logLike.syncParams()

    def __getitem__(self, name):
        item = self.model[name]
        try:
            item.type
            return item
        except AttributeError:
            par = Parameter([item])
            for comp in self.components[1:]:
                par.addParam(comp[name])
            return par

    def __setitem__(self, name, value):
        for component in self.components:
            component[name] = value
            component.syncSrcParams(self.model[name].srcName)

    def thaw(self, i):
        for component in self.components:
            component.thaw(i)
        self.saved_state = None

    def freeze(self, i):
        for component in self.components:
            component.freeze(i)
        self.saved_state = None

    def _errors(self,
                optimizer=None,
                verbosity=0,
                tol=None,
                useBase=False,
                covar=False,
                optObject=None):
        self._syncParams()
        if optimizer is None:
            optimizer = self.optimizer
        if tol is None:
            tol = self.tol
        if optObject is None:
            optFactory = pyLike.OptimizerFactory_instance()
            myOpt = optFactory.create(optimizer, self.composite)
        else:
            myOpt = optObject
        self.optObject = myOpt
        myOpt.find_min(verbosity, tol, self.tolType)
        errors = myOpt.getUncertainty(useBase)
        if covar:
            self.covariance = myOpt.covarianceMatrix()
            self.covar_is_current = True
        else:
            self.covar_is_current = False
        self._set_errors(errors)
        return errors

    def _set_errors(self, errors):
        source_attributes = self.components[0].getExtraSourceAttributes()
        my_errors = list(errors)
        self.composite.setErrors(my_errors)
        for component in self.components:
            component.model = SourceModel(component.logLike)
            for src in source_attributes:
                component.model[src].__dict__.update(source_attributes[src])

    def minosError(self, srcname, parname, level=1):
        freeParams = pyLike.ParameterVector()
        self.composite.getFreeParams(freeParams)
        saved_values = [par.getValue() for par in freeParams]
        par_index = self.components[0].par_index(srcname, parname)
        index = self.composite.findIndex(par_index)
        if index == -1:
            raise RuntimeError("Invalid parameter specification")
        try:
            errors = self.optObject.Minos(index, level)
            self.composite.setFreeParamValues(saved_values)
            return errors
        except RuntimeError as message:
            print("Minos error encountered for parameter %i" % index)
            self.composite.setFreeParamValues(saved_values)
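
A hedged usage sketch for the class above: two binned components joined into one fit. The file names and IRF strings are hypothetical; BinnedObs and BinnedAnalysis are the standard pyLikelihood constructors.

from BinnedAnalysis import BinnedAnalysis, BinnedObs
from SummedLikelihood import SummedLikelihood

# Hypothetical file names for two event-type selections
obs_front = BinnedObs(srcMaps='srcmap_front.fits', expCube='ltcube.fits',
                      binnedExpMap='bexpmap_front.fits',
                      irfs='P8R3_SOURCE_V3::FRONT')
obs_back = BinnedObs(srcMaps='srcmap_back.fits', expCube='ltcube.fits',
                     binnedExpMap='bexpmap_back.fits',
                     irfs='P8R3_SOURCE_V3::BACK')

like_front = BinnedAnalysis(obs_front, srcModel='model.xml', optimizer='Minuit')
like_back = BinnedAnalysis(obs_back, srcModel='model.xml', optimizer='Minuit')

# Components share the parameter model; the composite sums the logLike
summed = SummedLikelihood(optimizer='Minuit')
summed.addComponent(like_front)
summed.addComponent(like_back)

negLogLike = summed.fit(verbosity=0, covar=True)
print('joint -logLike = %f' % negLogLike)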
Example #17
0
    def wrapper(self, *args, **kwargs):
        saved_state = LikelihoodState(self.like)
        o = func(self, *args, **kwargs)
        saved_state.restore()
        return o
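
Example #17 is only the inner function of what is presumably a state-preserving decorator. A plausible completion is shown below, assuming the usual closure over func; the decorator name is invented, and only the wrapper body comes from the example.

import functools

from LikelihoodState import LikelihoodState

def preserve_like_state(func):
    """Hypothetical decorator built around the wrapper above: snapshot
    the pyLikelihood parameter state, run the method, then restore."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        saved_state = LikelihoodState(self.like)
        o = func(self, *args, **kwargs)
        saved_state.restore()
        return o
    return wrapper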
Example #18
0
    def _tsmap_pylike(self, prefix, **kwargs):
        """Evaluate the TS for an additional source component at each point
        in the ROI.  This is the brute force implementation of TS map
        generation that runs a full pyLikelihood fit
        at each point in the ROI."""

        logLike0 = -self.like()
        self.logger.info('LogLike: %f' % logLike0)

        saved_state = LikelihoodState(self.like)

        # Get the ROI geometry

        # Loop over pixels
        w = copy.deepcopy(self._skywcs)
        #        w = create_wcs(self._roi.skydir,cdelt=self._binsz,crpix=50.5)

        data = np.zeros((self.npix, self.npix))
        #        self.free_sources(free=False)

        xpix = (np.linspace(0, self.npix - 1, self.npix)[:, np.newaxis] *
                np.ones(data.shape))
        ypix = (np.linspace(0, self.npix - 1, self.npix)[np.newaxis, :] *
                np.ones(data.shape))

        radec = wcs_utils.pix_to_skydir(xpix, ypix, w)
        radec = (np.ravel(radec.ra.deg), np.ravel(radec.dec.deg))

        testsource_dict = {
            'ra': radec[0][0],
            'dec': radec[1][0],
            'SpectrumType': 'PowerLaw',
            'Index': 2.0,
            'Scale': 1000,
            'Prefactor': {'value': 0.0, 'scale': 1e-13},
            'SpatialModel': 'PSFSource',
        }

        #        src = self.roi.get_source_by_name('tsmap_testsource')

        for i, (ra, dec) in enumerate(zip(radec[0], radec[1])):
            testsource_dict['ra'] = ra
            testsource_dict['dec'] = dec
            #                        src.set_position([ra,dec])
            self.add_source('tsmap_testsource', testsource_dict, free=True,
                            init_source=False, save_source_maps=False)

            #            for c in self.components:
            #                c.update_srcmap_file([src],True)

            self.set_parameter('tsmap_testsource', 'Prefactor', 0.0,
                               update_source=False)
            self.fit(loglevel=logging.DEBUG, update=False)

            logLike1 = -self.like()
            ts = max(0, 2 * (logLike1 - logLike0))

            data.flat[i] = ts

            #            print i, ra, dec, ts
            #            print self.like()
            # print self.components[0].like.model['tsmap_testsource']

            self.delete_source('tsmap_testsource')

        saved_state.restore()

        outfile = os.path.join(self.config['fileio']['workdir'], 'tsmap.fits')
        fits_utils.write_fits_image(data, w, outfile)
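
Stripped of the fermipy bookkeeping, the per-pixel logic of the brute-force TS map reduces to a few lines. In the schematic sketch below, add_test_source, fit and remove_test_source are placeholders for the calls used above, not real fermipy functions.

import numpy as np

def brute_force_tsmap(loglike0, pixels, add_test_source, fit,
                      remove_test_source):
    """Schematic of _tsmap_pylike: loglike0 is the log-likelihood of
    the model without the test source; pixels is an iterable of
    (ra, dec); the three callables stand in for the operations above."""
    ts = np.zeros(len(pixels))
    for i, (ra, dec) in enumerate(pixels):
        add_test_source(ra, dec)      # PowerLaw test source at the pixel
        loglike1 = fit()              # refit with the test source
        ts[i] = max(0.0, 2.0 * (loglike1 - loglike0))
        remove_test_source()
    return ts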
Example #19
0
class ROILikelihoodOptimizer:
    """Class to optimize Likelihood of ROI model (a replacement for gtlike)."""
    def __init__(self, like, sourcesOfInterest=None, optimizer="Minuit",
                 tol=1e-8, chatter=3, 
                 freeze_nuisance_sources_immediately = False,
                 freeze_nuisance_sources_after_optimize = False,
                 nuisance_npred_frac_max = 0.05,
                 nuisance_soi_sep_min_deg = 5.0,
                 calculate_full_ts_for_all = False):
        self.ver = "$Id: ROILikelihoodOptimizer.py 2795 2011-05-06 14:48:45Z sfegan $"
        self.res = {}
        self._like = like
        self._original_state = LikelihoodState(self._like)
        self._chatter = 0
        self._SOI = []
        self._freeze_nuisance_immediately = freeze_nuisance_sources_immediately
        self._freeze_nuisance_after_optimize = \
            freeze_nuisance_sources_after_optimize
        self._nuisance_npred_frac_max = nuisance_npred_frac_max
        self._nuisance_soi_sep_min_deg = nuisance_soi_sep_min_deg
        self._calculate_full_ts_for_all = calculate_full_ts_for_all

        if sourcesOfInterest is not None:
            if type(sourcesOfInterest) != list:
                sourcesOfInterest = [ sourcesOfInterest ]
            for s in sourcesOfInterest:
                if s in like.sourceNames():
                    self._SOI.append(s)
                else:
                    raise RuntimeError("Invalid source of interest: " + s)
        if optimizer:
            self._like.optimizer = optimizer
        if tol:
            self._like.tol = tol
        if chatter:
            self._chatter = chatter
        
    def _nuisanceSourceNames(self, SOI = None, sep_min_deg = None, 
                             npred_frac_max = None):
        if SOI is None:
            SOI = self._SOI
        if type(SOI) != list:
            SOI = [ SOI ]
        if npred_frac_max is None:
            npred_frac_max = self._nuisance_npred_frac_max
        if sep_min_deg is None:
            sep_min_deg = self._nuisance_soi_sep_min_deg
        sep_min_rad = sep_min_deg/180.0*math.pi
        nuisance = []
        npred_total = 0
        for sn in self._like.sourceNames():
            npred_total += self._like.NpredValue(sn)
        npred_thresh = npred_total * npred_frac_max
        for sn in self._like.sourceNames():
            if sn in SOI:
                continue
            s = self._like[sn]
            if s.type != 'PointSource':
                continue
            ps = pyLike.PointSource_cast(s.src)
            if ps.fixedSpectrum():
                continue
            if self._like.NpredValue(sn) > npred_thresh:
                continue
            d = ps.getDir()
            min_sep = math.pi
            for soi_sn in SOI:
                soi_s = self._like[soi_sn]
                soi_ps = pyLike.PointSource_cast(soi_s.src)
                soi_d = soi_ps.getDir()
                sep = d.difference(soi_d)
                if(sep < min_sep):
                    min_sep = sep
            if min_sep < sep_min_rad:
                continue
            nuisance.append(sn)
        return nuisance

    def _freeze_sources(self):
        nuisance_sources = self._nuisanceSourceNames()
        for sn in nuisance_sources:
            srcfreepar = self._like.freePars(sn)
            self._like.setFreeFlag(sn, srcfreepar, False)
            self._like.syncSrcParams(sn)
        return nuisance_sources

    def restoreOriginalState(self):
        self._original_state.restore()

    def run(self, noFit=False):
        L = self._like
        start_state = LikelihoodState(L)

        if self._freeze_nuisance_immediately:
            nuisance_sources = self._freeze_sources()
            if self._chatter > 0:
                for sn in nuisance_sources:
                    print("Freezing source:", sn)

        if self._freeze_nuisance_after_optimize:
            L.optimize()
            nuisance_sources = self._freeze_sources()
            if self._chatter > 0:
                for sn in nuisance_sources:
                    print("Freezing source:", sn)

        if not noFit:
            L.fit()
     
        res = {}
        res['version']                  = self.ver
        res['like_val']                 = L.logLike.value()
        res['fit_state']                = L.optObject.getRetCode()
        res['cov_matrix']               = L.optObject.covarianceMatrix()

        res['src'] = {}
        npred_total = 0
        nfree_param = 0
        for sn in L.sourceNames():
            s = L[sn]
            ss = s.src

            src_info = {}
            src_info["type"]            = ss.getType()
            src_info["fixed"]           = ss.fixedSpectrum()

            if not src_info["fixed"] and src_info["type"] == 'Point':
                if self._chatter > 0:
                    print "Calculating approximate TS for:",sn
                src_info['TS_approx']   = L.Ts(sn,reoptimize=False)
                if sn in self._SOI or self._calculate_full_ts_for_all:
                    if self._chatter > 0:
                        print "Calculating full TS for:",sn
                    src_info['TS']      = L.Ts(sn,reoptimize=True)

            # Spectrum parameters
            spec = ss.spectrum()
            spec_param_names = s.funcs['Spectrum'].paramNames
            src_info["spec_type"]       = spec.genericName()
            src_info["spec_free_par"]   = {}
            spec_info = {}
            for pn in spec_param_names:
                param = spec.getParam(pn)
                param_info = {}
                param_info['free']            = param.isFree()
                if param.isFree():
                    src_info["spec_free_par"][pn] = nfree_param
                    param_info['free_iparam'] = nfree_param
                    param_info['error']       = param.error()
                    param_info['true_error']  = param.error()*param.getScale()
                    nfree_param+=1
                param_info['true_value']      = param.getTrueValue()
                param_info['value']           = param.getValue()
                param_info['scale']           = param.getScale()
                param_info['bounds']          = param.getBounds()
                spec_info[pn] = param_info
            cov_m = []
            for ipn in src_info["spec_free_par"]:
                cov = {}
                cov_v = []
                for jpn in src_info["spec_free_par"]:
                    iparam = spec_info[ipn]['free_iparam']
                    jparam = spec_info[jpn]['free_iparam']
                    cov[jpn] = res['cov_matrix'][iparam][jparam]
                    cov_v.append(cov[jpn])
                spec_info[ipn]['cov'] = cov
                cov_m.append(cov_v)
            src_info["spec_par"]     = spec_info
            src_info["spec_cov"]     = cov_m
            cov_m = numpy.matrix(cov_m)

            # Flux value, derivatives and error
            info = {}
            info['value']              = ss.flux()
            if not ss.fixedSpectrum():
                info["deriv"]          = {}
                v=[]
                for pn in src_info["spec_free_par"]:
                    x = ss.fluxDeriv(pn)
                    info["deriv"][pn]  = x
                    v.append(x)
                v = numpy.matrix(v)
                src_info["error"]      = math.sqrt(v*cov_m*v.transpose())
            src_info["flux"]         = info

            # Energy flux value, derivatives and error
            info = {}
            info['value']              = ss.energyFlux()
            if not ss.fixedSpectrum():
                info["deriv"]          = {}
                v=[]
                for pn in src_info["spec_free_par"]:
                    x = ss.energyFluxDeriv(pn)
                    info["deriv"][pn]  = x
                    v.append(x)
                v = numpy.matrix(v)
                info["error"]          = math.sqrt(v*cov_m*v.transpose())
            src_info["energy_flux"]  = info 

            # Npred value, derivatives and error
            npred = L.NpredValue(sn)
            npred_total += npred
            info = {}
            info['value']              = npred
#            if not ss.fixedSpectrum():
#                info["deriv"]          = {}
#                v = []
#                for pn in src_info["spec_free_par"]:
#                    x = 0 #ss.NpredDeriv(pn) CRASH
#                    info["deriv"][pn]  = x
#                    v.append(x)
#                v = numpy.matrix(v)
#                info["error"]          = math.sqrt(v*cov_m*v.transpose())
            src_info["npred"]        = info
 
            # Spectral plots

            # Set source results
            res['src'][sn] = src_info
            
        res['total_nobs']               = L.total_nobs()
        res['total_npred']              = npred_total
        
        self.res = res
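
The flux and energy-flux errors assembled in run() are standard linear error propagation: sigma_F^2 = g^T C g, with g the vector of flux derivatives with respect to the free spectral parameters and C their covariance matrix. A small self-contained check of that formula with toy numbers:

import math

import numpy as np

# Toy covariance matrix of two free spectral parameters and the
# gradient of the flux with respect to them (hypothetical values)
cov = np.array([[4.0e-4, 1.0e-5],
                [1.0e-5, 9.0e-6]])
grad = np.array([2.0e-7, 5.0e-8])

# sigma_F = sqrt(g^T C g), as in math.sqrt(v * cov_m * v.transpose())
flux_err = math.sqrt(grad @ cov @ grad)
print("propagated flux error: %e" % flux_err)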
Example #20
0
    def _localize(self, name, **kwargs):

        nstep = kwargs.get('nstep')
        dtheta_max = kwargs.get('dtheta_max')
        update = kwargs.get('update', True)
        prefix = kwargs.get('prefix', '')
        use_cache = kwargs.get('use_cache', False)
        free_background = kwargs.get('free_background', False)
        free_radius = kwargs.get('free_radius', None)
        fix_shape = kwargs.get('fix_shape', False)

        saved_state = LikelihoodState(self.like)
        loglike_init = -self.like()
        self.logger.debug('Initial Model Log-Likelihood: %f', loglike_init)

        if not free_background:
            self.free_sources(free=False, loglevel=logging.DEBUG)

        if free_radius is not None:
            diff_sources = [s.name for s in self.roi.sources if s.diffuse]
            skydir = self.roi[name].skydir
            free_srcs = [
                s.name for s in self.roi.get_sources(
                    skydir=skydir, distance=free_radius, exclude=diff_sources)
            ]
            self.free_sources_by_name(free_srcs,
                                      pars='norm',
                                      loglevel=logging.DEBUG)

        src = self.roi.copy_source(name)
        skydir = src.skydir
        skywcs = self._skywcs
        src_pix = skydir.to_pixel(skywcs)

        fit0 = self._fit_position_tsmap(name,
                                        prefix=prefix,
                                        dtheta_max=dtheta_max,
                                        zmin=-3.0,
                                        use_pylike=False)

        self.logger.debug(
            'Completed localization with TS Map.\n'
            '(ra,dec) = (%10.4f,%10.4f) '
            '(glon,glat) = (%10.4f,%10.4f)', fit0['ra'], fit0['dec'],
            fit0['glon'], fit0['glat'])

        # Fit baseline (point-source) model
        self.free_source(name, loglevel=logging.DEBUG)
        if fix_shape:
            self.free_source(name,
                             free=False,
                             pars='shape',
                             loglevel=logging.DEBUG)
        fit_output = self._fit(loglevel=logging.DEBUG,
                               **kwargs.get('optimizer', {}))

        # Save likelihood value for baseline fit
        loglike_base = fit_output['loglike']
        self.logger.debug('Baseline Model Log-Likelihood: %f', loglike_base)

        o = defaults.make_default_tuple(defaults.localize_output)
        o.name = name
        o.config = kwargs
        o.fit_success = True
        o.loglike_init = loglike_init
        o.loglike_base = loglike_base
        o.loglike_loc = np.nan
        o.dloglike_loc = np.nan

        if fit0['fit_success']:
            scan_cdelt = 2.0 * fit0['pos_r95'] / (nstep - 1.0)
        else:
            scan_cdelt = np.abs(skywcs.wcs.cdelt[0])

        self.logger.debug(
            'Refining localization search to '
            'region of width: %.4f deg', scan_cdelt * nstep)

        fit1 = self._fit_position_scan(name,
                                       skydir=fit0['skydir'],
                                       scan_cdelt=scan_cdelt,
                                       **kwargs)

        o.loglike_loc = fit1['loglike']
        o.dloglike_loc = o.loglike_loc - o.loglike_base
        o.tsmap = fit0.pop('tsmap')
        o.tsmap_peak = fit1.pop('tsmap')
        # o.update(fit1)

        # Best fit position and uncertainty from fit to TS map
        o.fit_init = fit0

        # Best fit position and uncertainty from pylike scan
        o.fit_scan = fit1
        o.update(fit1)

        cdelt0 = np.abs(skywcs.wcs.cdelt[0])
        cdelt1 = np.abs(skywcs.wcs.cdelt[1])
        pix = fit1['skydir'].to_pixel(skywcs)
        o.pos_offset = skydir.separation(fit1['skydir']).deg
        o.xpix = float(pix[0])
        o.ypix = float(pix[1])
        o.deltax = (o.xpix - src_pix[0]) * cdelt0
        o.deltay = (o.ypix - src_pix[1]) * cdelt1

        o.ra_preloc = skydir.ra.deg
        o.dec_preloc = skydir.dec.deg
        o.glon_preloc = skydir.galactic.l.deg
        o.glat_preloc = skydir.galactic.b.deg

        if o.pos_offset > dtheta_max:
            o.fit_success = False

        if not o.fit_success:
            self.logger.warning('Fit to localization contour failed.')
        elif not o.fit_inbounds:
            self.logger.warning('Best-fit position outside of search region.')
        else:
            self.logger.info('Localization succeeded.')

        if update and ((not o.fit_success) or (not o.fit_inbounds)):
            self.logger.warning(
                'Localization failed.  Keeping existing position.')

        if update and o.fit_success and o.fit_inbounds:
            self.logger.info('Updating source %s '
                             'to localized position.', name)
            src = self.delete_source(name)
            src.set_position(fit1['skydir'])
            self.add_source(name, src, free=True)
            self.free_source(name, loglevel=logging.DEBUG)
            if fix_shape:
                self.free_source(name,
                                 free=False,
                                 pars='shape',
                                 loglevel=logging.DEBUG)

            fit_output = self.fit(loglevel=logging.DEBUG)
            o.loglike_loc = fit_output['loglike']
            o.dloglike_loc = o.loglike_loc - o.loglike_base
            src = self.roi.get_source_by_name(name)

            src['glon_err'] = o.glon_err
            src['glat_err'] = o.glat_err
            src['ra_err'] = o.ra_err
            src['dec_err'] = o.dec_err
            src['pos_err'] = o.pos_err
            src['pos_err_semimajor'] = o.pos_err_semimajor
            src['pos_err_semiminor'] = o.pos_err_semiminor
            src['pos_r68'] = o.pos_r68
            src['pos_r95'] = o.pos_r95
            src['pos_r99'] = o.pos_r99
            src['pos_angle'] = o.pos_angle
            src['pos_gal_cov'] = o.pos_gal_cov
            src['pos_gal_corr'] = o.pos_gal_corr
            src['pos_cel_cov'] = o.pos_cel_cov
            src['pos_cel_corr'] = o.pos_cel_corr
        else:
            saved_state.restore()
            self._sync_params(name)
            self._update_roi()

        self.logger.info(
            'Localization completed with new position:\n'
            '(  ra, dec) = (%10.4f +/- %8.4f,%10.4f +/- %8.4f)\n'
            '(glon,glat) = (%10.4f +/- %8.4f,%10.4f +/- %8.4f)\n'
            'offset = %8.4f r68 = %8.4f r95 = %8.4f r99 = %8.4f', o.ra,
            o.ra_err, o.dec, o.dec_err, o.glon, o.glon_err, o.glat, o.glat_err,
            o.pos_offset, o.pos_r68, o.pos_r95, o.pos_r99)
        self.logger.info('LogLike: %12.3f DeltaLogLike: %12.3f', o.loglike_loc,
                         o.loglike_loc - o.loglike_init)

        return o
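
Both _extension (Example #14) and _localize (Example #20) are normally reached through the public fermipy methods. A hedged usage sketch follows; the configuration file and source name are illustrative only.

from fermipy.gtanalysis import GTAnalysis

gta = GTAnalysis('config.yaml')   # hypothetical configuration file
gta.setup()
gta.fit()

# Drives _localize (Example #20); update=True moves the source
loc = gta.localize('3FGL J0000.0+0000', update=True,
                   free_background=False, make_plots=False)
print(loc['ra'], loc['dec'], loc['pos_r95'])

# Drives _extension (Example #14)
ext = gta.extension('3FGL J0000.0+0000', spatial_model='RadialDisk',
                    free_background=False)
print(ext['ts_ext'], ext['ext'], ext['ext_ul95'])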