Example #1
0
    def _fit_extension(self, name, **kwargs):
        """Profile the source extension in two passes.

        A coarse log-spaced scan of the likelihood versus extension width
        is followed by a finer linear scan centered on the first-pass
        solution.  Returns a dict with the best-fit extension, its errors
        and 95% upper limit, and the source position.
        """
        spatial_model = kwargs.get('spatial_model', 'RadialGaussian')
        optimizer = kwargs.get('optimizer', {})
        fit_position = kwargs.get('fit_position', False)
        skydir = kwargs.get('skydir', self.roi[name].skydir)
        psf_scale_fn = kwargs.get('psf_scale_fn', None)
        reoptimize = kwargs.get('reoptimize', True)

        # Coarse pass: log-spaced grid prefixed with the point-source case.
        widths = np.concatenate(([0.0], np.logspace(-2.0, 0.5, 16)))
        lnl = self._scan_extension(name, spatial_model=spatial_model,
                                   width=widths, optimizer=optimizer,
                                   skydir=skydir,
                                   psf_scale_fn=psf_scale_fn,
                                   reoptimize=reoptimize)
        lims = utils.get_parameter_limits(widths, lnl)

        # No parabolic error available -- fall back on the grid maximum.
        if not np.isfinite(lims['err']):
            best = widths[np.argmax(lnl)]
            lims['x0'] = best
            lims['err'] = best
            lims['err_lo'] = best
            lims['err_hi'] = best

        # Fine pass: linear grid from ~2 sigma below the best fit up to
        # 1.5x the upper limit (or 2 sigma above when the UL is undefined).
        sigma = max(10**-2.0, lims['err'])
        lo = max(lims['x0'] - 2.0 * sigma, 0)
        if np.isfinite(lims['ul']):
            hi = 1.5 * lims['ul']
        else:
            hi = lims['x0'] + 2.0 * sigma

        nstep = max(11, int((hi - lo) / sigma))
        widths_fine = np.linspace(lo, hi, nstep)
        lnl_fine = self._scan_extension(name, spatial_model=spatial_model,
                                        width=widths_fine,
                                        optimizer=optimizer,
                                        skydir=skydir,
                                        psf_scale_fn=psf_scale_fn,
                                        reoptimize=reoptimize)
        lims = utils.get_parameter_limits(widths_fine, lnl_fine)

        return {'ext': max(lims['x0'], 10**-2.5),
                'ext_ul95': lims['ul'],
                'ext_err_lo': lims['err_lo'],
                'ext_err_hi': lims['err_hi'],
                'ext_err': lims['err'],
                'loglike_ext': lims['lnlmax'],
                'ra': skydir.ra.deg,
                'dec': skydir.dec.deg,
                'glon': skydir.galactic.l.deg,
                'glat': skydir.galactic.b.deg,
                'pos_offset': 0.0}
Example #2
0
    def _fit_extension_ebin(self, name, o, **kwargs):
        """Run the extension scan independently in each energy bin.

        Fills the per-bin arrays of *o* in place (ebin_loglike_ptsrc,
        ebin_loglike, ebin_ext*, ebin_ts_ext, ebin_dloglike).  The source
        is temporarily replaced with a point source to measure the per-bin
        null likelihoods, then restored to its original morphology.
        """
        optimizer = kwargs.get('optimizer', {})
        spatial_model = kwargs.get('spatial_model')
        psf_scale_fn = kwargs.pop('psf_scale_fn', None)
        # NOTE(review): reoptimize is accepted for interface symmetry but
        # the fast per-bin scan below always runs with reoptimize=False.
        reoptimize = kwargs.pop('reoptimize', True)

        src = self.roi.copy_source(name)
        self.set_source_morphology(name,
                                   spatial_model='PointSource',
                                   use_pylike=False,
                                   psf_scale_fn=psf_scale_fn)

        # Null (point-source) log-likelihood in each energy bin.
        for i, (logemin, logemax) in enumerate(
                zip(self.log_energies[:-1], self.log_energies[1:])):

            self.set_energy_range(logemin, logemax)
            o.ebin_loglike_ptsrc[i] = -self.like()

        self.set_energy_range(self.log_energies[0], self.log_energies[-1])
        self.set_source_morphology(name,
                                   spatial_model=src['SpatialModel'],
                                   spatial_pars=src.spatial_pars,
                                   psf_scale_fn=psf_scale_fn,
                                   use_pylike=False)

        # Bug fix: use the locally resolved optimizer (with its {} default)
        # instead of kwargs['optimizer'], which raised KeyError when the
        # caller did not pass an optimizer explicitly.
        o.ebin_loglike = self._scan_extension_fast_ebin(
            name,
            spatial_model=spatial_model,
            width=o.width,
            optimizer=optimizer,
            psf_scale_fn=psf_scale_fn,
            reoptimize=False)

        # Extract per-bin best-fit extension, errors, UL and extension TS.
        for i, (logemin, logemax) in enumerate(
                zip(self.log_energies[:-1], self.log_energies[1:])):
            ul_data = utils.get_parameter_limits(o.width, o.ebin_loglike[i])
            o.ebin_ext[i] = max(ul_data['x0'], 10**-2.5)
            o.ebin_ext_err[i] = ul_data['err']
            o.ebin_ext_err_lo[i] = ul_data['err_lo']
            o.ebin_ext_err_hi[i] = ul_data['err_hi']
            o.ebin_ext_ul95[i] = ul_data['ul']
            o.ebin_loglike_ext[i] = ul_data['lnlmax']
            o.ebin_ts_ext[i] = 2.0 * \
                (o.ebin_loglike_ext[i] - o.ebin_loglike_ptsrc[i])

        o.ebin_dloglike = o.ebin_loglike - o.ebin_loglike_ptsrc[:, None]
        # Restore the original morphology before returning.
        self.set_source_morphology(name,
                                   spatial_model=src['SpatialModel'],
                                   spatial_pars=src.spatial_pars,
                                   use_pylike=False)
Example #3
0
    def _fit_extension_ebin(self, name, o, **kwargs):
        """Run the extension scan independently in each energy bin.

        Fills the per-bin arrays of *o* in place (ebin_loglike_ptsrc,
        ebin_loglike, ebin_ext*, ebin_ts_ext, ebin_dloglike).  The source
        is temporarily replaced with a point source to measure the per-bin
        null likelihoods, then restored to its original morphology.
        """
        optimizer = kwargs.get('optimizer', {})
        spatial_model = kwargs.get('spatial_model')
        psf_scale_fn = kwargs.pop('psf_scale_fn', None)
        # NOTE(review): reoptimize is accepted for interface symmetry but
        # the fast per-bin scan below always runs with reoptimize=False.
        reoptimize = kwargs.pop('reoptimize', True)

        src = self.roi.copy_source(name)
        self.set_source_morphology(name, spatial_model='PointSource',
                                   use_pylike=False,
                                   psf_scale_fn=psf_scale_fn)

        # Null (point-source) log-likelihood in each energy bin.
        for i, (logemin, logemax) in enumerate(zip(self.log_energies[:-1],
                                                   self.log_energies[1:])):

            self.set_energy_range(logemin, logemax)
            o.ebin_loglike_ptsrc[i] = -self.like()

        self.set_energy_range(self.log_energies[0], self.log_energies[-1])
        self.set_source_morphology(name, spatial_model=src['SpatialModel'],
                                   spatial_pars=src.spatial_pars,
                                   psf_scale_fn=psf_scale_fn,
                                   use_pylike=False)

        # Bug fix: pass the locally resolved optimizer (default {}) rather
        # than kwargs['optimizer'], which raised KeyError when the caller
        # did not supply an optimizer.
        o.ebin_loglike = self._scan_extension_fast_ebin(name,
                                                        spatial_model=spatial_model,
                                                        width=o.width,
                                                        optimizer=optimizer,
                                                        psf_scale_fn=psf_scale_fn,
                                                        reoptimize=False)

        # Extract per-bin best-fit extension, errors, UL and extension TS.
        for i, (logemin, logemax) in enumerate(zip(self.log_energies[:-1],
                                                   self.log_energies[1:])):
            ul_data = utils.get_parameter_limits(o.width, o.ebin_loglike[i])
            o.ebin_ext[i] = max(ul_data['x0'], 10**-2.5)
            o.ebin_ext_err[i] = ul_data['err']
            o.ebin_ext_err_lo[i] = ul_data['err_lo']
            o.ebin_ext_err_hi[i] = ul_data['err_hi']
            o.ebin_ext_ul95[i] = ul_data['ul']
            o.ebin_loglike_ext[i] = ul_data['lnlmax']
            o.ebin_ts_ext[i] = 2.0 * \
                (o.ebin_loglike_ext[i] - o.ebin_loglike_ptsrc[i])

        o.ebin_dloglike = o.ebin_loglike - o.ebin_loglike_ptsrc[:, None]
        # Restore the original morphology before returning.
        self.set_source_morphology(name, spatial_model=src['SpatialModel'],
                                   spatial_pars=src.spatial_pars,
                                   use_pylike=False)
Example #4
0
        # NOTE(review): fragment of a larger aggregation loop -- the
        # enclosing function, and the definitions of tab, name, eflux,
        # ext_width, halo_ts, halo_eflux_ul95, row_dict and the object_*
        # dicts, lie outside this view.
        # Record this object's per-source scan results.
        object_fit_ext_dlnl[name] = tab['fit_ext_scan_dlnl']
        object_fit_halo_dlnl[name] = fit_halo_scan_dlnl
        object_fit_ext_ts[name] = tab['fit_ext_ts']
        object_fit_halo_ts[name] = tab['fit_halo_ts']
        object_name[name] = tab['name']

        # Sum over all sources
        fit_halo_dlnl = np.sum(fit_halo_scan_dlnl,axis=0)
        fit_ext_dlnl =  np.sum(tab['fit_ext_scan_dlnl'],axis=0)
        # Re-zero the extension profile on its first grid point.
        fit_ext_dlnl -= fit_ext_dlnl[0]

        # Hard-coded 9x7 scan grid -- presumably (halo width x spectral
        # index); TODO confirm against the scan definition elsewhere.
        for i in range(9):
            for j in range(7):

                fit_halo_dlnl_sum = fit_halo_dlnl[i,j]
                lims = utils.get_parameter_limits(eflux,fit_halo_dlnl_sum)
                halo_ts[i,j] = 2.0*lims['lnlmax']
                halo_eflux_ul95[i,j] = lims['ul']

        #halo_ts = 2.0*np.max(fit_halo_dlnl,axis=2)

        row_dict['name'] = name
        row_dict['fit_halo_scan_dlnl'] = fit_halo_dlnl
        row_dict['fit_halo_scan_ts'] = halo_ts
        row_dict['fit_halo_scan_eflux_ul95'] = halo_eflux_ul95
        row_dict['fit_ext_dlnl'] = fit_ext_dlnl

        ext_lims = utils.get_parameter_limits(ext_width,fit_ext_dlnl)

        # Extension TS relative to the re-zeroed first grid point.
        row_dict['fit_ext_ts'] = 2.0*(ext_lims['lnlmax'])
        row_dict['fit_ext_ul95'] = ext_lims['ul']
Example #5
0
    def _fit_extension(self, name, **kwargs):
        """Fit the angular extension of a source.

        A coarse scan of the likelihood versus extension width (grid
        controlled by ``width_min``/``width_max``/``width_nstep``) is
        followed by a refined linear scan around the first-pass solution.
        Returns a MutableNamedTuple with the best-fit extension, its
        errors, the 95% upper limit and the scan position.
        """
        spatial_model = kwargs.get('spatial_model', 'RadialGaussian')
        optimizer = kwargs.get('optimizer', {})
        width_min = kwargs.get('width_min', 10**-2.0)
        width_max = kwargs.get('width_max', 10**0.5)
        width_nstep = kwargs.get('width_nstep', 21)
        # NOTE(review): fit_position is read but not used in this block.
        fit_position = kwargs.get('fit_position', False)
        skydir = kwargs.get('skydir', self.roi[name].skydir)
        psf_scale_fn = kwargs.get('psf_scale_fn', None)
        reoptimize = kwargs.get('reoptimize', True)

        src = self.roi.copy_source(name)

        # If the source is extended split the likelihood scan into two
        # parts centered on the best-fit value -- this ensures better
        # fit stability
        if (src['SpatialModel'] in ['RadialGaussian', 'RadialDisk']
                and src['SpatialWidth'] > width_min):
            width_lo = np.logspace(np.log10(width_min),
                                   np.log10(src['SpatialWidth']),
                                   width_nstep // 2 + (width_nstep % 2 > 0))
            width_hi = np.logspace(np.log10(src['SpatialWidth']),
                                   np.log10(width_max), width_nstep // 2 + 1)
            # The low half is scanned in descending order (then re-reversed)
            # so that each fit starts near the preceding solution.
            loglike_lo = self._scan_extension(name,
                                              spatial_model=spatial_model,
                                              width=width_lo[::-1],
                                              optimizer=optimizer,
                                              skydir=skydir,
                                              psf_scale_fn=psf_scale_fn,
                                              reoptimize=reoptimize)[::-1]
            loglike_hi = self._scan_extension(name,
                                              spatial_model=spatial_model,
                                              width=width_hi,
                                              optimizer=optimizer,
                                              skydir=skydir,
                                              psf_scale_fn=psf_scale_fn,
                                              reoptimize=reoptimize)
            # Both halves share the best-fit point; drop the duplicate.
            width = np.concatenate((width_lo, width_hi[1:]))
            loglike = np.concatenate((loglike_lo, loglike_hi[1:]))
        else:
            # Point-like (or non-radial) source: single log-spaced scan
            # prefixed with the zero-extension (point-source) case.
            width = np.logspace(np.log10(width_min), np.log10(width_max),
                                width_nstep)
            width = np.concatenate(([0.0], width))
            loglike = self._scan_extension(name,
                                           spatial_model=spatial_model,
                                           width=width,
                                           optimizer=optimizer,
                                           skydir=skydir,
                                           psf_scale_fn=psf_scale_fn,
                                           reoptimize=reoptimize)

        ul_data = utils.get_parameter_limits(width,
                                             loglike,
                                             bounds=[10**-3.0, width_max])

        # No parabolic error available -- fall back on the grid maximum.
        if not np.isfinite(ul_data['err']):
            ul_data['x0'] = width[np.argmax(loglike)]
            ul_data['err'] = ul_data['x0']
            ul_data['err_lo'] = ul_data['x0']
            ul_data['err_hi'] = ul_data['x0']

        # Refined pass: linear grid from ~2 sigma below the best fit up to
        # 1.5x the upper limit (or 2 sigma above when the UL is undefined).
        imax = np.argmax(loglike)
        err = max(10**-2.0, ul_data['err'])
        lolim = max(min(ul_data['x0'], width[imax]) - 2.0 * err, 0)

        if np.isfinite(ul_data['ul']):
            hilim = 1.5 * ul_data['ul']
        else:
            hilim = ul_data['x0'] + 2.0 * err

        nstep = max(width_nstep, int((hilim - lolim) / err))
        width2 = np.linspace(lolim, hilim, nstep)

        loglike2 = self._scan_extension(name,
                                        spatial_model=spatial_model,
                                        width=width2,
                                        optimizer=optimizer,
                                        skydir=skydir,
                                        psf_scale_fn=psf_scale_fn,
                                        reoptimize=reoptimize)
        ul_data2 = utils.get_parameter_limits(width2,
                                              loglike2,
                                              bounds=[10**-3.0, width_max])

        self.logger.debug('width: %s', width)
        self.logger.debug('loglike: %s', loglike - np.max(loglike))
        self.logger.debug('ul_data:\n %s', pprint.pformat(ul_data))
        self.logger.debug('width2: %s', width2)
        self.logger.debug('loglike2: %s', loglike2 - np.max(loglike2))
        self.logger.debug('ul_data2:\n %s', pprint.pformat(ul_data2))

        return MutableNamedTuple(ext=max(ul_data2['x0'], 10**-2.5),
                                 ext_ul95=ul_data2['ul'],
                                 ext_err_lo=ul_data2['err_lo'],
                                 ext_err_hi=ul_data2['err_hi'],
                                 ext_err=ul_data2['err'],
                                 loglike_ext=ul_data2['lnlmax'],
                                 ra=skydir.ra.deg,
                                 dec=skydir.dec.deg,
                                 glon=skydir.galactic.l.deg,
                                 glat=skydir.galactic.b.deg,
                                 pos_offset=0.0)
Example #6
0
    def _make_sed(self, name, **config):
        """Compute a spectral energy distribution (SED) for source *name*.

        Performs a global spectral fit, then swaps the source spectrum for
        a fixed-index PowerLaw and fits its normalization independently in
        each energy bin, recording fluxes, errors, upper limits, TS values
        and likelihood profiles.  The original spectrum, energy range and
        likelihood state are restored before returning the result dict.
        """
        bin_index = config['bin_index']
        use_local_index = config['use_local_index']
        free_background = config['free_background']
        free_radius = config['free_radius']
        ul_confidence = config['ul_confidence']
        cov_scale = config['cov_scale']
        loge_bins = config['loge_bins']

        # Fall back on the analysis energy binning when no bins are given.
        # NOTE(review): the `loge_bins is None` clause is redundant (None
        # is already falsy) -- kept as-is.
        if not loge_bins or loge_bins is None:
            loge_bins = self.log_energies
        else:
            loge_bins = np.array(loge_bins)

        nbins = len(loge_bins) - 1
        max_index = 5.0
        min_flux = 1E-30
        npts = self.config['gtlike']['llscan_npts']
        loge_bounds = self.loge_bounds

        # Output Dictionary
        o = {'name': name,
             'loge_min': loge_bins[:-1],
             'loge_max': loge_bins[1:],
             'loge_ctr': 0.5 * (loge_bins[:-1] + loge_bins[1:]),
             'loge_ref': 0.5 * (loge_bins[:-1] + loge_bins[1:]),
             'e_min': 10 ** loge_bins[:-1],
             'e_max': 10 ** loge_bins[1:],
             'e_ctr': 10 ** (0.5 * (loge_bins[:-1] + loge_bins[1:])),
             'e_ref': 10 ** (0.5 * (loge_bins[:-1] + loge_bins[1:])),
             'ref_flux': np.zeros(nbins),
             'ref_eflux': np.zeros(nbins),
             'ref_dnde': np.zeros(nbins),
             'ref_dnde_e_min': np.zeros(nbins),
             'ref_dnde_e_max': np.zeros(nbins),
             'ref_e2dnde': np.zeros(nbins),
             'ref_npred': np.zeros(nbins),
             'norm': np.zeros(nbins),
             'flux': np.zeros(nbins),
             'eflux': np.zeros(nbins),
             'dnde': np.zeros(nbins),
             'e2dnde': np.zeros(nbins),
             'index': np.zeros(nbins),
             'npred': np.zeros(nbins),
             'ts': np.zeros(nbins),
             'loglike': np.zeros(nbins),
             'norm_scan': np.zeros((nbins, npts)),
             'dloglike_scan': np.zeros((nbins, npts)),
             'loglike_scan': np.zeros((nbins, npts)),
             'fit_quality': np.zeros(nbins),
             'fit_status': np.zeros(nbins),
             'correlation': {},
             'model_flux': {},
             'config': config
             }

        # Error/UL columns start as NaN and are filled per bin below.
        for t in ['norm', 'flux', 'eflux', 'dnde', 'e2dnde']:
            o['%s_err' % t] = np.zeros(nbins) * np.nan
            o['%s_err_hi' % t] = np.zeros(nbins) * np.nan
            o['%s_err_lo' % t] = np.zeros(nbins) * np.nan
            o['%s_ul95' % t] = np.zeros(nbins) * np.nan
            o['%s_ul' % t] = np.zeros(nbins) * np.nan

        # Snapshot the full likelihood state; restored at the very end.
        saved_state = LikelihoodState(self.like)
        source = self.components[0].like.logLike.getSource(str(name))

        # Perform global spectral fit
        self._latch_free_params()
        self.free_sources(False, pars='shape', loglevel=logging.DEBUG)
        self.free_source(name, pars=config.get('free_pars', None),
                         loglevel=logging.DEBUG)
        fit_output = self.fit(loglevel=logging.DEBUG, update=False,
                              min_fit_quality=2)
        o['model_flux'] = self.bowtie(name)
        spectral_pars = gtutils.get_function_pars_dict(source.spectrum())
        o['SpectrumType'] = self.roi[name]['SpectrumType']
        o.update(model_utils.pars_dict_to_vectors(o['SpectrumType'],
                                                  spectral_pars))

        param_names = gtutils.get_function_par_names(o['SpectrumType'])
        npar = len(param_names)
        o['param_covariance'] = np.empty((npar, npar), dtype=float) * np.nan

        # Build boolean masks selecting this source's spectral parameters:
        # pmask0 indexes the global fit's parameter list, pmask1 indexes
        # this source's parameter vector.
        pmask0 = np.empty(len(fit_output['par_names']), dtype=bool)
        pmask0.fill(False)
        pmask1 = np.empty(npar, dtype=bool)
        pmask1.fill(False)
        for i, pname in enumerate(param_names):

            for j, pname2 in enumerate(fit_output['par_names']):
                if name != fit_output['src_names'][j]:
                    continue
                if pname != pname2:
                    continue
                pmask0[j] = True
                pmask1[i] = True

        # Extract this source's covariance sub-matrix from the global fit.
        src_cov = fit_output['covariance'][pmask0, :][:, pmask0]
        o['param_covariance'][np.ix_(pmask1, pmask1)] = src_cov
        o['param_correlation'] = utils.cov_to_correlation(
            o['param_covariance'])

        # NOTE(review): rescales each row/column by the parameter's scale
        # factor -- presumably converting the covariance from internal
        # (scaled) units to physical parameter units; verify against the
        # fit output conventions.
        for i, pname in enumerate(param_names):
            o['param_covariance'][i, :] *= spectral_pars[pname]['scale']
            o['param_covariance'][:, i] *= spectral_pars[pname]['scale']

        self._restore_free_params()

        self.logger.info('Fitting SED')

        # Setup background parameters for SED
        self.free_sources(False, pars='shape')
        self.free_norm(name)

        if not free_background:
            self.free_sources(free=False, loglevel=logging.DEBUG)

        # Optionally free the normalizations of nearby non-diffuse sources.
        if free_radius is not None:
            diff_sources = [s.name for s in self.roi.sources if s.diffuse]
            skydir = self.roi[name].skydir
            free_srcs = [s.name for s in
                         self.roi.get_sources(skydir=skydir,
                                              distance=free_radius,
                                              exclude=diff_sources)]
            self.free_sources_by_name(free_srcs, pars='norm',
                                      loglevel=logging.DEBUG)

        # Optionally constrain background normalizations around a fit with
        # this source zeroed out.
        if cov_scale is not None:
            self._latch_free_params()
            self.zero_source(name)
            self.fit(loglevel=logging.DEBUG, update=False)
            srcNames = list(self.like.sourceNames())
            srcNames.remove(name)
            self.constrain_norms(srcNames, cov_scale)
            self.unzero_source(name)
            self._restore_free_params()

        # Precompute fluxes in each bin from global fit
        gf_bin_flux = []
        gf_bin_index = []
        for i, (logemin, logemax) in enumerate(zip(loge_bins[:-1],
                                                   loge_bins[1:])):

            emin = 10 ** logemin
            emax = 10 ** logemax
            # Narrow relative window used to estimate the local spectral
            # slope numerically at the bin edges.
            delta = 1E-5
            f = self.like[name].flux(emin, emax)
            f0 = self.like[name].flux(emin * (1 - delta), emin * (1 + delta))
            f1 = self.like[name].flux(emax * (1 - delta), emax * (1 + delta))

            if f0 > min_flux and f1 > min_flux:
                g = 1 - np.log10(f0 / f1) / np.log10(emin / emax)
                gf_bin_index += [g]
                gf_bin_flux += [f]
            else:
                # Degenerate fluxes: fall back on clamp values.
                gf_bin_index += [max_index]
                gf_bin_flux += [min_flux]

        # Remember the original spectrum so it can be restored at the end.
        old_spectrum = source.spectrum()
        old_pars = copy.deepcopy(self.roi[name].spectral_pars)
        old_type = self.roi[name]['SpectrumType']

        spectrum_pars = {
            'Prefactor':
                {'value': 1.0, 'scale': 1E-13, 'min': 1E-10,
                    'max': 1E10, 'free': True},
            'Index':
                {'value': 2.0, 'scale': -1.0, 'min': 0.0, 'max': 5.0, 'free': False},
            'Scale':
                {'value': 1E3, 'scale': 1.0, 'min': 1., 'max': 1E6, 'free': False},
        }

        self.set_source_spectrum(str(name), 'PowerLaw',
                                 spectrum_pars=spectrum_pars,
                                 update_source=False)

        # Locate this source's normalization among the free parameters and
        # allocate correlation arrays for every free normalization.
        src_norm_idx = -1
        free_params = self.get_params(True)
        for j, p in enumerate(free_params):
            if not p['is_norm']:
                continue
            if p['is_norm'] and p['src_name'] == name:
                src_norm_idx = j

            o['correlation'][p['src_name']] = np.zeros(nbins) * np.nan

        self._fitcache = None

        # Fit the PowerLaw normalization independently in each energy bin.
        for i, (logemin, logemax) in enumerate(zip(loge_bins[:-1],
                                                   loge_bins[1:])):

            logectr = 0.5 * (logemin + logemax)
            emin = 10 ** logemin
            emax = 10 ** logemax
            ectr = 10 ** logectr
            ectr2 = ectr**2

            saved_state_bin = LikelihoodState(self.like)
            if use_local_index:
                o['index'][i] = -min(gf_bin_index[i], max_index)
            else:
                o['index'][i] = -bin_index

            self.set_norm(name, 1.0, update_source=False)
            self.set_parameter(name, 'Index', o['index'][i], scale=1.0,
                               update_source=False)
            self.like.syncSrcParams(str(name))

            # Reference quantities at unit normalization.
            ref_flux = self.like[name].flux(emin, emax)

            o['ref_flux'][i] = self.like[name].flux(emin, emax)
            o['ref_eflux'][i] = self.like[name].energyFlux(emin, emax)
            o['ref_dnde'][i] = self.like[name].spectrum()(pyLike.dArg(ectr))
            o['ref_dnde_e_min'][i] = self.like[
                name].spectrum()(pyLike.dArg(emin))
            o['ref_dnde_e_max'][i] = self.like[
                name].spectrum()(pyLike.dArg(emax))
            o['ref_e2dnde'][i] = o['ref_dnde'][i] * ectr2
            cs = self.model_counts_spectrum(
                name, logemin, logemax, summed=True)
            o['ref_npred'][i] = np.sum(cs)

            # Seed the normalization from the global-fit flux in this bin.
            normVal = self.like.normPar(name).getValue()
            flux_ratio = gf_bin_flux[i] / ref_flux
            newVal = max(normVal * flux_ratio, 1E-10)
            self.set_norm(name, newVal, update_source=False)
            self.set_norm_bounds(name, [newVal * 1E-6, newVal * 1E4])

            self.like.syncSrcParams(str(name))
            self.free_norm(name)
            self.logger.debug('Fitting %s SED from %.0f MeV to %.0f MeV' %
                              (name, emin, emax))
            self.set_energy_range(logemin, logemax)

            fit_output = self._fit(**config['optimizer'])
            free_params = self.get_params(True)
            for j, p in enumerate(free_params):

                if not p['is_norm']:
                    continue

                o['correlation'][p['src_name']][i] = \
                    fit_output['correlation'][src_norm_idx, j]

            o['fit_quality'][i] = fit_output['fit_quality']
            o['fit_status'][i] = fit_output['fit_status']

            flux = self.like[name].flux(emin, emax)
            eflux = self.like[name].energyFlux(emin, emax)
            dnde = self.like[name].spectrum()(pyLike.dArg(ectr))

            o['norm'][i] = flux / o['ref_flux'][i]
            o['flux'][i] = flux
            o['eflux'][i] = eflux
            o['dnde'][i] = dnde
            o['e2dnde'][i] = dnde * ectr2

            cs = self.model_counts_spectrum(name, logemin,
                                            logemax, summed=True)
            o['npred'][i] = np.sum(cs)
            o['loglike'][i] = fit_output['loglike']

            # Profile the normalization to get the likelihood scan, TS
            # and asymmetric errors / upper limits for this bin.
            lnlp = self.profile_norm(name, logemin=logemin, logemax=logemax,
                                     savestate=True, reoptimize=True,
                                     npts=npts, optimizer=config['optimizer'])

            o['ts'][i] = max(
                2.0 * (fit_output['loglike'] - lnlp['loglike'][0]), 0.0)
            o['loglike_scan'][i] = lnlp['loglike']
            o['dloglike_scan'][i] = lnlp['dloglike']
            o['norm_scan'][i] = lnlp['flux'] / ref_flux

            ul_data = utils.get_parameter_limits(
                lnlp['flux'], lnlp['dloglike'])

            o['norm_err_hi'][i] = ul_data['err_hi'] / ref_flux
            o['norm_err_lo'][i] = ul_data['err_lo'] / ref_flux

            # Symmetrize the error when both sides exist; otherwise use the
            # upper error alone.
            if np.isfinite(ul_data['err_lo']):
                o['norm_err'][i] = 0.5 * (ul_data['err_lo'] +
                                          ul_data['err_hi']) / ref_flux
            else:
                o['norm_err'][i] = ul_data['err_hi'] / ref_flux

            o['norm_ul95'][i] = ul_data['ul'] / ref_flux

            # Second pass at the user-requested confidence level.
            ul_data = utils.get_parameter_limits(lnlp['flux'],
                                                 lnlp['dloglike'],
                                                 cl_limit=ul_confidence)
            o['norm_ul'][i] = ul_data['ul'] / ref_flux

            saved_state_bin.restore()

        # Derive flux/eflux/dnde errors and ULs from the normalization ones.
        for t in ['flux', 'eflux', 'dnde', 'e2dnde']:

            o['%s_err' % t] = o['norm_err'] * o['ref_%s' % t]
            o['%s_err_hi' % t] = o['norm_err_hi'] * o['ref_%s' % t]
            o['%s_err_lo' % t] = o['norm_err_lo'] * o['ref_%s' % t]
            o['%s_ul95' % t] = o['norm_ul95'] * o['ref_%s' % t]
            o['%s_ul' % t] = o['norm_ul'] * o['ref_%s' % t]

        # Restore the analysis energy range, spectrum and fit state.
        self.set_energy_range(loge_bounds[0], loge_bounds[1])
        self.set_source_spectrum(str(name), old_type,
                                 spectrum_pars=old_pars,
                                 update_source=False)

        saved_state.restore()
        self._sync_params(name)

        if cov_scale is not None:
            self.remove_priors()

        return o
Example #7
0
    def _fit_extension(self, name, **kwargs):
        """Fit the angular extension of a source.

        A coarse scan of the likelihood versus extension width is followed
        by a refined linear scan around the first-pass solution.  The scan
        grid is controlled by ``width_min``, ``width_max`` and
        ``width_nstep``; their defaults reproduce the previously
        hard-coded grid logspace(-2.0, 0.5, 21).  Returns a
        MutableNamedTuple with the best-fit extension, its errors, the 95%
        upper limit and the scan position.
        """
        spatial_model = kwargs.get('spatial_model', 'RadialGaussian')
        optimizer = kwargs.get('optimizer', {})
        # Generalization: the scan grid bounds and sampling were hard-coded;
        # expose them as keyword arguments with backward-compatible defaults.
        width_min = kwargs.get('width_min', 10**-2.0)
        width_max = kwargs.get('width_max', 10**0.5)
        width_nstep = kwargs.get('width_nstep', 21)
        # NOTE(review): fit_position is read but not used in this block.
        fit_position = kwargs.get('fit_position', False)
        skydir = kwargs.get('skydir', self.roi[name].skydir)
        psf_scale_fn = kwargs.get('psf_scale_fn', None)
        reoptimize = kwargs.get('reoptimize', True)

        src = self.roi.copy_source(name)

        # If the source is extended split the likelihood scan into two
        # parts centered on the best-fit value -- this ensures better
        # fit stability
        if (src['SpatialModel'] in ['RadialGaussian', 'RadialDisk']
                and src['SpatialWidth'] > width_min):
            width_lo = np.logspace(np.log10(width_min),
                                   np.log10(src['SpatialWidth']),
                                   width_nstep // 2 + (width_nstep % 2 > 0))
            width_hi = np.logspace(np.log10(src['SpatialWidth']),
                                   np.log10(width_max), width_nstep // 2 + 1)
            # The low half is scanned in descending order (then re-reversed)
            # so that each fit starts near the preceding solution.
            loglike_lo = self._scan_extension(name,
                                              spatial_model=spatial_model,
                                              width=width_lo[::-1],
                                              optimizer=optimizer,
                                              skydir=skydir,
                                              psf_scale_fn=psf_scale_fn,
                                              reoptimize=reoptimize)[::-1]
            loglike_hi = self._scan_extension(name,
                                              spatial_model=spatial_model,
                                              width=width_hi,
                                              optimizer=optimizer,
                                              skydir=skydir,
                                              psf_scale_fn=psf_scale_fn,
                                              reoptimize=reoptimize)
            # Both halves share the best-fit point; drop the duplicate.
            width = np.concatenate((width_lo, width_hi[1:]))
            loglike = np.concatenate((loglike_lo, loglike_hi[1:]))
        else:
            # Single log-spaced scan prefixed with the zero-extension
            # (point-source) case.
            width = np.logspace(np.log10(width_min), np.log10(width_max),
                                width_nstep)
            width = np.concatenate(([0.0], width))
            loglike = self._scan_extension(name,
                                           spatial_model=spatial_model,
                                           width=width,
                                           optimizer=optimizer,
                                           skydir=skydir,
                                           psf_scale_fn=psf_scale_fn,
                                           reoptimize=reoptimize)

        ul_data = utils.get_parameter_limits(width, loglike,
                                             bounds=[10**-3.0, width_max])

        # No parabolic error available -- fall back on the grid maximum.
        if not np.isfinite(ul_data['err']):
            ul_data['x0'] = width[np.argmax(loglike)]
            ul_data['err'] = ul_data['x0']
            ul_data['err_lo'] = ul_data['x0']
            ul_data['err_hi'] = ul_data['x0']

        # Refined pass: linear grid from ~2 sigma below the best fit up to
        # 1.5x the upper limit (or 2 sigma above when the UL is undefined).
        imax = np.argmax(loglike)
        err = max(10**-2.0, ul_data['err'])
        lolim = max(min(ul_data['x0'], width[imax]) - 2.0 * err, 0)

        if np.isfinite(ul_data['ul']):
            hilim = 1.5 * ul_data['ul']
        else:
            hilim = ul_data['x0'] + 2.0 * err

        nstep = max(width_nstep, int((hilim - lolim) / err))
        width2 = np.linspace(lolim, hilim, nstep)

        loglike2 = self._scan_extension(name, spatial_model=spatial_model,
                                        width=width2, optimizer=optimizer,
                                        skydir=skydir,
                                        psf_scale_fn=psf_scale_fn,
                                        reoptimize=reoptimize)
        ul_data2 = utils.get_parameter_limits(width2, loglike2,
                                              bounds=[10**-3.0, width_max])

        self.logger.debug('width: %s', width)
        self.logger.debug('loglike: %s', loglike - np.max(loglike))
        self.logger.debug('ul_data:\n %s', pprint.pformat(ul_data))
        self.logger.debug('width2: %s', width2)
        self.logger.debug('loglike2: %s', loglike2 - np.max(loglike2))
        self.logger.debug('ul_data2:\n %s', pprint.pformat(ul_data2))

        return MutableNamedTuple(
            ext=max(ul_data2['x0'], 10**-2.5),
            ext_ul95=ul_data2['ul'],
            ext_err_lo=ul_data2['err_lo'],
            ext_err_hi=ul_data2['err_hi'],
            ext_err=ul_data2['err'],
            loglike_ext=ul_data2['lnlmax'],
            ra=skydir.ra.deg,
            dec=skydir.dec.deg,
            glon=skydir.galactic.l.deg,
            glat=skydir.galactic.b.deg,
            pos_offset=0.0)
Example #8
0
def fit_halo_scan(gta,
                  modelname,
                  src_name,
                  halo_width,
                  halo_index,
                  spatial_model='RadialGaussian',
                  loge_bounds=None,
                  optimizer='NEWTON'):
    """Perform a 2D likelihood scan over the spatial width and spectral
    index of a putative extended halo component centered on ``src_name``.

    For each width in ``halo_width`` a test halo source is added to the
    ROI, fit with its index free (one saved result per width), and then
    refit at each fixed index in ``halo_index`` (one saved result per
    width/index grid point).  Per-width SED FITS files, stacked SED
    files, numpy data dumps, and grid-point FITS tables are all written
    to ``gta.workdir`` with the prefix ``<modelname>_halo_<spatial_model>``.

    Parameters
    ----------
    gta : object
        Analysis instance exposing the ROI/fitting API used below
        (``add_source``, ``fit``, ``sed``, ``write_roi``, ...) —
        presumably a fermipy ``GTAnalysis``; confirm against callers.
    modelname : str
        Label of the baseline model; used as a prefix for all outputs.
    src_name : str
        Name of the ROI source whose position seeds the halo.
    halo_width : array-like
        Spatial widths (deg) to scan.
    halo_index : array-like
        Spectral indices to scan at each width, given as positive
        numbers (the sign is flipped when set, since 'Index' has
        scale=-1 in the source dict below).
    spatial_model : str
        Spatial model of the test halo source.
    loge_bounds : tuple or None
        Currently unused — the energy-range restriction that consumed
        it is commented out below.
    optimizer : str
        Optimizer name passed to the likelihood fits.
    """
    gta.logger.info('Starting Halo Scan %s' % (modelname))

    halo_source_name = 'halo_' + spatial_model
    # Template PowerLaw test source; 'SpatialWidth' is overwritten at
    # each scan point, position is filled in below.
    halo_source_dict = {
        'SpectrumType': 'PowerLaw',
        'Index': {
            'value': 2.0,
            'scale': -1.0,
            'min': 0.5,
            'max': 4.5
        },
        'Scale': 1000,
        'Prefactor': {
            'value': 1E-5,
            'scale': 1e-13,
            'min': 1E-5,
            'max': 1E4
        },
        'SpatialModel': spatial_model,
        'SpatialWidth': 1.0
    }

    outprefix = '%s_%s' % (modelname, halo_source_name)

    # Center the halo on the target source.
    halo_source_dict['ra'] = gta.roi[src_name]['ra']
    halo_source_dict['dec'] = gta.roi[src_name]['dec']

    #gta.load_roi(modelname)
    #if loge_bounds is not None:
    #    gta.set_energy_range(loge_bounds[0],loge_bounds[1])

    skydir = gta.roi[src_name].skydir
    diff_sources = [s.name for s in gta.roi.sources if s.diffuse]

    # Fix everything, then free only the normalizations of point
    # sources within 1 deg of the target (diffuse components excluded).
    gta.free_sources(False)
    gta.free_sources(skydir=skydir,
                     distance=1.0,
                     pars='norm',
                     exclude=diff_sources)
    # Snapshot the baseline model; it is reloaded at the start of every
    # width iteration so each scan point starts from the same state.
    gta.write_xml(modelname + '_base')

    # Accumulators: one row per (width, index) grid point (halo_tab /
    # halo_data) and one row per width from the index-free fit
    # (halo_tab_idx_free / halo_data_idx_free).
    halo_tab = gta.roi.create_table([])
    halo_tab_idx_free = gta.roi.create_table([])
    halo_data = []
    halo_data_idx_free = []

    for i, w in enumerate(halo_width):

        gta.logger.info('Fitting Halo Width %.3f', w)

        halo_source_dict['SpatialWidth'] = w
        gta.load_xml(modelname + '_base')

        gta.add_source(halo_source_name, halo_source_dict, free=True)

        # First fit with only the halo normalization free, then extract
        # SEDs at two covariance scales (cov_scale=5 and 10).
        gta.free_norm(halo_source_name)
        gta.fit(optimizer=optimizer)
        gta.sed(halo_source_name,
                prefix='%s_cov05_%02i' % (modelname, i),
                outfile='%s_cov05_%02i_sed' % (outprefix, i),
                free_radius=1.0,
                cov_scale=5.0,
                optimizer={'optimizer': 'MINUIT'},
                make_plots=False)

        gta.sed(halo_source_name,
                prefix='%s_cov10_%02i' % (modelname, i),
                outfile='%s_cov10_%02i_sed' % (outprefix, i),
                free_radius=1.0,
                cov_scale=10.0,
                optimizer={'optimizer': 'MINUIT'},
                make_plots=False)

        # Refit with the spectral index free, then re-fix it and save
        # the per-width (index-free) result.
        gta.free_parameter(halo_source_name, 'Index')
        gta.fit(optimizer=optimizer)
        gta.free_parameter(halo_source_name, 'Index', False)
        gta.update_source(halo_source_name,
                          reoptimize=True,
                          optimizer={'optimizer': optimizer})

        halo_data_idx_free += [copy.deepcopy(gta.roi[halo_source_name].data)]
        gta.roi[halo_source_name].add_to_table(halo_tab_idx_free)
        gta.write_roi('%s_%02i' % (outprefix, i), make_plots=False)

        gta.print_params(loglevel=logging.DEBUG)

        # Scan over fixed index
        for j, idx in enumerate(halo_index):

            gta.logger.info('Fitting Halo Index %.3f', idx)

            # Flattened grid index; only referenced by the commented-out
            # write_roi call below.
            model_idx = i * len(halo_index) + j
            gta.set_norm(halo_source_name, 0.1, update_source=False)
            # 'Index' is defined with scale=-1.0, hence the sign flip.
            gta.set_parameter(halo_source_name,
                              'Index',
                              -1.0 * idx,
                              update_source=False)

            gta.fit(update=False, optimizer=optimizer)

            gta.print_params(loglevel=logging.DEBUG)

            gta.update_source(halo_source_name,
                              reoptimize=True,
                              optimizer={'optimizer': optimizer})

            # Derive flux/eflux errors from the profile-likelihood scans
            # produced by update_source.
            ul_flux = get_parameter_limits(
                gta.roi[halo_source_name]['flux_scan'],
                gta.roi[halo_source_name]['loglike_scan'])
            ul_eflux = get_parameter_limits(
                gta.roi[halo_source_name]['eflux_scan'],
                gta.roi[halo_source_name]['loglike_scan'])

            gta.roi[halo_source_name]['flux_err'] = ul_flux['err']
            gta.roi[halo_source_name]['eflux_err'] = ul_eflux['err']

            gta.logger.info(
                '%s Halo Width: %6.3f Index: %6.2f TS: %6.2f Flux: %8.4g',
                modelname, w, idx, gta.roi[halo_source_name]['ts'],
                gta.roi[halo_source_name]['flux'])

            #gta.write_roi('%s_%02i_%02i'%(outprefix,i,j),make_plots=False)
            halo_data += [copy.deepcopy(gta.roi[halo_source_name].data)]
            gta.roi[halo_source_name].add_to_table(halo_tab)

        gta.delete_source(halo_source_name, save_template=False)

    np.save(os.path.join(gta.workdir, '%s_data.npy' % outprefix), halo_data)
    np.save(os.path.join(gta.workdir, '%s_data_idx_free.npy' % outprefix),
            halo_data_idx_free)

    # Attach the scan-grid coordinates to the tables.  'ij' indexing
    # matches the (outer width, inner index) loop order above, so the
    # raveled columns line up with the row order of halo_tab.
    tab_halo_width, tab_halo_index = np.meshgrid(halo_width,
                                                 halo_index,
                                                 indexing='ij')
    halo_tab['halo_width'] = np.ravel(tab_halo_width)
    halo_tab['halo_index'] = np.ravel(tab_halo_index)
    halo_tab_idx_free['halo_width'] = halo_width

    # Stack the per-width SED files into one multi-row file per
    # covariance scale, tagging each row with its halo width.
    stack_files(
        sorted(
            glob.glob(os.path.join(gta.workdir, '%s*cov05*fits' % outprefix))),
        os.path.join(gta.workdir, '%s_cov05_sed.fits' % outprefix),
        new_cols=[Column(name='halo_width', data=halo_width, unit='deg')])

    stack_files(
        sorted(
            glob.glob(os.path.join(gta.workdir, '%s*cov10*fits' % outprefix))),
        os.path.join(gta.workdir, '%s_cov10_sed.fits' % outprefix),
        new_cols=[Column(name='halo_width', data=halo_width, unit='deg')])

    halo_tab.write(os.path.join(gta.workdir, '%s_data.fits' % outprefix),
                   overwrite=True)
    halo_tab_idx_free.write(os.path.join(gta.workdir,
                                         '%s_data_idx_free.fits' % outprefix),
                            overwrite=True)
    gta.logger.info('Finished Halo Scan %s' % (modelname))
def run_composite2(lst_inputs, path_outdir, names_params_tied_universal=['Index'], names_params_tied_category=['Prefactor'], ncategory=0, str_suffix=''):
    """Run a Composite2 joint likelihood fit over a set of GRB targets,
    scanning the spectral cutoff energy and deriving its limit for each
    GBM fluence category.

    For every input target an unbinned likelihood object is built; for
    each fluence category the targets in that category are combined in
    a Composite2 fit with selected parameters tied, while the cutoff
    energy ('P1') is stepped over a fixed grid.  The resulting
    -log(likelihood) profile yields a limit on the cutoff energy.

    NOTE(review): this is Python 2 code (print statements).  The list
    defaults are mutable but are only iterated, never mutated.

    Parameters
    ----------
    lst_inputs : list of str
        Paths to per-target analysis directories; the target name is
        taken from characters [3:12] of each directory's basename.
    path_outdir : str
        Output directory; 'plots', 'xml' and 'fits' subdirectories are
        created under it and the process chdirs into it.
    names_params_tied_universal : list of str
        Spectral parameters tied across ALL targets in a category fit.
    names_params_tied_category : list of str
        Spectral parameters tied within each fluence category.
    ncategory : int
        0 processes every category; otherwise only category
        ``ncategory - 1`` is processed (targets/categories outside it
        are skipped).
    str_suffix : str
        Optional label; '_' is prepended when non-empty.  Not used in
        the code visible here — presumably consumed further on or by an
        earlier revision; verify.
    """
    # Open table
    tb = ReadLTFCatalogueInfo.open_table()
    # Definition of GBM fluence categories
    FLUENCE_CUT = [1.09e-04, 3.06e-05] #[1.45E-4, 3.70E-5] # Top 10%, 35%
    NCATEGORIES_FLUENCE = len(FLUENCE_CUT)+1
    dct_category_fluence = {}
#    rh_fluence_weightNobs = ROOT.TH1D('roohtg', 'GBM Fluence', 100, -7, -2)

    path_base = os.getcwd()
    os.chdir(path_outdir)
    lst_name_subdir = ['plots', 'xml', 'fits']
    for name_subdir in lst_name_subdir:
        path_subdir = '{0}/{1}'.format(path_outdir, name_subdir)
        if not os.path.exists(path_subdir):
            os.makedirs(path_subdir)
    if str_suffix != '':
        str_suffix = '_' + str_suffix
    irfs = 'P8R2_SOURCE_V6' # To be used with P301 data
    optimizer='Minuit'
    # Delta(-2 log L) for a 90% one-sided limit; not referenced in the
    # code visible here.
    level = 2.71

    like={}
    targets = []
    # The three lists below are initialized but never filled in the
    # visible code — presumably leftovers; verify before removing.
    lst_fluence_gbm = []
    lst_fluence_gbm_err = []
    lst_nobs_lat = []
    for (itarget, path_target) in enumerate(lst_inputs):
        # NOTE: path_base is rebound here, shadowing the cwd saved above.
        path_base, name_base = os.path.split(path_target)
        target = name_base[3:12]
        targets.append(target)
        print '##### No.{0} {1} #####'.format(itarget, target)
        dct_category_fluence[target] = judge_category_fluence(tb, target, FLUENCE_CUT) 
        # Skip targets outside the requested category (ncategory==0
        # means process all, since ncategory-1 == -1).
        if ncategory-1 not in (dct_category_fluence[target], -1):
            print 'skipped.'
            continue

        ltcube = '/'.join((path_base, name_base+'_ft1_ltCube.fits'))
        expMap = '/'.join((path_base, name_base+'_ft1_expMap.fits'))
        srcModel = '/'.join((path_base, name_base+'_ft1_model.xml'))
        evt = '/'.join((path_base, name_base+'_ft1_filtered.fits'))
        sc = '/'.join((path_base, '../../../../..', name_base.replace('_P8_P302_BASE_T00-999-101000_r030', '_T00-999-101000_ft2-30s.fits')))
        if itarget==0:
            print 'Files of the first target.'
            print '  Event:', evt
            print '  Spacecraft:', sc
            print '  Livetime cube:', ltcube
            print '  Exposure map:', expMap
            print '  Source model:', srcModel

        # Diffuse responses
        my_apps.diffResps['evfile'] = evt
        my_apps.diffResps['scfile'] = sc
        my_apps.diffResps['srcmdl'] = srcModel
        my_apps.diffResps['irfs'] = irfs
        my_apps.diffResps.run()

        like[target] = unbinnedAnalysis(evfile=evt,
                                        scfile=sc,
                                        expmap=expMap,
                                        expcube=ltcube,
                                        irfs=irfs,
                                        srcmdl=srcModel,
                                        optimizer=optimizer)
        # Freeze normalizations of all non-target sources.
        # NOTE(review): `(target)` is a plain string, not a 1-tuple, so
        # `not in` here is a SUBSTRING test against the target name —
        # probably `(target,)` was intended; confirm.
        for source in like[target].sourceNames():
            if source not in (target):
                like[target].normPar(source).setFree(False)
        sys.stdout.flush()

    CompositeLike = []
    # Scan ecutoff
    # Cutoff-energy grid: 26 log-spaced points from 100 MeV to ~133 GeV.
    xvals = 10 ** np.linspace(2.0, 5.125, 26)
    ecutoff_lim95 = {}
    # Break energy frozen at 10 MeV for all targets.
    ebreak_fixed = 10.
    for icat in range(NCATEGORIES_FLUENCE):
        print '======================'
        print '===== Category', icat, '====='
        print '======================'
        if ncategory-1 not in (icat, -1):
            print 'skipped.'
            continue
        fit_results = []
        for i, x in enumerate(xvals):
            print '---------------'
            print 'Cutoff energy:', x, 'MeV'
            tiedParams_category = {}
            tiedParams_universal = {}
            for par in names_params_tied_category:
                tiedParams_category[par] = []
            for par in names_params_tied_universal:
                tiedParams_universal[par] = []
            # Fresh composite likelihood per grid point (rebinds the
            # CompositeLike list initialized above).
            CompositeLike = Composite2(optimizer=optimizer)
            for target in targets:
                if dct_category_fluence[target]==icat:
                    print target
                    # Fix the cutoff energy ('P1') at the current grid value.
                    ecutoff_index = like[target].par_index(target, 'P1')
                    print like[target][ecutoff_index]
                    like[target][ecutoff_index] = x
                    like[target].freeze(ecutoff_index)
                    print "  {0}'s P1 is freezed as {1}".format(target, like[target][ecutoff_index])
                    # Fix the break energy.
                    ebreak_index = like[target].par_index(target, 'Ebreak')
                    print like[target][ebreak_index]
                    like[target][ebreak_index] = ebreak_fixed
                    like[target].freeze(ebreak_index)
                    print "  {0}'s Ebreak is freezed as {1}".format(target, like[target][ebreak_index])
                    CompositeLike.addComponent(like[target])
                    # Collect (likelihood, source, parameter) triples to
                    # tie within this fluence category...
                    for par in names_params_tied_category:
                        tiedParams_category[par].append(tuple([like[target], target, par]))
                    # ...and across all targets.
                    for par in names_params_tied_universal:
                        tiedParams_universal[par].append(tuple([like[target], target, par]))
                    sys.stdout.flush()
            print '* Parameters tied by each category:'
            print tiedParams_category
            print '* Parameters tied universaly:'
            print tiedParams_universal
            for par in names_params_tied_category:
                CompositeLike.tieParameters(tuple(tiedParams_category[par]))
            for par in names_params_tied_universal:
                CompositeLike.tieParameters(tuple(tiedParams_universal[par]))

            # fit() returns the minimized -log(likelihood).
            fit_results.append(CompositeLike.fit(covar=False,tol=1.e-2,optimizer=optimizer))

        # Limit of ecutoff
        loglike_inversed_scanned = np.array(fit_results)
        loglike_inversed_min = min(loglike_inversed_scanned)
        print '* Negative log-likelihood :'
        for (i,s) in enumerate(loglike_inversed_scanned):
            print '{0} ({1}) at {2} MeV'.format(s, s-loglike_inversed_min, xvals[i])
        # NOTE(review): cl_limit=0.99540 / cl_err=0.9398 do not match the
        # "95% limit" label printed below — confirm the intended
        # confidence levels.
        ecutoff_lim95[icat] = get_parameter_limits(xvals, -1*loglike_inversed_scanned, cl_limit=0.99540, cl_err=0.9398)
    print '* 95% limit of cutoff energy:'
    print ecutoff_lim95