Example #1
 def call_pylike_spectrum(spectrum, e):
     """ Method to call a pylikelihood spectrum given
         either a python number or a numpy array. """
     from pyLikelihood import dArg
     if isinstance(e, collections.Iterable):
         return np.asarray([spectrum(dArg(i)) for i in e])
     else:
         return spectrum(dArg(e))
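A minimal usage sketch, assuming `spectrum` is a pyLikelihood spectrum object taken from a fitted source; the analysis object `like` and the source name are assumptions, not part of the example above.

import numpy as np
import pyLikelihood

# hypothetical setup: spectrum = pyLikelihood.PointSource_cast(like[name].src).spectrum()
energies = np.logspace(2, 5, 25)                   # MeV
dnde = call_pylike_spectrum(spectrum, energies)    # array of dN/dE values
single = call_pylike_spectrum(spectrum, 1000.0)    # scalar evaluation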
Example #2
def dNde(energy, Fit, name):
    """Compute the dN/dE value at energy E fir the source name"""
    import pyLikelihood

    ptsrc = pyLikelihood.PointSource_cast(Fit[name].src)
    arg = pyLikelihood.dArg(energy)
    return ptsrc.spectrum()(arg)
Example #3
    def __call__(self, xval=100, verbose=0):
        x = pyLike.dArg(xval)
        y0 = self.func.value(x)
        params = pyLike.DoubleVector()
        self.func.getFreeParamValues(params)

        eps = 1e-7
        num_derivs = []
        for i in range(len(params)):
            new_params = list(params)
            delta = new_params[i] * eps
            if delta == 0:
                delta = eps
            new_params[i] += delta
            self.func.setFreeParamValues(new_params)
            y1 = self.func.value(x)
            num_derivs.append((y1 - y0) / delta)

        derivs = pyLike.DoubleVector()

        self.func.setFreeParamValues(params)
        self.func.getFreeDerivs(x, derivs)

        for i, d0, d1 in zip(range(len(derivs)), num_derivs, derivs):
            try:
                assert (compare_floats(d0, d1))
                if verbose:
                    # force the except branch so the derivative comparison is always printed
                    raise AssertionError
            except AssertionError:
                parnames = pyLike.StringVector()
                self.func.getFreeParamNames(parnames)
                print "Parameter : ", i, parnames[i]
                print "%.3e  " * len(num_derivs) % tuple(num_derivs)
                print "%.3e  " * len(derivs) % tuple(derivs) + "\n"
        return tuple(params)
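The loop above cross-checks getFreeDerivs against one-sided finite differences, (f(p_i + delta_i) - f(p)) / delta_i, with the step scaled to each parameter. A standalone sketch of the same check for a plain Python callable, independent of the pyLikelihood API:

import numpy as np

def check_gradient(f, grad_f, params, eps=1e-7, rtol=1e-4):
    """Compare an analytic gradient against one-sided finite differences."""
    params = np.asarray(params, dtype=float)
    f0 = f(params)
    numeric = np.empty_like(params)
    for i, p in enumerate(params):
        delta = p * eps if p != 0 else eps   # scale the step to the parameter
        stepped = params.copy()
        stepped[i] += delta
        numeric[i] = (f(stepped) - f0) / delta
    analytic = np.asarray(grad_f(params))
    return np.allclose(numeric, analytic, rtol=rtol), numeric, analytic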
Example #4
    def calcBowtie(self, srcName, minE, maxE, numBins):
        '''This is derived from T. Johnson's likeSED code which was in turn
        derived from D. Sanchez's pyUnfoldPlot code which was probably
        based on some code developed by J. Chiang.  '''
        '''make some energy bounds for the fit, same max and min as for the
        bands before but with more bins.'''

        modEs = qU.log_array(numBins, minE, maxE)
        centEs = [0.5 * (e1 + e2) for e1, e2 in zip(modEs[0:-1], modEs[1:])]
        '''Get the model.'''
        mysrc = pyLike.PointSource_cast(self.MIN[srcName].src)
        spec = [
            float(1000. * mysrc.spectrum()(pyLike.dArg(x))) for x in centEs
        ]

        if (self.MIN.covariance is None):
            print "Whoa, you didn't compute the covariance yet..."
            bt = [0]
        else:
            bt = []
            covArray = np.array(self.MIN.covariance)
            srcCovArray = []
            par_index_map = {}
            indx = 0
            for src in self.MIN.sourceNames():
                parNames = pyLike.StringVector()
                self.MIN[src].src.spectrum().getFreeParamNames(parNames)
                for par in parNames:
                    par_index_map['::'.join((src, par))] = indx
                    indx += 1
            srcPars = pyLike.StringVector()
            self.MIN[srcName].src.spectrum().getFreeParamNames(srcPars)
            pars = ['::'.join((srcName, x)) for x in srcPars]
            for xpar in pars:
                ix = par_index_map[xpar]
                srcCovArray.append(
                    [covArray[ix][par_index_map[ypar]] for ypar in pars])
            cov = np.array(srcCovArray)
            ''' The whole point here is to get the srcCovArray.'''
            for x in centEs:
                arg = pyLike.dArg(x)
                partials = np.array(
                    [mysrc.spectrum().derivByParam(arg, y) for y in srcPars])
                val = np.sqrt(np.dot(partials, np.dot(cov, partials)))
                '''These should come out same as the model so convert to ph/cm^2/s/GeV as well.'''
                bt += [float(1000. * val)]
        return centEs, bt, spec
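The bowtie half-width computed in the loop above is standard linear error propagation: with the partial derivatives p of dN/dE with respect to the free source parameters and their covariance matrix C, the 1-sigma model uncertainty is sqrt(p . C . p). A toy numpy sketch with invented numbers, purely illustrative:

import numpy as np

# illustrative 2-parameter case: partials of dN/dE at one energy and the
# fitted parameter covariance (values are made up for the sketch)
partials = np.array([1.2e-12, -3.4e-13])
cov = np.array([[2.5e-25, 1.0e-26],
                [1.0e-26, 4.0e-26]])

sigma_dnde = np.sqrt(np.dot(partials, np.dot(cov, partials)))  # same form as the loop above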
Example #5
    def calcBowtie(self,srcName,minE,maxE,numBins):
        
        '''This is derived from T. Johnson's likeSED code which was in turn
        derived from D. Sanchez's pyUnfoldPlot code which was probably
        based on some code developed by J. Chiang.  '''

        '''make some energy bounds for the fit, same max and min as for the
        bands before but with more bins.'''

        modEs=qU.log_array(numBins,minE,maxE)
        centEs=[0.5*(e1+e2) for e1,e2 in zip(modEs[0:-1],modEs[1:])]

        '''Get the model.'''
        mysrc=pyLike.PointSource_cast(self.MIN[srcName].src)
        spec=[float(1000.*mysrc.spectrum()(pyLike.dArg(x))) for x in centEs]

        if(self.MIN.covariance is None):
            print "Whoa, you didn't compute the covariance yet..."
            bt=[0]
        else:
            bt=[]
            covArray=np.array(self.MIN.covariance)
            srcCovArray=[]
            par_index_map={}
            indx=0
            for src in self.MIN.sourceNames():
                parNames=pyLike.StringVector()
                self.MIN[src].src.spectrum().getFreeParamNames(parNames)
                for par in parNames:
                    par_index_map['::'.join((src,par))]=indx
                    indx +=1
            srcPars=pyLike.StringVector()
            self.MIN[srcName].src.spectrum().getFreeParamNames(srcPars)
            pars=['::'.join((srcName,x)) for x in srcPars]
            for xpar in pars:
                ix=par_index_map[xpar]
                srcCovArray.append([covArray[ix][par_index_map[ypar]] for ypar in pars])
            cov=np.array(srcCovArray)
            ''' The whole point here is to get the srcCovArray.'''
            for x in centEs:
                arg=pyLike.dArg(x)
                partials=np.array([mysrc.spectrum().derivByParam(arg,y) for y in srcPars])
                val=np.sqrt(np.dot(partials,np.dot(cov,partials)))
                '''These should come out same as the model so convert to ph/cm^2/s/GeV as well.'''
                bt+=[float(1000.*val)]
        return centEs,bt,spec
Example #6
 def get_dnde_mev_gtlike(spectrum, energies):
     """ Returns the spectrum in units of ph/cm^2/s/MeV. """
     if isinstance(energies, collections.Iterable):
         return np.asarray([
             SpectrumPlotter.get_dnde_mev_gtlike(spectrum, i)
             for i in energies
         ])
     return spectrum(pyLikelihood.dArg(energies))
Example #7
    def get_dnde_error_mev_gtlike(spectrum,covariance_matrix,energies):
        """ asume energy in mev and return flux in units of ph/cm**2/s/MeV. """
        from . models import gtlike_unscale_all_parameters
        spectrum = gtlike_unscale_all_parameters(spectrum)

        dnde_err = np.empty_like(energies)
        for i,energy in enumerate(energies):

            # method taken from pyLikelihood.FluxDensity
            srcpars = pyLikelihood.StringVector()
            spectrum.getParamNames(srcpars)
            arg = pyLikelihood.dArg(energy)
            partials = np.array([spectrum.derivByParam(arg, x) for x in srcpars])
            dnde_err[i] = np.sqrt(np.dot(partials, np.dot(covariance_matrix, partials)))
        return dnde_err
Example #8
    def MakeSEDError(self, pars):
        """@todo: document me"""
        estep = np.log(pars.Emax / pars.Emin) / (pars.N - 1)
        energies = pars.Emin * np.exp(estep * np.arange(np.float(pars.N)))
        err = np.zeros(pars.N)
        j = 0
        for ene in energies:
            arg = pyLikelihood.dArg(ene)
            partials = np.zeros(len(self.srcpars))
            for i in xrange(len(self.srcpars)):
                x = self.srcpars[i]
                partials[i] = self.ptsrc.spectrum().derivByParam(arg, x)
            err[j] = np.sqrt(np.dot(partials, np.dot(self.covar, partials)))
            j += 1

        return MEV_TO_ERG * energies ** 2 * err  # MeV to erg
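MEV_TO_ERG is not defined in this snippet; a plausible definition of the missing module-level constant (1 MeV = 1.602176634e-6 erg), stated here only as an assumption:

MEV_TO_ERG = 1.602176634e-6  # erg per MeV (assumed definition, not shown in the snippet)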
Example #9
    def get_dnde_error_mev_gtlike(spectrum, covariance_matrix, energies):
        """ asume energy in mev and return flux in units of ph/cm**2/s/MeV. """
        from .models import gtlike_unscale_all_parameters
        spectrum = gtlike_unscale_all_parameters(spectrum)

        dnde_err = np.empty_like(energies)
        for i, energy in enumerate(energies):

            # method taken from pyLikelihood.FluxDensity
            srcpars = pyLikelihood.StringVector()
            spectrum.getParamNames(srcpars)
            arg = pyLikelihood.dArg(energy)
            partials = np.array(
                [spectrum.derivByParam(arg, x) for x in srcpars])
            dnde_err[i] = np.sqrt(
                np.dot(partials, np.dot(covariance_matrix, partials)))
        return dnde_err
Example #10
 def __call__(self, ee):
     foo = FunctionWrapper(lambda x: self.func.value(pyLike.dArg(x)))
     return foo(ee)
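FunctionWrapper is not shown in this example; if, as its use here suggests, it simply maps a scalar function over array input, numpy.vectorize gives a rough stand-in. This is an assumption about its behaviour, not the actual class:

import numpy as np
import pyLikelihood as pyLike

# hedged stand-in for FunctionWrapper: evaluate a scalar spectrum on arrays
def eval_on_energies(func, ee):
    scalar = lambda x: func.value(pyLike.dArg(x))
    return np.vectorize(scalar)(ee)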
Example #11
 def get_dnde_mev_gtlike(spectrum,energies):
     """ Returns the spectrum in units of ph/cm^2/s/MeV. """
     if isinstance(energies, collections.Iterable):
         return np.asarray([SpectrumPlotter.get_dnde_mev_gtlike(spectrum,i) for i in energies])
     return spectrum(pyLikelihood.dArg(energies))
Example #12
 def dNde(self, energy):
     arg = pyLikelihood.dArg(energy)
     return self.ptsrc.spectrum()(arg)
Example #13
def dNde(energy, Fit, name):
    '''Compute the dN/dE value at energy E for the source name'''
    import pyLikelihood
    ptsrc = pyLikelihood.PointSource_cast(Fit[name].src)
    arg = pyLikelihood.dArg(energy)
    return ptsrc.spectrum()(arg)
Example #14
 def error(self, energy):
     arg = pyLike.dArg(energy)
     partials = num.array(
         [self.src.spectrum().derivByParam(arg, x) for x in self.srcpars])
     return num.sqrt(num.dot(partials, num.dot(self.covar, partials)))
Example #15
 def value(self, energy):
     arg = pyLike.dArg(energy)
     return self.src.spectrum()(arg)
Example #16
    def _make_sed(self, name, **config):

        bin_index = config['bin_index']
        use_local_index = config['use_local_index']
        free_background = config['free_background']
        free_radius = config['free_radius']
        ul_confidence = config['ul_confidence']
        cov_scale = config['cov_scale']
        loge_bins = config['loge_bins']

        if not loge_bins or loge_bins is None:
            loge_bins = self.log_energies
        else:
            loge_bins = np.array(loge_bins)

        nbins = len(loge_bins) - 1
        max_index = 5.0
        min_flux = 1E-30
        npts = self.config['gtlike']['llscan_npts']
        loge_bounds = self.loge_bounds

        # Output Dictionary
        o = {'name': name,
             'loge_min': loge_bins[:-1],
             'loge_max': loge_bins[1:],
             'loge_ctr': 0.5 * (loge_bins[:-1] + loge_bins[1:]),
             'loge_ref': 0.5 * (loge_bins[:-1] + loge_bins[1:]),
             'e_min': 10 ** loge_bins[:-1],
             'e_max': 10 ** loge_bins[1:],
             'e_ctr': 10 ** (0.5 * (loge_bins[:-1] + loge_bins[1:])),
             'e_ref': 10 ** (0.5 * (loge_bins[:-1] + loge_bins[1:])),
             'ref_flux': np.zeros(nbins),
             'ref_eflux': np.zeros(nbins),
             'ref_dnde': np.zeros(nbins),
             'ref_dnde_e_min': np.zeros(nbins),
             'ref_dnde_e_max': np.zeros(nbins),
             'ref_e2dnde': np.zeros(nbins),
             'ref_npred': np.zeros(nbins),
             'norm': np.zeros(nbins),
             'flux': np.zeros(nbins),
             'eflux': np.zeros(nbins),
             'dnde': np.zeros(nbins),
             'e2dnde': np.zeros(nbins),
             'index': np.zeros(nbins),
             'npred': np.zeros(nbins),
             'ts': np.zeros(nbins),
             'loglike': np.zeros(nbins),
             'norm_scan': np.zeros((nbins, npts)),
             'dloglike_scan': np.zeros((nbins, npts)),
             'loglike_scan': np.zeros((nbins, npts)),
             'fit_quality': np.zeros(nbins),
             'fit_status': np.zeros(nbins),
             'correlation': {},
             'model_flux': {},
             'config': config
             }

        for t in ['norm', 'flux', 'eflux', 'dnde', 'e2dnde']:
            o['%s_err' % t] = np.zeros(nbins) * np.nan
            o['%s_err_hi' % t] = np.zeros(nbins) * np.nan
            o['%s_err_lo' % t] = np.zeros(nbins) * np.nan
            o['%s_ul95' % t] = np.zeros(nbins) * np.nan
            o['%s_ul' % t] = np.zeros(nbins) * np.nan

        saved_state = LikelihoodState(self.like)
        source = self.components[0].like.logLike.getSource(str(name))

        # Perform global spectral fit
        self._latch_free_params()
        self.free_sources(False, pars='shape', loglevel=logging.DEBUG)
        self.free_source(name, pars=config.get('free_pars', None),
                         loglevel=logging.DEBUG)
        fit_output = self.fit(loglevel=logging.DEBUG, update=False,
                              min_fit_quality=2)
        o['model_flux'] = self.bowtie(name)
        spectral_pars = gtutils.get_function_pars_dict(source.spectrum())
        o['SpectrumType'] = self.roi[name]['SpectrumType']
        o.update(model_utils.pars_dict_to_vectors(o['SpectrumType'],
                                                  spectral_pars))

        param_names = gtutils.get_function_par_names(o['SpectrumType'])
        npar = len(param_names)
        o['param_covariance'] = np.empty((npar, npar), dtype=float) * np.nan

        pmask0 = np.empty(len(fit_output['par_names']), dtype=bool)
        pmask0.fill(False)
        pmask1 = np.empty(npar, dtype=bool)
        pmask1.fill(False)
        for i, pname in enumerate(param_names):

            for j, pname2 in enumerate(fit_output['par_names']):
                if name != fit_output['src_names'][j]:
                    continue
                if pname != pname2:
                    continue
                pmask0[j] = True
                pmask1[i] = True

        src_cov = fit_output['covariance'][pmask0, :][:, pmask0]
        o['param_covariance'][np.ix_(pmask1, pmask1)] = src_cov
        o['param_correlation'] = utils.cov_to_correlation(
            o['param_covariance'])

        for i, pname in enumerate(param_names):
            o['param_covariance'][i, :] *= spectral_pars[pname]['scale']
            o['param_covariance'][:, i] *= spectral_pars[pname]['scale']

        self._restore_free_params()

        self.logger.info('Fitting SED')

        # Setup background parameters for SED
        self.free_sources(False, pars='shape')
        self.free_norm(name)

        if not free_background:
            self.free_sources(free=False, loglevel=logging.DEBUG)

        if free_radius is not None:
            diff_sources = [s.name for s in self.roi.sources if s.diffuse]
            skydir = self.roi[name].skydir
            free_srcs = [s.name for s in
                         self.roi.get_sources(skydir=skydir,
                                              distance=free_radius,
                                              exclude=diff_sources)]
            self.free_sources_by_name(free_srcs, pars='norm',
                                      loglevel=logging.DEBUG)

        if cov_scale is not None:
            self._latch_free_params()
            self.zero_source(name)
            self.fit(loglevel=logging.DEBUG, update=False)
            srcNames = list(self.like.sourceNames())
            srcNames.remove(name)
            self.constrain_norms(srcNames, cov_scale)
            self.unzero_source(name)
            self._restore_free_params()

        # Precompute fluxes in each bin from global fit
        gf_bin_flux = []
        gf_bin_index = []
        for i, (logemin, logemax) in enumerate(zip(loge_bins[:-1],
                                                   loge_bins[1:])):

            emin = 10 ** logemin
            emax = 10 ** logemax
            delta = 1E-5
            f = self.like[name].flux(emin, emax)
            f0 = self.like[name].flux(emin * (1 - delta), emin * (1 + delta))
            f1 = self.like[name].flux(emax * (1 - delta), emax * (1 + delta))

            if f0 > min_flux and f1 > min_flux:
                g = 1 - np.log10(f0 / f1) / np.log10(emin / emax)
                gf_bin_index += [g]
                gf_bin_flux += [f]
            else:
                gf_bin_index += [max_index]
                gf_bin_flux += [min_flux]

        old_spectrum = source.spectrum()
        old_pars = copy.deepcopy(self.roi[name].spectral_pars)
        old_type = self.roi[name]['SpectrumType']

        spectrum_pars = {
            'Prefactor':
                {'value': 1.0, 'scale': 1E-13, 'min': 1E-10,
                    'max': 1E10, 'free': True},
            'Index':
                {'value': 2.0, 'scale': -1.0, 'min': 0.0, 'max': 5.0, 'free': False},
            'Scale':
                {'value': 1E3, 'scale': 1.0, 'min': 1., 'max': 1E6, 'free': False},
        }

        self.set_source_spectrum(str(name), 'PowerLaw',
                                 spectrum_pars=spectrum_pars,
                                 update_source=False)

        src_norm_idx = -1
        free_params = self.get_params(True)
        for j, p in enumerate(free_params):
            if not p['is_norm']:
                continue
            if p['is_norm'] and p['src_name'] == name:
                src_norm_idx = j

            o['correlation'][p['src_name']] = np.zeros(nbins) * np.nan

        self._fitcache = None

        for i, (logemin, logemax) in enumerate(zip(loge_bins[:-1],
                                                   loge_bins[1:])):

            logectr = 0.5 * (logemin + logemax)
            emin = 10 ** logemin
            emax = 10 ** logemax
            ectr = 10 ** logectr
            ectr2 = ectr**2

            saved_state_bin = LikelihoodState(self.like)
            if use_local_index:
                o['index'][i] = -min(gf_bin_index[i], max_index)
            else:
                o['index'][i] = -bin_index

            self.set_norm(name, 1.0, update_source=False)
            self.set_parameter(name, 'Index', o['index'][i], scale=1.0,
                               update_source=False)
            self.like.syncSrcParams(str(name))

            ref_flux = self.like[name].flux(emin, emax)

            o['ref_flux'][i] = self.like[name].flux(emin, emax)
            o['ref_eflux'][i] = self.like[name].energyFlux(emin, emax)
            o['ref_dnde'][i] = self.like[name].spectrum()(pyLike.dArg(ectr))
            o['ref_dnde_e_min'][i] = self.like[
                name].spectrum()(pyLike.dArg(emin))
            o['ref_dnde_e_max'][i] = self.like[
                name].spectrum()(pyLike.dArg(emax))
            o['ref_e2dnde'][i] = o['ref_dnde'][i] * ectr2
            cs = self.model_counts_spectrum(
                name, logemin, logemax, summed=True)
            o['ref_npred'][i] = np.sum(cs)

            normVal = self.like.normPar(name).getValue()
            flux_ratio = gf_bin_flux[i] / ref_flux
            newVal = max(normVal * flux_ratio, 1E-10)
            self.set_norm(name, newVal, update_source=False)
            self.set_norm_bounds(name, [newVal * 1E-6, newVal * 1E4])

            self.like.syncSrcParams(str(name))
            self.free_norm(name)
            self.logger.debug('Fitting %s SED from %.0f MeV to %.0f MeV' %
                              (name, emin, emax))
            self.set_energy_range(logemin, logemax)

            fit_output = self._fit(**config['optimizer'])
            free_params = self.get_params(True)
            for j, p in enumerate(free_params):

                if not p['is_norm']:
                    continue

                o['correlation'][p['src_name']][i] = \
                    fit_output['correlation'][src_norm_idx, j]

            o['fit_quality'][i] = fit_output['fit_quality']
            o['fit_status'][i] = fit_output['fit_status']

            flux = self.like[name].flux(emin, emax)
            eflux = self.like[name].energyFlux(emin, emax)
            dnde = self.like[name].spectrum()(pyLike.dArg(ectr))

            o['norm'][i] = flux / o['ref_flux'][i]
            o['flux'][i] = flux
            o['eflux'][i] = eflux
            o['dnde'][i] = dnde
            o['e2dnde'][i] = dnde * ectr2

            cs = self.model_counts_spectrum(name, logemin,
                                            logemax, summed=True)
            o['npred'][i] = np.sum(cs)
            o['loglike'][i] = fit_output['loglike']

            lnlp = self.profile_norm(name, logemin=logemin, logemax=logemax,
                                     savestate=True, reoptimize=True,
                                     npts=npts, optimizer=config['optimizer'])

            o['ts'][i] = max(
                2.0 * (fit_output['loglike'] - lnlp['loglike'][0]), 0.0)
            o['loglike_scan'][i] = lnlp['loglike']
            o['dloglike_scan'][i] = lnlp['dloglike']
            o['norm_scan'][i] = lnlp['flux'] / ref_flux

            ul_data = utils.get_parameter_limits(
                lnlp['flux'], lnlp['dloglike'])

            o['norm_err_hi'][i] = ul_data['err_hi'] / ref_flux
            o['norm_err_lo'][i] = ul_data['err_lo'] / ref_flux

            if np.isfinite(ul_data['err_lo']):
                o['norm_err'][i] = 0.5 * (ul_data['err_lo'] +
                                          ul_data['err_hi']) / ref_flux
            else:
                o['norm_err'][i] = ul_data['err_hi'] / ref_flux

            o['norm_ul95'][i] = ul_data['ul'] / ref_flux

            ul_data = utils.get_parameter_limits(lnlp['flux'],
                                                 lnlp['dloglike'],
                                                 cl_limit=ul_confidence)
            o['norm_ul'][i] = ul_data['ul'] / ref_flux

            saved_state_bin.restore()

        for t in ['flux', 'eflux', 'dnde', 'e2dnde']:

            o['%s_err' % t] = o['norm_err'] * o['ref_%s' % t]
            o['%s_err_hi' % t] = o['norm_err_hi'] * o['ref_%s' % t]
            o['%s_err_lo' % t] = o['norm_err_lo'] * o['ref_%s' % t]
            o['%s_ul95' % t] = o['norm_ul95'] * o['ref_%s' % t]
            o['%s_ul' % t] = o['norm_ul'] * o['ref_%s' % t]

        self.set_energy_range(loge_bounds[0], loge_bounds[1])
        self.set_source_spectrum(str(name), old_type,
                                 spectrum_pars=old_pars,
                                 update_source=False)

        saved_state.restore()
        self._sync_params(name)

        if cov_scale is not None:
            self.remove_priors()

        return o
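_make_sed is an internal method of fermipy's GTAnalysis and is normally reached through the public sed() call; a minimal usage sketch, where the configuration file and source name are illustrative assumptions:

from fermipy.gtanalysis import GTAnalysis

gta = GTAnalysis('config.yaml')   # assumed analysis configuration
gta.setup()
gta.fit()
# source name is a placeholder; keyword values mirror the config keys read above
sed = gta.sed('sourceA', loge_bins=None, free_background=False)
print(sed['e_ctr'], sed['dnde'], sed['dnde_err'])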
Example #17
def getFit(likeIn,minE,maxE,numBins,prtMod,ENERGIES=[],expCorrect=False,wx=False):
	#make some energy bounds for the fit, same max and min as for the bands before but with more bins
	if ENERGIES==[]:
		modEs=log_array(numBins,minE,maxE)
	else:
		modEs=ENERGIES
	centEs=[0.5*(e1+e2) for e1,e2 in zip(modEs[0:-1],modEs[1:])]
	#for i in range(0,len(modEs)-1):
	#	centEs+=[0.5*(modEs[i]+modEs[i+1])]
	#most of the following (getting model and bowtie) is taken directly from David Sanchez's pyUnfoldPlot with some minor stylistic changes
	
	#check if one needs to do exposure correction to account for not using the full phase
	phCorr=likeIn.phCorr
	ubAn=likeIn.ubAn
	if expCorrect:
		for src in ubAn.sourceNames():
			par=ubAn.normPar(src)
			par.setValue(par.getValue()/phCorr)
			par.setError(par.error()/phCorr)
		#updates the values and errors of the normalization parameters...will this adequately account for bowtie info?
	
	#get the model
	mysrc=pyLike.PointSource_cast(likeIn.ubAn[likeIn.source].src)
	spec=[float(1000.*mysrc.spectrum()(pyLike.dArg(x))) for x in centEs]
	#for x in centEs:
	#	arg=pyLike.dArg(x)
	#	val=mysrc.spectrum()(arg) #gives (I believe) dN/dE spectrum in ph/cm^2/s/MeV so need to convert to ph/cm^2/s/GeV
	#	spec+=[float(1000.*val)]
	
	if(likeIn.ubAn.covariance is None):
		bt=[0]
	
	else:
		bt=[]
		covArray=num.array(likeIn.ubAn.covariance)
		srcCovArray=[]
		par_index_map={}
		indx=0
		for src in ubAn.sourceNames():
			parNames=pyLike.StringVector()
			ubAn[src].src.spectrum().getFreeParamNames(parNames)
			for par in parNames:
				par_index_map['::'.join((src,par))]=indx
				indx +=1
		srcPars=pyLike.StringVector()
		ubAn[likeIn.source].src.spectrum().getFreeParamNames(srcPars)
		pars=['::'.join((likeIn.source,x)) for x in srcPars]
		for xpar in pars:
			ix=par_index_map[xpar]
			srcCovArray.append([covArray[ix][par_index_map[ypar]] for ypar in pars])
		cov=num.array(srcCovArray)
		#the whole point here is to get the srcCovArray
		for x in centEs:
			arg=pyLike.dArg(x)
			partials=num.array([mysrc.spectrum().derivByParam(arg,y) for y in srcPars])
			val=num.sqrt(num.dot(partials,num.dot(cov,partials))) #these should come out same as the model so convert to ph/cm^2/s/GeV as well
			bt+=[float(1000.*val)]
	if prtMod==1:
		myfile=open('likeSED_%s_fullFitout.txt'%likeIn.source.replace(' ','_'),'w')
		print 'Full energy range model for %s:' %likeIn.source
		myfile.write('Full energy range model for %s:\n' %likeIn.source)
		print ubAn[likeIn.source]
		myfile.write('%s\n'%ubAn[likeIn.source])
		if ubAn.covariance is None:
			print 'Flux %.1f-%.1f GeV %.1e cm^-2 s^-1' %(likeIn.ft1EBounds[0]/1000.,likeIn.ft1EBounds[1]/1000.,ubAn.flux(likeIn.source,emin=likeIn.ft1EBounds[0],emax=likeIn.ft1EBounds[1]))
			myfile.write('(Covariance Matrix not calculated)\n')
			myfile.write('Flux %.1f-%.1f GeV %.1e cm^-2 s^-1\n' %(likeIn.ft1EBounds[0]/1000.,likeIn.ft1EBounds[1]/1000.,ubAn.flux(likeIn.source,emin=likeIn.ft1EBounds[0],emax=likeIn.ft1EBounds[1])))
		else:
			print 'Flux %.1f-%.1f GeV %.1e +/- %.1e cm^-2 s^-1' %(likeIn.ft1EBounds[0]/1000.,likeIn.ft1EBounds[1]/1000.,ubAn.flux(likeIn.source,emin=likeIn.ft1EBounds[0],emax=likeIn.ft1EBounds[1]),ubAn.fluxError(likeIn.source,emin=likeIn.ft1EBounds[0],emax=likeIn.ft1EBounds[1]))
			myfile.write('Flux %.1f-%.1f GeV %.1e +/- %.1e cm^-2 s^-1\n' %(likeIn.ft1EBounds[0]/1000.,likeIn.ft1EBounds[1]/1000.,ubAn.flux(likeIn.source,emin=likeIn.ft1EBounds[0],emax=likeIn.ft1EBounds[1]),ubAn.fluxError(likeIn.source,emin=likeIn.ft1EBounds[0],emax=likeIn.ft1EBounds[1])))
		print "Test Statistic",ubAn.Ts(likeIn.source)
		myfile.write('Test Statistic %.2f'%ubAn.Ts(likeIn.source))
		myfile.close()
		if wx:
			ubAn.writeXml('%s_fullErange_fitmodel.xml'%likeIn.source.replace(' ','_'))
	return bt,spec
Example #18
def residualsPlot(sed,plot):
	ecent=sed.centers
	Emins=sed.likeIn.bins[0]
	Emaxs=sed.likeIn.bins[1]
	fluxPts=sed.data[0]
	fluxErrs=sed.data[1]
	eminus=[]
	eplus=[]
	for x,y,z in zip(ecent,Emins,Emaxs):
		eminus+=[x-(y/1000)]
		eplus+=[(z/1000)-x]
	Eminus=num.array(eminus)
	Eplus=num.array(eplus)
	modelPts=[]
	modelErrs=[]
	#get the model
	mysrc=pyLike.PointSource_cast(sed.likeIn.ubAn[sed.likeIn.source].src)
	for x in ecent: #using the same center energies as used in the data
		MeV=x*1000. #ecent is in GeV, but mysrc.spectrum assumes MeV
		arg=pyLike.dArg(MeV)
		val=mysrc.spectrum()(arg)
		modelPts+=[float(val)] 
	#allow for the possibility that the model points have errors too
	if(sed.likeIn.ubAn.covariance is None):
		modelErrs=[0]*len(modelPts)
	else:
		covArray=num.array(sed.likeIn.ubAn.covariance)
		srcCovArray=[]
		par_index_map={}
		indx=0
		for src in sed.likeIn.ubAn.sourceNames():
			parNames=pyLike.StringVector()
			sed.likeIn.ubAn[src].src.spectrum().getFreeParamNames(parNames)
			for par in parNames:
				par_index_map['::'.join((src,par))]=indx
				indx +=1
		srcPars=pyLike.StringVector()
		sed.likeIn.ubAn[sed.likeIn.source].src.spectrum().getFreeParamNames(srcPars)
		pars=['::'.join((sed.likeIn.source,x)) for x in srcPars]
		for xpar in pars:
			ix=par_index_map[xpar]
			srcCovArray.append([covArray[ix][par_index_map[ypar]] for ypar in pars])
		cov=num.array(srcCovArray)
		#the whole point here is to get the srcCovArray
		for x in ecent:
			MeV=x*1000.
			arg=pyLike.dArg(MeV)
			partials=num.array([mysrc.spectrum().derivByParam(arg,y) for y in srcPars])
			val=num.sqrt(num.dot(partials,num.dot(cov,partials))) #these should come out same as the model so convert to ph/cm^2/s/GeV as well
			modelErrs+=[float(val)]
	#calculate the residuals
	resids=[]
	residErrs=[]
	if len(sed.data[3])==0:
		for a,b,c,d in zip(fluxPts,fluxErrs,modelPts,modelErrs):
			resids+=[(a/c)-1.] #do (data-model)/model which works out to (data/model)-1
			residErrs+=[(a/c)*num.sqrt((d/c)**2+(b/a)**2)]
	else:
		newfluxPts=[]
		newfluxErrs=[]
		for m,M,f,e,g,c in zip(Emins,Emaxs,fluxPts,fluxErrs,sed.data[3],ecent):
			newfluxPts+=[f*(-g+1)*(c*1000.)**(-g)/(M**(-g+1)-m**(-g+1))] #works out to be prefactor assuming Scale parameter (E0) of c (center energy of bin)
			newfluxErrs+=[e*(-g+1)*(c*1000.)**(-g)/(M**(-g+1)-m**(-g+1))]#similar for the errors
		for a,b,c,d in zip(newfluxPts,newfluxErrs,modelPts,modelErrs):
			resids+=[(a/c)-1.] #do (data-model)/model which works out to (data/model)-1
			residErrs+=[(a/c)*num.sqrt((d/c)**2+(b/a)**2)]
	#now lets make the plots
	Resids=num.array(resids)
	RErrors=num.array(residErrs)
	energies=num.array(ecent)
	rgraph=TGraphAsymmErrors(len(energies),energies,Resids,Eminus,Eplus,RErrors,RErrors)
	gStyle.SetOptStat(0)
	gStyle.SetOptTitle(0)
	rhist=TH1F("rhist","",100,Emins[0]/1000.,Emaxs[-1]/1000.)
	rhist.GetYaxis().SetRangeUser(-1.,1.)
	rhist.SetLineStyle(2)
	rhist.SetFillColor(0)
	rhist.SetXTitle('Energy (GeV)')
	rhist.SetYTitle('(data-model)/model')
	rhist.GetXaxis().CenterTitle()
	rhist.GetYaxis().CenterTitle()
	rhist.SetMarkerStyle(20)
	rhist.SetMarkerSize(0.6)
	rcan=TCanvas('rcan','Residuals',700,350)
	rcan.SetFillColor(0)
	rcan.SetFrameBorderMode(0)
	rcan.SetBorderMode(0)
	rcan.SetTicks(1,1)
	rcan.SetLogx()
	#save as a .eps file
	rps=TPostScript('%s_%ibins_residuals.eps' %(sed.likeIn.source.replace(' ','_'),sed.likeIn.NBins),113)
	rcan.cd()
	rhist.Draw('')
	rgraph.Draw('psame')
	rps.Close()
	if(plot==True):
		SetOwnership(rcan,False)
		SetOwnership(rhist,False)
		SetOwnership(rgraph,False)
	#save as a root file
	rfile=TFile('%s_%ibins_residuals.root' %(sed.likeIn.source.replace(' ','_'),sed.likeIn.NBins),'RECREATE')
	rcan.Write()
	return
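The residuals above are r = data/model - 1, with the error propagated from independent data and model uncertainties as sigma_r = (data/model) * sqrt((sigma_model/model)**2 + (sigma_data/data)**2). A small standalone sketch of the same arithmetic:

import numpy as np

def fractional_residual(data, data_err, model, model_err):
    """(data - model) / model with propagated 1-sigma error."""
    data, data_err = np.asarray(data, float), np.asarray(data_err, float)
    model, model_err = np.asarray(model, float), np.asarray(model_err, float)
    resid = data / model - 1.0
    resid_err = (data / model) * np.sqrt((model_err / model) ** 2 +
                                         (data_err / data) ** 2)
    return resid, resid_err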
Example #19
 def get_dnde(spectrum, energies):
     """ Returns the spectrum in units of ph/cm^2/s/MeV. """
     return np.asarray([spectrum(dArg(i)) for i in energies])