Example #1
def test_scan_poiss():
    n = nptfit.NPTF(tag='Test_Poiss')

    fermi_data = np.array([2, 1, 1, 1, 4, 10]).astype(np.int32)
    fermi_exposure = np.array([1., 1., 1., 2., 2., 2.])
    n.load_data(fermi_data, fermi_exposure)

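    # Mask convention: pixels set to True are excluded from the analysis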
    analysis_mask = np.array([False, False, False, False, False, True])
    n.load_mask(analysis_mask)

    dif = np.array([1., 2., 3., 4., 5., 6.])
    iso = np.array([1., 1., 1., 1., 1., 1.])

    n.add_template(dif, 'dif')
    n.add_template(iso, 'iso')
    n.add_template(iso, 'iso_f', units='flux')
    n.add_template(iso, 'iso_PS', units='PS')

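    # add_poiss_model arguments: template key, LaTeX tag, prior range, log_prior
    # (False means a linear-flat prior over the given range)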
    n.add_poiss_model('dif', '$A_\mathrm{dif}$', [0, 30], False)
    n.add_poiss_model('iso', '$A_\mathrm{iso}$', [0, 5], False)

    n.configure_for_scan()

    n.perform_scan(nlive=50)

    n.load_scan()

    n = nptfit.NPTF(tag='Test_Poiss')

    n.load_data(fermi_data, fermi_exposure)

    n.add_template(dif, 'dif')

    n.add_poiss_model('dif', '$A_\mathrm{dif}$', [0, 30], False)

    n.configure_for_scan()

    n.perform_scan(nlive=50)

    n.load_scan()
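
A minimal sketch of how the loaded scan above might be inspected afterwards, using NPTFit's dnds_analysis module (the Analysis wrapper and make_triangle corner plot are assumed from the standard NPTFit analysis interface; they are not part of the original test):

from NPTFit import dnds_analysis  # module for analyzing the results

an = dnds_analysis.Analysis(n)  # wrap the scanned NPTF instance
an.make_triangle()              # corner plot of the Poissonian posteriors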
Example #2
    def __init__(self,A_array,DM_template,counts,exposure,template_dict,ps_mask,ell,b,rad=5,tag='tmp',nlive=500): 
        #rad in deg
        self.DM_template = DM_template
        self.A_array = A_array

        self.ell = ell
        self.b = b
        self.rad = rad
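        # make_mask, compute_DM_intensity_base and setup_base_n are methods of
        # this class that are not shown in this snippet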
        self.make_mask(ps_mask)

        self.compute_DM_intensity_base(exposure)

        self.n = nptfit.NPTF(tag=tag)
        self.n.load_data(counts, exposure)
        self.n.load_mask(self.total_mask)

        self.setup_base_n(template_dict)
        self.perform_scan(500)
Example #3
    def __init__(self,counts,exposure,template_dict,ps_mask,ell,b,rad=20,tag='tmp',nlive=500): 
        #rad in deg
        self.ell = ell
        self.b = b
        self.rad = rad
        self.make_mask(ps_mask)

        self.n = nptfit.NPTF(tag=tag)
        self.n.load_data(counts, exposure)
        self.n.load_mask(self.total_mask)

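        # Only configure and run the scan if the mask leaves at least one active pixel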
        active_pixels = np.sum(np.logical_not(self.total_mask))
        if active_pixels > 0:
            self.configure_for_scan(template_dict)
            self.perform_scan(500)
            self.make_new_template()
        else:
            print "No data!"
            self.new_template_dict = None
Example #4
    def scan(self):

        ################
        # Fermi plugin #
        ################

        # Load the Fermi plugin - always load all energy bins, extract what is needed
        f_global = fp.fermi_plugin(maps_dir,
                                   fermi_data_dir=fermi_data_dir,
                                   work_dir=work_dir,
                                   CTB_en_min=0,
                                   CTB_en_max=40,
                                   nside=self.nside,
                                   eventclass=self.eventclass,
                                   eventtype=self.eventtype,
                                   newstyle=1,
                                   data_July16=True)

        # Load necessary templates
        f_global.add_diffuse_newstyle(comp=self.diff,
                                      eventclass=self.eventclass,
                                      eventtype=self.eventtype)
        f_global.add_iso()
        ps_temp = np.load(work_dir + '/DataFiles/PS-Maps/ps_map.npy')
        f_global.add_template_by_hand(comp='ps_model', template=ps_temp)

        ###################
        # Get DM halo map #
        ###################

        l = self.catalog.l.values[self.iobj]
        b = self.catalog.b.values[self.iobj]

        rs = self.catalog.rs.values[self.iobj] * 1e-3
        if self.boost:
            J0 = 10**self.catalog.mulog10J_inf.values[self.iobj]
        else:
            J0 = 10**self.catalog.mulog10Jnb_inf.values[self.iobj]
        mk = mkDMMaps.mkDMMaps(z=self.catalog.z[self.iobj],
                               r_s=rs,
                               J_0=J0,
                               ell=l * np.pi / 180,
                               b=b * np.pi / 180,
                               nside=self.nside,
                               use_boost=self.use_boost,
                               Burkert=self.Burkert)
        DM_template_base = mk.map

        #########################################
        # Loop over energy bins to get spectrum #
        #########################################

        # 10 deg mask for the analysis
        analysis_mask_base = cm.make_mask_total(mask_ring=True,
                                                inner=0,
                                                outer=10,
                                                ring_b=b,
                                                ring_l=l)

        # ROI where we will normalise our templates
        ROI_mask = cm.make_mask_total(mask_ring=True,
                                      inner=0,
                                      outer=2,
                                      ring_b=b,
                                      ring_l=l)
        ROI = np.where(ROI_mask == 0)[0]

        # Setup output: (energy bin) x (dif, iso, psc, DM) x (best-fit value, error)
        output_norms = np.zeros((self.emax + 1 - self.emin, 4, 2))

        for iebin, ebin in tqdm(enumerate(np.arange(self.emin, self.emax + 1)),
                                disable=1 - self.verbose):

            ######################
            # Templates and maps #
            ######################

            if self.verbose:
                print "At bin", ebin

            data = f_global.CTB_count_maps[ebin].astype(np.float64)
            # Add large scale mask to analysis mask
            els_str = [
                '0.20000000', '0.25178508', '0.31697864', '0.39905246',
                '0.50237729', '0.63245553', '0.79621434', '1.0023745',
                '1.2619147', '1.5886565', '2.0000000', '2.5178508',
                '3.1697864', '3.9905246', '5.0237729', '6.3245553',
                '7.9621434', '10.023745', '12.619147', '15.886565',
                '20.000000', '25.178508', '31.697864', '39.905246',
                '50.237729', '63.245553', '79.621434', '100.23745',
                '126.19147', '158.86565', '200.00000', '251.78508',
                '316.97864', '399.05246', '502.37729', '632.45553',
                '796.21434', '1002.3745', '1261.9147', '1588.6565'
            ]
            ls_mask_load = fits.open(
                '/tigress/nrodd/LargeObjMask/Allpscmask_3FGL-energy' +
                els_str[ebin] + 'large-obj.fits')
            ls_mask = np.array([
                np.round(val) for val in hp.ud_grade(
                    ls_mask_load[0].data, self.nside, power=0)
            ])
            analysis_mask = np.vectorize(bool)(analysis_mask_base + ls_mask)

            fermi_exposure = f_global.CTB_exposure_maps[ebin]

            DM_template = DM_template_base * fermi_exposure / np.sum(
                DM_template_base * fermi_exposure)
            ksi = ks.king_smooth(maps_dir,
                                 ebin,
                                 self.eventclass,
                                 self.eventtype,
                                 threads=1)
            DM_template_smoothed = ksi.smooth_the_map(DM_template)
            DM_intensity_base = np.sum(DM_template_smoothed / fermi_exposure)

            dif = f_global.template_dict[self.diff][ebin]
            iso = f_global.template_dict['iso'][ebin]
            psc = f_global.template_dict['ps_model'][ebin]

            # Get mean values in ROI
            dif_mu = np.mean(dif[ROI])
            iso_mu = np.mean(iso[ROI])
            psc_mu = np.mean(psc[ROI])
            DM_mu = np.mean(DM_template_smoothed[ROI])
            exp_mu = np.mean(fermi_exposure[ROI])

            ####################
            # NPTFit norm scan #
            ####################

            n = nptfit.NPTF(tag='norm_o' + str(self.iobj) + '_E' + str(ebin) +
                            self.mc_tag)
            n.load_data(data, fermi_exposure)

            n.load_mask(analysis_mask)

            n.add_template(dif, self.diff)
            n.add_template(iso, 'iso')
            n.add_template(psc, 'psc')
            n.add_template(DM_template_smoothed, 'DM')

            n.add_poiss_model(self.diff, '$A_\mathrm{dif}$', [0, 10], False)
            n.add_poiss_model('iso', '$A_\mathrm{iso}$', [0, 20], False)
            n.add_poiss_model('psc', '$A_\mathrm{psc}$', [0, 10], False)
            n.add_poiss_model('DM', '$A_\mathrm{DM}$', [0, 1000], False)

            n.configure_for_scan()

            ##########
            # Minuit #
            ##########
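            # Build keyword arguments in the iminuit 1.x style: each parameter name
            # gives an initial value, 'limit_<key>' the bounds, 'error_<key>' the step size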

            keys = n.poiss_model_keys
            limit_dict = {}
            init_val_dict = {}
            step_size_dict = {}
            for key in keys:
                if key == 'DM':
                    limit_dict['limit_' + key] = (0, 1000)
                else:
                    limit_dict['limit_' + key] = (0, 50)
                init_val_dict[key] = 0.0
                step_size_dict['error_' + key] = 1.0
            other_kwargs = {'print_level': self.verbose, 'errordef': 1}
            z = limit_dict.copy()
            z.update(other_kwargs)
            z.update(limit_dict)
            z.update(init_val_dict)
            z.update(step_size_dict)
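            # call_ll (defined elsewhere) presumably wraps n.ll into a function with
            # one named argument per floated normalization, as Minuit expects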
            f = call_ll(len(keys), n.ll, keys)
            m = Minuit(f, **z)
            m.migrad(ncall=30000, precision=1e-14)

            # Output spectra in E^2 dN/dE, in units of [GeV/cm^2/s/sr], as the mean within the 2 deg ROI
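            # Each entry is (best-fit normalization) x (ROI mean of template) /
            # (ROI mean of exposure) x E_mid^2 / dE; errors are scaled the same way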
            output_norms[iebin, 0, 0] = m.values[
                'p8'] * dif_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]
            output_norms[iebin, 0, 1] = m.errors[
                'p8'] * dif_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]

            output_norms[iebin, 1, 0] = m.values[
                'iso'] * iso_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]
            output_norms[iebin, 1, 1] = m.errors[
                'iso'] * iso_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]

            output_norms[iebin, 2, 0] = m.values[
                'psc'] * psc_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]
            output_norms[iebin, 2, 1] = m.errors[
                'psc'] * psc_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]

            output_norms[iebin, 3, 0] = m.values[
                'DM'] * DM_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]
            output_norms[iebin, 3, 1] = m.errors[
                'DM'] * DM_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]

            ###################################
            # NPTFit fixed DM and bkg profile #
            ###################################

            # Make the background sum and initiate the second scan
            # If there was no data, leave bkg_sum as 0
            bkg_sum = np.zeros(len(data))
            if np.sum(data * np.logical_not(analysis_mask)) != 0:
                for key in keys:
                    if key != 'DM':  # Don't add DM in here
                        if m.values[key] != 0:
                            bkg_sum += n.templates_dict[key] * m.values[key]
                        else:  # If zero, use ~parabolic error
                            bkg_sum += n.templates_dict[key] * m.errors[
                                key] / 2.

            nDM = nptfit.NPTF(tag='dm_o' + str(self.iobj) + '_E' + str(ebin) +
                              self.mc_tag)
            nDM.load_data(data, fermi_exposure)
            nDM.add_template(bkg_sum, 'bkg_sum')

            # If there is data use the analysis mask; otherwise only keep pixels where DM is non-zero
            if np.sum(data * np.logical_not(analysis_mask)) != 0:
                nDM.load_mask(analysis_mask)
            else:
                nodata_mask = DM_template_smoothed == 0
                nDM.load_mask(nodata_mask)
            nDM.add_poiss_model('bkg_sum',
                                '$A_\mathrm{bkg}$',
                                fixed=True,
                                fixed_norm=1.0)

        np.save(self.save_dir + 'spec_o' + str(self.iobj) + self.mc_tag,
                output_norms)
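
The snippet stops before the fixed-background instance nDM is actually used; Example #6 below completes the pattern by profiling the likelihood over a grid of DM normalizations inside the energy-bin loop, roughly as follows (A_ary is an assumed grid such as 10**np.linspace(-6, 6, 200)):

for iA, A in enumerate(A_ary):
    new_n = copy.deepcopy(nDM)
    new_n.add_template(A * DM_template_smoothed, 'DM')
    new_n.add_poiss_model('DM', 'DM', False, fixed=True, fixed_norm=1.0)
    new_n.configure_for_scan()
    max_LL = new_n.ll([])  # all parameters fixed, so no free arguments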
Example #5
# This file is called by Example10_HighLat_Batch.batch, and must be run before
# using Example10_HighLat_Analysis.ipynb
# The scan performs a run over the high latitude sky

# NB: this example makes use of the Fermi Data, which needs to already be
# installed. See Example 1 for details.

import numpy as np

from NPTFit import nptfit  # module for performing scan
from NPTFit import create_mask as cm  # module for creating the mask
from NPTFit import psf_correction as pc  # module for determining the PSF correction

n = nptfit.NPTF(tag='HighLat_Example')

fermi_data = np.load('fermi_data/fermidata_counts.npy').astype(np.int32)
fermi_exposure = np.load('fermi_data/fermidata_exposure.npy')
n.load_data(fermi_data, fermi_exposure)

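# band_mask_range=50 masks the Galactic plane, removing all pixels with |b| < 50 deg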
analysis_mask = cm.make_mask_total(band_mask=True, band_mask_range=50)

n.load_mask(analysis_mask)

dif = np.load('fermi_data/template_dif.npy')
iso = np.load('fermi_data/template_iso.npy')

n.add_template(dif, 'dif')
n.add_template(iso, 'iso')
n.add_template(np.ones(len(iso)), 'iso_np', units='PS')

n.add_poiss_model('dif', '$A_\mathrm{dif}$', [0, 30], False)
Example #6
    def scan(self):

        print("Getting into scan")

        ################
        # Fermi plugin #
        ################
        
        print("Loading Fermi plugin...")
        # Load the Fermi plugin - always load all energy bins, extract what is needed
        f_global = fp.fermi_plugin(maps_dir,
                                   fermi_data_dir=fermi_data_dir,
                                   work_dir=work_dir,
                                   CTB_en_min=0,
                                   CTB_en_max=40,
                                   nside=self.nside,
                                   eventclass=self.eventclass,
                                   eventtype=self.eventtype,
                                   newstyle=1,
                                   data_July16=True)
        print("... done")

        # Load necessary templates
        f_global.add_diffuse_newstyle(comp = self.diff,eventclass = self.eventclass, eventtype = self.eventtype) 
        f_global.add_iso()  
        ps_temp = np.load(work_dir + '/DataFiles/PS-Maps/ps_map.npy')
        f_global.add_template_by_hand(comp='ps_model',template=ps_temp)
        f_global.add_bubbles()

        # If Asimov normalize the templates and create a summed map
        if self.Asimov:
            norm_file = work_dir + '/DataFiles/Misc/P8UCVA_norm.npy' 
            f_global.use_template_normalization_file(norm_file,key_suffix='-0')
            Asimov_data = np.zeros((40,hp.nside2npix(self.nside)))
            for key in f_global.template_dict.keys():
                Asimov_data += np.array(f_global.template_dict[key]) 

        ###################
        # Get DM halo map #
        ###################

        print("Getting halo map...")
        if not self.randlocs:  # If not using random locations, take the catalog position
            l = self.catalog.l.values[self.iobj]
            b = self.catalog.b.values[self.iobj]
        else:
            badval = True
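            # Rejection-sample an isotropic sky position, keeping it only if it lies
            # more than 20 deg from the plane and outside the 3FGL point-source mask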
            while (badval):
                test_ell = np.random.uniform(0.,2*np.pi)
                test_b = np.arccos(np.random.uniform(-1.,1.))-np.pi/2.
                test_pixval = hp.ang2pix(self.nside, test_b+np.pi/2, test_ell)
                ps0p5_mask = np.load(work_dir + '/DataFiles/Misc/mask0p5_3FGL.npy') > 0

                # Check that the position is not masked by the plane or PS mask
                if ( (np.abs(test_b)*180./np.pi > 20. ) & (ps0p5_mask[test_pixval] == 0)):
                    badval = False
                    l = test_ell*180./np.pi
                    b = test_b*180./np.pi
            np.savetxt(self.save_dir + "/lb_obj"+str(self.iobj) + ".dat", np.array([l, b]))

        rs = self.catalog.rs.values[self.iobj]*1e-3
        if self.boost:
            J0 = 10**self.catalog.mulog10J_inf.values[self.iobj]
        else:
            J0 = 10**self.catalog.mulog10Jnb_inf.values[self.iobj]
        mk = mkDMMaps.mkDMMaps(z=self.catalog.z[self.iobj],
                               r_s=rs,
                               J_0=J0,
                               ell=l*np.pi/180,
                               b=b*np.pi/180,
                               nside=self.nside,
                               use_boost=self.use_boost,
                               Burkert=self.Burkert)
        DM_template_base = mk.map
        print("...done")

        #########################################
        # Loop over energy bins to get xsec LLs #
        #########################################

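        # Grid of 200 DM normalizations spanning 12 decades; the likelihood and the
        # corresponding DM intensity are tabulated on this grid in each energy bin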
        A_ary = 10**np.linspace(-6,6,200)
        LL_inten_ary = np.zeros((len(self.ebins)-1,len(A_ary)))
        inten_ary = np.zeros((len(self.ebins)-1,len(A_ary)))

        # 10 deg mask for the analysis
        analysis_mask = cm.make_mask_total(mask_ring = True, inner = 0, outer = 10, ring_b = b, ring_l = l)

        for iebin, ebin in tqdm(enumerate(np.arange(self.emin,self.emax+1)), disable = 1 - self.verbose):
            
            ######################
            # Templates and maps #
            ######################

            if self.verbose:
                print "At bin", ebin

            if self.imc != -1:
                data = np.load(mc_dir + 'MC_allhalos_p7_' + self.dm_string + '_v' + str(self.imc)+'.npy')[ebin].astype(np.float64)
            else:
                data = f_global.CTB_count_maps[ebin].astype(np.float64)

            fermi_exposure = f_global.CTB_exposure_maps[ebin]

            DM_template = DM_template_base*fermi_exposure/np.sum(DM_template_base*fermi_exposure)
            print("Loading smoothing class...")
            ksi = ks.king_smooth(maps_dir, ebin, self.eventclass, self.eventtype, threads=1)
            print("...done!")
            print("Beginning to smooth...")
            DM_template_smoothed = ksi.smooth_the_map(DM_template)
            print("...done!")
            DM_intensity_base = np.sum(DM_template_smoothed/fermi_exposure)
            
            dif = f_global.template_dict[self.diff][ebin]
            iso = f_global.template_dict['iso'][ebin]
            psc = f_global.template_dict['ps_model'][ebin]
            bub = f_global.template_dict['bubs'][ebin]

            # If doing Asimov this first scan is irrelevant, but it takes little time, so run it anyway
            
            ####################
            # NPTFit norm scan #
            ####################
            
            n = nptfit.NPTF(tag='norm_o'+str(self.iobj)+'_E'+str(ebin)+self.mc_tag)
            n.load_data(data, fermi_exposure)

            n.load_mask(analysis_mask)

            n.add_template(dif, self.diff)
            n.add_template(iso, 'iso')
            n.add_template(psc, 'psc')
            n.add_template(bub, 'bub')

            n.add_poiss_model(self.diff, '$A_\mathrm{dif}$', [0,10], False)
            n.add_poiss_model('iso', '$A_\mathrm{iso}$', [0,20], False)
            
            if (np.sum(bub*np.logical_not(analysis_mask)) != 0):
                n.add_poiss_model('bub', '$A_\mathrm{bub}$', [0,10], False)

            # # Add PS at halo location
            # ps_halo_map = np.zeros(hp.nside2npix(self.nside))
            # ps_halo_idx = hp.ang2pix(self.nside, np.pi/2. - b*np.pi/180., l*np.pi/180.) # ell and b are in rad
            # ps_halo_map[ps_halo_idx] = 1.
            # ps_halo_map_smoothed = ksi.smooth_the_map(ps_halo_map) # smooth it
            # n.add_template(ps_halo_map_smoothed,'ps_halo')
            # n.add_poiss_model('ps_halo', 'ps_halo', [0,100], False)

            if self.floatDM:
                if ebin >= 7: 
                    # Don't float DM in initial scan for < 1 GeV. Below here
                    # Fermi PSF is so large that we find the DM often picks up
                    # spurious excesses in MC.
                    n.add_template(DM_template_smoothed, 'DM')
                    n.add_poiss_model('DM', '$A_\mathrm{DM}$', [0,1000], False)

            if self.float_ps_together:
                n.add_poiss_model('psc', '$A_\mathrm{psc}$', [0,10], False)
            else:
                # Astropy-formatted coordinates of cluster
                c2 = SkyCoord("galactic", l=[l]*u.deg, b=[b]*u.deg)
                idx3fgl_10, _, _, _ = c2.search_around_sky(self.c3, 10*u.deg)
                idx3fgl_18, _, _, _ = c2.search_around_sky(self.c3, 18*u.deg)
                
                ps_map_outer = np.zeros(hp.nside2npix(self.nside))
                for i3fgl in idx3fgl_18:
                    ps_file = np.load(ps_indiv_dir + '/ps_temp_128_5_'+str(self.eventtype)+'_'+str(i3fgl)+'.npy')
                    ps_map = np.zeros(hp.nside2npix(self.nside))
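                    # Column 0 appears to hold pixel indices and column 1 the template
                    # values for this energy bin (inferred from how the file is used)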
                    ps_map[np.vectorize(int)(ps_file[::,ebin,0])] = ps_file[::,ebin,1]
                    if i3fgl in idx3fgl_10: # If within 10 degrees, float individually
                        n.add_template(ps_map, 'ps_'+str(i3fgl))
                        n.add_poiss_model('ps_'+str(i3fgl), '$A_\mathrm{ps'+str(i3fgl)+'}$', [0,10], False)
                    else: # Otherwise, add to be floated together
                        ps_map_outer += ps_map

                if np.sum(ps_map_outer) != 0:
                    n.add_template(ps_map_outer, 'ps_outer')
                    n.add_poiss_model('ps_outer', '$A_\mathrm{ps_outer}$', [0,10], False)
                
            n.configure_for_scan()

            ##########
            # Minuit #
            ##########

            # Skip this step if there is 0 data (higher energy bins)
            if np.sum(data*np.logical_not(analysis_mask)) != 0: 
                keys = n.poiss_model_keys
                limit_dict = {}
                init_val_dict = {}
                step_size_dict = {}
                for key in keys:
                    if key == 'DM':
                        limit_dict['limit_'+key] = (0,1000)
                    else:
                        limit_dict['limit_'+key] = (0,50)
                    init_val_dict[key] = 0.0
                    step_size_dict['error_'+key] = 1.0
                other_kwargs = {'print_level': self.verbose, 'errordef': 1}
                z = limit_dict.copy()
                z.update(other_kwargs)
                z.update(limit_dict)
                z.update(init_val_dict)
                z.update(step_size_dict)
                f = call_ll(len(keys),n.ll,keys)
                m = Minuit(f,**z)
                m.migrad(ncall=30000, precision=1e-14)
                
            ###################################
            # NPTFit fixed DM and bkg profile #
            ###################################
            
            # Make the background sum and initiate the second scan
            # If there was no data, leave bkg_sum as 0
            bkg_sum = np.zeros(len(data))
            if np.sum(data*np.logical_not(analysis_mask)) != 0:
                for key in keys:
                    if key != 'DM': # Don't add DM in here
                        if m.values[key] != 0:
                            bkg_sum += n.templates_dict[key]*m.values[key]
                        else: # If zero, use ~parabolic error
                            bkg_sum += n.templates_dict[key]*m.errors[key]/2.
            
            
            nDM = nptfit.NPTF(tag='dm_o'+str(self.iobj)+'_E'+str(ebin)+self.mc_tag)
            if self.Asimov: # Use background expectation for the data
                nDM.load_data(Asimov_data[ebin], fermi_exposure)
                nDM.add_template(Asimov_data[ebin], 'bkg_sum')
            else:
                nDM.load_data(data, fermi_exposure)
                nDM.add_template(bkg_sum, 'bkg_sum')
            
            # If there is data use the analysis mask; otherwise only keep pixels where DM is non-zero
            if np.sum(data*np.logical_not(analysis_mask)) != 0:
                nDM.load_mask(analysis_mask)
            else:
                nodata_mask = DM_template_smoothed == 0
                nDM.load_mask(nodata_mask)
            nDM.add_poiss_model('bkg_sum', '$A_\mathrm{bkg}$', fixed=True, fixed_norm=1.0)
            
            #####################
            # Get intensity LLs #
            #####################
                               
            for iA, A in enumerate(A_ary):
                new_n2 = copy.deepcopy(nDM)
                new_n2.add_template(A*DM_template_smoothed,'DM')
                new_n2.add_poiss_model('DM','DM',False,fixed=True,fixed_norm=1.0)
                new_n2.configure_for_scan()
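                # All model parameters are fixed, so the likelihood is evaluated with
                # an empty parameter vector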
                max_LL = new_n2.ll([])
                
                LL_inten_ary[iebin, iA] = max_LL
                inten_ary[iebin, iA] = DM_intensity_base*A

        np.savez(self.save_dir + 'LL_inten_o'+str(self.iobj)+self.mc_tag, LL=LL_inten_ary, intens=inten_ary)
Example #7
# This file is called by Example8_Galactic_Center_Batch.batch
# The scan performs a run over the inner galaxy

# NB: this example makes use of the Fermi Data, which needs to already be installed. See Example 1 for details.

import numpy as np

from NPTFit import nptfit  # module for performing scan
from NPTFit import create_mask as cm  # module for creating the mask
from NPTFit import psf_correction as pc  # module for determining the PSF correction

n = nptfit.NPTF(tag='GCE_Example')

fermi_data = np.load('fermi_data/fermidata_counts.npy').astype(np.int32)
fermi_exposure = np.load('fermi_data/fermidata_exposure.npy')
n.load_data(fermi_data, fermi_exposure)

pscmask = np.array(np.load('fermi_data/fermidata_pscmask.npy'), dtype=bool)
analysis_mask = cm.make_mask_total(band_mask=True,
                                   band_mask_range=2,
                                   mask_ring=True,
                                   inner=0,
                                   outer=30,
                                   custom_mask=pscmask)
n.load_mask(analysis_mask)

dif = np.load('fermi_data/template_dif.npy')
iso = np.load('fermi_data/template_iso.npy')
bub = np.load('fermi_data/template_bub.npy')
gce = np.load('fermi_data/template_gce.npy')
dsk = np.load('fermi_data/template_dsk.npy')
Example #8
def test_scan_non_poiss():
    n = nptfit.NPTF(tag='Test_NPoiss')

    fermi_data = np.array([2, 1, 1, 1, 4, 10]).astype(np.int32)
    fermi_exposure = np.array([1., 1., 1., 2., 2., 2.])
    n.load_data(fermi_data, fermi_exposure)

    analysis_mask = np.array([False, False, False, False, False, True])

    dif = np.array([1., 2., 3., 4., 5., 6.])
    iso = np.array([1., 1., 1., 1., 1., 1.])

    n.add_template(dif, 'dif')
    n.add_template(iso, 'iso')

    n.add_poiss_model('iso','$A_\mathrm{iso}$', False, fixed=True, fixed_norm=1.47)
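    # Non-Poissonian model for 'iso': parameters are [A, n_1, n_2, S_b]; priors cover
    # the first three (A with a log-flat prior) and fixed_params=[[3, 22.]] fixes
    # parameter index 3, the break S_b, to 22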
    n.add_non_poiss_model('iso',
                          ['$A^\mathrm{ps}_\mathrm{iso}$','$n_1$','$n_2$','$S_b$'],
                          [[-6,1],[2.05,30],[-2,1.95]],
                          [True,False,False],
                          fixed_params = [[3,22.]])

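    # Gaussian PSF correction (pc is NPTFit's psf_correction module, imported elsewhere
    # in the test file); f_ary and df_rho_div_f_ary characterize how the PSF
    # redistributes point-source flux and are passed to configure_for_scan below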
    pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812)
    f_ary, df_rho_div_f_ary = pc_inst.f_ary, pc_inst.df_rho_div_f_ary

    n.configure_for_scan(f_ary=f_ary, df_rho_div_f_ary=df_rho_div_f_ary, nexp=1)

    n.perform_scan(nlive=50)

    n.load_scan()
    n.load_scan()

    n = nptfit.NPTF(tag='Test_NPoiss')
    n.load_data(fermi_data, fermi_exposure)
    n.add_template(iso, 'iso')
    n.add_non_poiss_model('iso',
                          ['$A^\mathrm{ps}_\mathrm{iso}$','$n_1$','$n_2$','$S_{b1}$'],
                          [[0,1]],
                          units='flux',fixed_params=[[1,2.],[2,-2.],[3,100.]])
    n.configure_for_scan(nexp=len(dif)+1)
    n.perform_scan(nlive=50)

    n = nptfit.NPTF(tag='Test_NPoiss')
    n.load_data(fermi_data, fermi_exposure)
    n.add_template(iso, 'iso')
    n.add_non_poiss_model('iso',
                          ['$A^\mathrm{ps}_\mathrm{iso}$','$n_1$','$n_2$','$n_3$','$S_{b1}$','$S_{b2}$'],
                          [[0,1],[0,10],[0,1.]],
                          units='flux',fixed_params=[[1,2.],[2,-2.],[3,1.0]],
                          dnds_model='specify_relative_breaks')
    n.configure_for_scan()
    n.perform_scan(nlive=50)

    n = nptfit.NPTF(tag='Test_NPoiss')
    n.load_data(fermi_data, fermi_exposure)
    n.add_template(iso, 'iso')
    n.add_non_poiss_model('iso',
                          ['$A^\mathrm{ps}_\mathrm{iso}$','$n_1$','$n_2$','$n_3$','$S_{b1}$','$S_{b2}$'],
                          [[0,200]],
                          units='flux',fixed_params=[[0,1.],[1,2.],[2,1.],[3,-2],[5,250]])
    n.configure_for_scan()
    n.perform_scan(nlive=50)

    n = nptfit.NPTF(tag='Test_NPoiss')
    n.load_data(fermi_data, fermi_exposure)
    n.add_template(iso, 'iso')
    n.add_non_poiss_model('iso',
                          ['$A^\mathrm{ps}_\mathrm{iso}$','$n_1$','$n_2$','$n_3$','$S_{b1}$','$S_{b2}$'],
                          [[200,400]],
                          units='flux',dnds_model='specify_relative_breaks',
                          fixed_params=[[0,1.],[1,2.],[2,1.],[3,-2],[4,150]])
    n.configure_for_scan()
    n.perform_scan(nlive=50)