def main():
    # Define basic vars
    maps_dir = '/zfs/bsafdi/data/'
    fermi_data_dir = '/zfs/tslatyer/fermimaps/allsky/'
    work_dir = '/zfs/nrodd/NPTFWorking'
    emin = 0
    emax = 1
    nside = 256
    eventclass = 5
    eventtype = 3
    newstyle = 1
    ps_file = '/zfs/nrodd/NPTFWorking/data/ps_data/ps_lists/3FGL_0to15_tiny.txt'
    n_ps_run = 10
    indeg = True

    loadforpsf = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=emin,CTB_en_max=emax,nside=nside,eventclass=eventclass,eventtype=eventtype,newstyle=newstyle)
    loadforpsf.load_psf(data_name='p8',fits_file_path='False',eventclass=eventclass,eventtype=eventtype)
    sigma_PSF_deg = loadforpsf.sigma_PSF_deg[0:-1]
    #print "exp map:",loadforpsf.CTB_exposure_maps
    #print "len(loadforpsf.CTB_exposure_maps)",len(loadforpsf.CTB_exposure_maps)
    #print "len(loadforpsf.CTB_exposure_maps[0])",len(loadforpsf.CTB_exposure_maps[0])
    #print "mean(loadforpsf.CTB_exposure_maps[0])",np.mean(loadforpsf.CTB_exposure_maps[0])
    #print "sum(loadforpsf.CTB_exposure_maps[0])",np.sum(loadforpsf.CTB_exposure_maps[0],axis=0)
    #print "PSF_deg:",sigma_PSF_deg

    larr,barr,n_ps,n_ps_border = trimorderpslist(ps_file,indeg,sigma_PSF_deg)
    #print "n_ps:",n_ps
    #print "n_ps_border:",n_ps_border
    #print "l:",larr*180/np.pi
    #print "b:",barr*180/np.pi

    ps_norms = np.array([1,1,1,1])
    loadforpsf.add_multiple_ps_king(larr,barr,rescale=ps_norms,comp='ps_comb')
    print "MADE IT OUT"

    n_groups = int(np.ceil(float(n_ps+n_ps_border)/float(n_ps_run)))
    # NB: the analogous calculation in the full run script below uses
    # n_ps + n_ps_border here; as written this omits the border sources
    n_last_group = n_ps - (n_groups-1)*n_ps_run
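# A quick standalone sketch (not called anywhere) of the grouping arithmetic
# used above, with illustrative numbers: n_ps = 28 sources in the region plus
# n_ps_border = 5 near the border, run in groups of n_ps_run = 10.
def _example_grouping():
    import numpy as np
    n_ps, n_ps_border, n_ps_run = 28, 5, 10
    n_groups = int(np.ceil(float(n_ps + n_ps_border) / float(n_ps_run)))  # -> 4
    n_last_group = n_ps + n_ps_border - (n_groups - 1) * n_ps_run         # -> 3
    return n_groups, n_last_group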
def setup_fermi(self):
    """ Setup the Fermi plugin """
    eventclass = 5  # 2 (Source) or 5 (UltracleanVeto)
    eventtype = 0   # 0 (all), 3 (bestpsf) or 5 (top3 quartiles)
    mask_type = 'top300'
    force_mask_at_bin_number = 8
    self.f1 = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=0,CTB_en_max=40,nside=self.nside,eventclass=eventclass,eventtype=eventtype,newstyle=1,data_July16=True)
    if mask_type != 'False':
        self.f1.make_ps_mask(mask_type=mask_type,force_energy=True,energy_bin=force_mask_at_bin_number)
    self.f1.add_diffuse_newstyle(comp='p7', eventclass=eventclass, eventtype=eventtype)
    self.f1.add_bubbles(comp='bubs')  # bubbles
    self.f1.add_iso(comp='iso')       # iso
    self.f1.add_ps_model(comp='ps_model')
    # Exposure correct J_map_arr
    self.J_map_arr *= self.f1.CTB_exposure_maps
    # Add J-factor map with mean 1 in each energy bin
    self.f1.add_template_by_hand('J_map',np.array([self.J_map_arr[i]/np.mean(self.J_map_arr[i]) for i in range(40)]))
def setup_for_scan():
    global f, b, new_template_dict, n_ps, newstyle

    # Load Fermi Plugin and its basic functionality
    f = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=emin,CTB_en_max=emax,nside=nside,eventclass=eventclass,eventtype=eventtype,newstyle=newstyle)
    if mask_type != 'False':
        f.make_ps_mask(mask_type=mask_type,force_energy=force_ps_mask,energy_bin=force_ps_mask_bin)

    # Add appropriate templates
    f.add_diffuse_newstyle(comp=diff, eventclass=eventclass, eventtype=eventtype)
    if extraicstemp != 'False':
        f.add_diffuse_newstyle(comp=extraicstemp, eventclass=eventclass, eventtype=eventtype)
    f.add_bubbles(comp='bubs')
    f.add_iso(comp='iso')
    if add_ps_model:
        f.add_ps_model(comp='ps_model')
    if nfw_dm:
        f.add_nfw(comp='nfw')
    # Always add the disk here, even if unused, as it is often needed for the NPTF
    f.add_template_from_file('disk',disk_file_name)

    if norm_file != 'False':
        if len(add_norm_file_for_comps) > 0 and add_norm_file != 'False':
            print 'using first normalization file: ', norm_file
            f.use_template_normalization_file(norm_file,key_suffix='-0',dont_use_keys=add_norm_file_for_comps)
            print 'using second normalization file: ', add_norm_file
            f.use_template_normalization_file(add_norm_file,key_suffix='-0',use_keys=add_norm_file_for_comps)
        else:
            f.use_template_normalization_file(norm_file,key_suffix='-0')

    #### For now, combine diff, bubs, iso, ps_model and IC to reduce the number of parameters
    new_template_dict = {}
    new_template_dict['back'] = np.zeros(np.shape(f.template_dict['bubs']))
    for key in f.template_dict.keys():
        new_template_dict[key] = f.template_dict[key]
        if key not in not_in_simplified_template_list:  # previously: key != 'nfw' and key != 'disk'
            print 'Including template', key, 'in background template'
            new_template_dict['back'] += f.template_dict[key]

    # Load PSF - this is only required for NPTF scans
    # This now uses a King function
    if not poiss:
        f.load_psf_kings()
        f.make_f_ary_kings(psf_model,energy_averaged=True,psf_dir=psf_dir,psf_save_tag=run_tag_energy)

    # Setup Bayesian scan for fermi
    b = bsm.bayesian_scan_NPTF(tag=tag,work_dir=work_dir,psf_dir=psf_dir,nside=nside,nlive=nlive,k_max=k_max)
    if fake_data:
        data = np.loadtxt(fake_data_path)
    else:
        data = f.CTB_count_maps
    b.load_external_data(f.CTB_en_bins,data,f.CTB_exposure_maps)

    if use_simplified_templates:
        b.add_new_template(new_template_dict)
        if fixed_background:
            b.add_fixed_templates({'back':new_template_dict['back']})
    else:
        b.add_new_template(f.template_dict)

    if mask_type != 'False':
        b.make_mask_total(plane_mask=plane_mask, band_mask_range=[-pmval,pmval], lcut=lcut, lmin=lmin, lmax=lmax, bcut=bcut, bmin=bmin, bmax=bmax, mask_ring=mask_ring, inner=inner, outer=outer, ps_mask_array=f.ps_mask_array)
    else:
        b.make_mask_total(plane_mask=plane_mask, band_mask_range=[-pmval,pmval], lcut=lcut, lmin=lmin, lmax=lmax, bcut=bcut, bmin=bmin, bmax=bmax, mask_ring=mask_ring, inner=inner, outer=outer)
    b.rebin_external_data(n_ebins)
    b.compress_templates()

    ### Add in the Templates
    if method != 'minuit':
        if not min_prior_range:
            b.add_poiss_model(diff,'$A_{diff}$',[-10,10],False)
            if extraicstemp != 'False':
                b.add_poiss_model(extraicstemp,'$A_{ics}$',[-10,10],False)
            b.add_poiss_model('iso','$A_{iso}$',[-10,10],False)
            b.add_poiss_model('bubs','$A_{bubs}$',[-10,10],False)
        else:
            # If we already have priors, don't scan over such a large range
            b.add_poiss_model(diff,'$A_{diff}$',[0.5,1.5],False)
            if extraicstemp != 'False':
                b.add_poiss_model(extraicstemp,'$A_{ics}$',[0.5,1.5],False)
            if high_lat:
                b.add_poiss_model('iso','$A_{iso}$',[0.5,1.5],False)
            else:
                b.add_poiss_model('iso','$A_{iso}$',[0.8,1.2],False)
            b.add_poiss_model('bubs','$A_{bubs}$',[0.5,1.5],False)
    else:
        b.add_poiss_model(diff,'$A_{diff}$',[0,2],False)
        if extraicstemp != 'False':
            b.add_poiss_model(extraicstemp,'$A_{ics}$',[-2,2],False)
        b.add_poiss_model('iso','$A_{iso}$',[0,2],False)
        b.add_poiss_model('bubs','$A_{bubs}$',[0,4],False)
    if add_ps_model:
        if not min_prior_range:
            b.add_poiss_model('ps_model','$A_{ps-model}$',[0,6],False)
        else:
            b.add_poiss_model('ps_model','$A_{ps-model}$',[0.5,1.5],False)
    if nfw_dm:
        # Leave this prior large as the NFW struggles to converge at high E
        b.add_poiss_model('nfw','$l10A_{nfw}$',[-6,6],True)

    ### Configure final details
    def sb_string_mod(mod):
        return ['${S_b^{' + mod + '}}^{' + str(i) + '}$' for i in range(n_ebins)]

    if not poiss:
        sb_string = ['$S_b^{' + str(i) + '}$' for i in range(n_ebins)]
        sb_string_disk = ['${S_b^{disk}}^{' + str(i) + '}$' for i in range(n_ebins)]
        sb_prior = [[-3.,3.] for i in range(n_ebins)]
        sb_prior_log = [True for i in range(n_ebins)]
        if len(ps_fixed_filename_list) > 0:
            for comp,filename in map(None,ps_fixed_list,ps_fixed_filename_list):
                b.load_fixed_ps_model(filename,comp)
            n_ps += len(ps_fixed_filename_list)
            print 'n_ps changed to ', n_ps, ' because of fixed templates'
        for mod in ps_list:
            b.add_non_poiss_model(mod,['$A_{' + mod + '}$','$n_1^{' + mod + '}$','$n_2^{' + mod + '}$'] + sb_string_mod(mod),[[-6,6],[2.05,30],[-2,1.95]] + sb_prior,[True,False,False] + sb_prior_log)
        if n_ps == 1:
            b.initiate_1_ps_edep_new_from_f_ary(f.f_ary_list,f.df_rho_div_f_ary_list)
        elif n_ps == 2:
            b.initiate_2_ps_edep_new_from_f_ary(f.f_ary_list,f.df_rho_div_f_ary_list)
        elif n_ps == 3:
            b.initiate_3_ps_edep_new_from_f_ary(f.f_ary_list,f.df_rho_div_f_ary_list)
        print 'Performing an NPTF ...'
    else:
        b.initiate_poissonian_edep()
        print 'Performing a standard template fit ...'
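# A minimal standalone sketch (never called) of what the non-Poissonian
# parameter lists built by sb_string_mod above expand to. The values are
# illustrative, assuming n_ebins = 2 and a single 'nfw' PS component.
def _example_nonpoiss_params():
    n_ebins = 2
    mod = 'nfw'
    sb_names = ['${S_b^{' + mod + '}}^{' + str(i) + '}$' for i in range(n_ebins)]
    names = ['$A_{' + mod + '}$', '$n_1^{' + mod + '}$', '$n_2^{' + mod + '}$'] + sb_names
    # names -> ['$A_{nfw}$', '$n_1^{nfw}$', '$n_2^{nfw}$', '${S_b^{nfw}}^{0}$', '${S_b^{nfw}}^{1}$']
    priors = [[-6, 6], [2.05, 30], [-2, 1.95]] + [[-3., 3.] for i in range(n_ebins)]
    log_flags = [True, False, False] + [True for i in range(n_ebins)]
    return names, priors, log_flags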
import argparse

nsim = 100
outstring = 'allhalos'
nside = 128
eventclass = 5  # 2 (Source) or 5 (UltracleanVeto)
eventtype = 0   # 0 (all), 3 (bestpsf) or 5 (top3 quartiles)
emin_bin = 0
emax_bin = 40  # Must match the norm file!

f_global = fp.fermi_plugin(maps_dir, fermi_data_dir=fermi_data_dir, work_dir=work_dir,
                           CTB_en_min=emin_bin, CTB_en_max=emax_bin, nside=nside,
                           eventclass=eventclass, eventtype=eventtype, newstyle=1,
                           data_July16=True)

# Set up J_map
J_map = hp.ud_grade(np.load(
    '/tigress/smsharma/public/GenMaps/GenMapsJumpAround/Jfactor_DS_true_map_100.0_100.0_100.0b2e+20_a3.16e+17.npy'
), nside, power=-2)
J_map /= GeV**2 * Centimeter**-5

# Exposure correct then smooth J_map; note the latter must be done first
J_map_arr_ps = np.zeros(shape=(emax_bin - emin_bin, len(J_map)))
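# Brief sketch of why power=-2 is used in the hp.ud_grade call above: it keeps
# the total (sum over pixels) of a counts-like map invariant under regridding,
# since the result is rescaled by (nside_in/nside_out)**2 to compensate for the
# change in pixel area. Standalone toy check, never called by the script:
def _example_ud_grade_power():
    import numpy as np
    import healpy as hp
    m = np.arange(hp.nside2npix(64), dtype=float)
    m_down = hp.ud_grade(m, 32, power=-2)
    assert np.isclose(np.sum(m), np.sum(m_down))  # totals agree up to round-off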
def scan(self):
    ################
    # Fermi plugin #
    ################

    # Load the Fermi plugin - always load all energy bins, extract what is needed
    f_global = fp.fermi_plugin(maps_dir, fermi_data_dir=fermi_data_dir, work_dir=work_dir,
                               CTB_en_min=0, CTB_en_max=40, nside=self.nside,
                               eventclass=self.eventclass, eventtype=self.eventtype,
                               newstyle=1, data_July16=True)

    # Load necessary templates
    f_global.add_diffuse_newstyle(comp=self.diff, eventclass=self.eventclass, eventtype=self.eventtype)
    f_global.add_iso()
    ps_temp = np.load(work_dir + '/DataFiles/PS-Maps/ps_map.npy')
    f_global.add_template_by_hand(comp='ps_model', template=ps_temp)

    ###################
    # Get DM halo map #
    ###################

    l = self.catalog.l.values[self.iobj]
    b = self.catalog.b.values[self.iobj]
    rs = self.catalog.rs.values[self.iobj] * 1e-3
    if self.boost:
        J0 = 10**self.catalog.mulog10J_inf.values[self.iobj]
    else:
        J0 = 10**self.catalog.mulog10Jnb_inf.values[self.iobj]
    mk = mkDMMaps.mkDMMaps(z=self.catalog.z[self.iobj], r_s=rs, J_0=J0,
                           ell=l * np.pi / 180, b=b * np.pi / 180, nside=self.nside,
                           use_boost=self.use_boost, Burkert=self.Burkert)
    DM_template_base = mk.map

    #########################################
    # Loop over energy bins to get spectrum #
    #########################################

    # ROI where we will normalise our templates
    ROI_mask = cm.make_mask_total(mask_ring=True, inner=0, outer=10, ring_b=b, ring_l=l)
    ROI = np.where(ROI_mask == 0)[0]

    # Setup output
    output_cube = np.zeros((self.emax + 1 - self.emin, 5, len(ROI)))

    for iebin, ebin in tqdm(enumerate(np.arange(self.emin, self.emax + 1)),
                            disable=1 - self.verbose):
        ######################
        # Templates and maps #
        ######################

        if self.verbose:
            print "At bin", ebin

        fermi_exposure = f_global.CTB_exposure_maps[ebin]
        DM_template = DM_template_base * fermi_exposure / np.sum(DM_template_base * fermi_exposure)
        ksi = ks.king_smooth(maps_dir, ebin, self.eventclass, self.eventtype, threads=1)
        DM_template_smoothed = ksi.smooth_the_map(DM_template)
        DM_intensity_base = np.sum(DM_template_smoothed / fermi_exposure)

        dif = f_global.template_dict[self.diff][ebin]
        iso = f_global.template_dict['iso'][ebin]
        psc = f_global.template_dict['ps_model'][ebin]

        output_cube[iebin, 0] = ROI
        output_cube[iebin, 1] = dif[ROI]
        output_cube[iebin, 2] = iso[ROI]
        output_cube[iebin, 3] = psc[ROI]
        output_cube[iebin, 4] = DM_template_smoothed[ROI]

    np.save(self.save_dir + 'cube_o' + str(self.iobj) + self.mc_tag, output_cube)
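# Sketch of the normalisation step used in scan() above: weighting a sky map by
# the exposure and dividing by the weighted sum yields a counts map whose pixels
# sum to one, so its fitted normalisation directly measures expected counts.
# Toy arrays only; the shapes and values here are illustrative.
def _example_unit_counts_template():
    import numpy as np
    base = np.array([0.0, 1.0, 3.0, 6.0])      # e.g. a J-factor style map
    exposure = np.array([2.0, 2.0, 4.0, 4.0])  # cm^2 s per pixel
    counts_template = base * exposure / np.sum(base * exposure)
    assert np.isclose(np.sum(counts_template), 1.0)
    return counts_template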
def main():
    keyfile = 'nptf/IG_NDI/FD_key_1.txt.gz'
    jetbase = '/tigress/nrodd/FindPSOutput/psdata_2-12-15-m5-b1_roi/psdata_2-12-15-m5-b1_roi_ca_PT_R0p'
    jetfiles = [jetbase+'15.txt.gz',jetbase+'2.txt.gz',jetbase+'25.txt.gz',jetbase+'3.txt.gz',
                jetbase+'4.txt.gz',jetbase+'5.txt.gz',jetbase+'6.txt.gz',jetbase+'7.txt.gz',
                jetbase+'8.txt.gz',jetbase+'9.txt.gz']
    jetlabel = ['Jet R0p15','Jet R0p2','Jet R0p25','Jet R0p3','Jet R0p4',
                'Jet R0p5','Jet R0p6','Jet R0p7','Jet R0p8','Jet R0p9']
    jetrad = [0.15,0.2,0.25,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
    outname = '2-12-15-m5_b1'
    fake_data_key_path = '/tigress/nrodd/NPTFWorking/FindPS/plots/' + keyfile
    CTB_start_bin = 8
    CTB_end_bin = 16
    nside = 512
    data_type = 'p8'

    npix = hp.nside2npix(nside)
    theta, phi = hp.pix2ang(nside,range(npix))
    larr_full = phi*180/np.pi
    larr_full = ((larr_full + 180) % 360) - 180
    larr_full *= np.pi/180
    barr_full = np.pi/2 - theta

    band_mask_range_plot = [-1,1]
    mask_ring_plot = True
    outer_plot = 10

    f = fp.fermi_plugin(maps_dir,CTB_en_min=CTB_start_bin,CTB_en_max=CTB_end_bin,nside=nside,data_name=data_type)

    # Now calculate dndF from the sim key
    fake_data_key_load = np.loadtxt(fake_data_key_path)
    fake_data_key = np.zeros(shape=(len(fake_data_key_load),10))
    fake_data_key[::,0:2] = fake_data_key_load[::,0:2]
    fake_data_key[::,2:10] = fake_data_key_load[::,CTB_start_bin+2:CTB_end_bin+2]

    # Now pick out point sources within the ROI we want to analyse
    # NB: cuts in degrees, larr and barr in radians
    pmfocus = 3
    ringfocus = 10
    barr = np.pi/2 - fake_data_key[::,0]
    larr = fake_data_key[::,1]*180/np.pi
    larr = ((larr + 180) % 360) - 180
    larr *= np.pi/180
    print np.min(larr)*180/np.pi
    print np.max(larr)*180/np.pi
    print np.min(barr)*180/np.pi
    print np.max(barr)*180/np.pi
    thetaarr = np.arccos(np.cos(larr)*np.cos(barr))
    print np.min(thetaarr)*180/np.pi
    print np.max(thetaarr)*180/np.pi
    roi = np.where((np.abs(barr) > pmfocus*np.pi/180) & (thetaarr < ringfocus*np.pi/180))[0]
    bigroi = np.where((np.abs(barr) > (pmfocus-0.9)*np.pi/180) & (thetaarr < (ringfocus+0.9)*np.pi/180))[0]
    fake_data_keyroi = fake_data_key[roi,::]
    key_bigroi = fake_data_key[bigroi,::]
    larr_key = larr[bigroi]
    barr_key = barr[bigroi]
    print np.min(thetaarr[roi])*180/np.pi
    print np.max(thetaarr[roi])*180/np.pi
    print np.min(barr[roi])*180/np.pi
    print np.min(thetaarr[bigroi])*180/np.pi
    print np.max(thetaarr[bigroi])*180/np.pi
    print np.min(barr[bigroi])*180/np.pi
    print "len(roi):",len(roi)
    print "len(bigroi):",len(bigroi)

    pfake3FGL = make_flux_histogram_local(fake_data_keyroi,f.CTB_exposure_maps,band_mask_range=band_mask_range_plot,mask_ring=mask_ring_plot,outer=outer_plot)
    pfake3FGL.make_fake_data_flux_histogram(0.1,5000,10)
    pfake3FGL.plot_fake_data_histogram(fmt='o',color='black',markersize=5,label='Sim PS')

    # Now read in Jet data and create dndF plots from that
    # First load the data and background model as we need these
    jetcolors = ['red','orange','yellow','olive','green','blue','indigo','violet','deeppink','pink']
    # Loop through the files
    # NB: only the first radius is processed here; the full loop is commented out
    #for i in range(10):
    for i in range(1):
        print "At i =",i
        jetload = np.loadtxt(jetfiles[i])
        # extract (l,b) from array
        larr = jetload[::,0]
        phi = larr*180/np.pi
        larr *= 180/np.pi
        larr = ((larr + 180) % 360) - 180
        larr *= np.pi/180
        barr = jetload[::,1]
        theta = np.pi/2 - barr
        print np.min(larr)*180/np.pi
        print np.max(larr)*180/np.pi
        print np.min(larr_key)*180/np.pi
        print np.max(larr_key)*180/np.pi
        print np.min(barr)*180/np.pi
        print np.max(barr)*180/np.pi
        print np.min(barr_key)*180/np.pi
        print np.max(barr_key)*180/np.pi

        # Determine which are in the ROI
        thetaarr = np.arccos(np.cos(larr)*np.cos(barr))
        roi = np.where((np.abs(barr) > pmfocus*np.pi/180) & (thetaarr < ringfocus*np.pi/180))[0]

        # Now build up the key for each ps in the ROI
        jet_data_key = np.zeros(shape=(len(roi),10))
        #print "len(larr):",len(larr)
        #print "len(larr_key):",len(larr_key)
        #print "len(roi):",len(roi)
        count = 0
        for j in range(len(roi)):
            print "j =",j
            # First put theta and phi in
            jet_data_key[j,0] = theta[roi[j]]
            jet_data_key[j,1] = phi[roi[j]]
            # Now input the counts - determine this by adding counts for sim PS within jetrad
            thetaps = np.arccos(np.cos(larr[roi[j]]-larr_key)*np.cos(barr[roi[j]]-barr_key))
            print np.min(thetaps)*180/np.pi
            print np.max(thetaps)*180/np.pi
            print len(thetaps)
            # if j == 20: np.load('Fake')  # deliberate-crash debugging halt; disabled
            pstoadd = np.where(thetaps <= jetrad[i]*np.pi/180)[0]
            #print "len(pstoadd)",len(pstoadd)
            if len(pstoadd) > 0:
                count += 1
                print "count =",count
                for k in range(8):
                    for m in range(len(pstoadd)):
                        print "key_bigroi[pstoadd[m],k+2]=",key_bigroi[pstoadd[m],k+2]
                        jet_data_key[j,k+2] += key_bigroi[pstoadd[m],k+2]
        print "count:",count

        # Now plot
        pfake3FGL = make_flux_histogram_local(jet_data_key,f.CTB_exposure_maps,band_mask_range=band_mask_range_plot,mask_ring=mask_ring_plot,outer=outer_plot)
        pfake3FGL.make_fake_data_flux_histogram(0.1,5000,10)
        pfake3FGL.plot_fake_data_histogram(fmt='o',color=jetcolors[i],markersize=5,label=jetlabel[i])

    plt.yscale('log')
    plt.xscale('log')
    plt.xlim([1e-12,1e-6])
    plt.ylim([1e3,1e11])
    plt.tick_params(axis='x',length=5,width=2,labelsize=18)
    plt.tick_params(axis='y',length=5,width=2,labelsize=18)
    plt.xlabel('$F$ [photons / cm$^2$ / s]', fontsize=18)
    plt.ylabel('$dN/dF$ [photons$^{-1}$ cm$^2$ s deg$^{-2}$]', fontsize=18)
    plt.legend(fontsize=16)
    plt.savefig('./Plots/' + outname + '.pdf')
    plt.close()
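# Quick sketch of the longitude wrap used repeatedly above: mapping angles in
# degrees into [-180, 180), so that l measures signed offset from the Galactic
# centre. Standalone toy values, never called by the script.
def _example_wrap_longitude():
    import numpy as np
    l_deg = np.array([10., 350., 180., 181.])
    wrapped = ((l_deg + 180) % 360) - 180
    # -> [ 10., -10., -180., -179.]
    return wrapped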
def scan(self):
    ################
    # Fermi plugin #
    ################

    # Load the Fermi plugin - always load all energy bins, extract what is needed
    f_global = fp.fermi_plugin(maps_dir, fermi_data_dir=fermi_data_dir, work_dir=work_dir,
                               CTB_en_min=0, CTB_en_max=40, nside=self.nside,
                               eventclass=self.eventclass, eventtype=self.eventtype,
                               newstyle=1, data_July16=True)

    # Load necessary templates
    f_global.add_diffuse_newstyle(comp=self.diff, eventclass=self.eventclass, eventtype=self.eventtype)
    f_global.add_iso()
    ps_temp = np.load(work_dir + '/DataFiles/PS-Maps/ps_map.npy')
    f_global.add_template_by_hand(comp='ps_model', template=ps_temp)

    ###################
    # Get DM halo map #
    ###################

    l = self.catalog.l.values[self.iobj]
    b = self.catalog.b.values[self.iobj]
    rs = self.catalog.rs.values[self.iobj] * 1e-3
    if self.boost:
        J0 = 10**self.catalog.mulog10J_inf.values[self.iobj]
    else:
        J0 = 10**self.catalog.mulog10Jnb_inf.values[self.iobj]
    mk = mkDMMaps.mkDMMaps(z=self.catalog.z[self.iobj], r_s=rs, J_0=J0,
                           ell=l * np.pi / 180, b=b * np.pi / 180, nside=self.nside,
                           use_boost=self.use_boost, Burkert=self.Burkert)
    DM_template_base = mk.map

    #########################################
    # Loop over energy bins to get spectrum #
    #########################################

    # 10 deg mask for the analysis
    analysis_mask_base = cm.make_mask_total(mask_ring=True, inner=0, outer=10, ring_b=b, ring_l=l)

    # ROI where we will normalise our templates
    ROI_mask = cm.make_mask_total(mask_ring=True, inner=0, outer=2, ring_b=b, ring_l=l)
    ROI = np.where(ROI_mask == 0)[0]

    # Setup output
    output_norms = np.zeros((self.emax + 1 - self.emin, 4, 2))

    for iebin, ebin in tqdm(enumerate(np.arange(self.emin, self.emax + 1)),
                            disable=1 - self.verbose):
        ######################
        # Templates and maps #
        ######################

        if self.verbose:
            print "At bin", ebin

        data = f_global.CTB_count_maps[ebin].astype(np.float64)

        # Add large scale mask to analysis mask
        els_str = [
            '0.20000000', '0.25178508', '0.31697864', '0.39905246', '0.50237729',
            '0.63245553', '0.79621434', '1.0023745', '1.2619147', '1.5886565',
            '2.0000000', '2.5178508', '3.1697864', '3.9905246', '5.0237729',
            '6.3245553', '7.9621434', '10.023745', '12.619147', '15.886565',
            '20.000000', '25.178508', '31.697864', '39.905246', '50.237729',
            '63.245553', '79.621434', '100.23745', '126.19147', '158.86565',
            '200.00000', '251.78508', '316.97864', '399.05246', '502.37729',
            '632.45553', '796.21434', '1002.3745', '1261.9147', '1588.6565'
        ]
        ls_mask_load = fits.open('/tigress/nrodd/LargeObjMask/Allpscmask_3FGL-energy'
                                 + els_str[ebin] + 'large-obj.fits')
        ls_mask = np.array([np.round(val) for val in
                            hp.ud_grade(ls_mask_load[0].data, self.nside, power=0)])
        analysis_mask = np.vectorize(bool)(analysis_mask_base + ls_mask)

        fermi_exposure = f_global.CTB_exposure_maps[ebin]
        DM_template = DM_template_base * fermi_exposure / np.sum(DM_template_base * fermi_exposure)
        ksi = ks.king_smooth(maps_dir, ebin, self.eventclass, self.eventtype, threads=1)
        DM_template_smoothed = ksi.smooth_the_map(DM_template)
        DM_intensity_base = np.sum(DM_template_smoothed / fermi_exposure)

        dif = f_global.template_dict[self.diff][ebin]
        iso = f_global.template_dict['iso'][ebin]
        psc = f_global.template_dict['ps_model'][ebin]

        # Get mean values in ROI
        dif_mu = np.mean(dif[ROI])
        iso_mu = np.mean(iso[ROI])
        psc_mu = np.mean(psc[ROI])
        DM_mu = np.mean(DM_template_smoothed[ROI])
        exp_mu = np.mean(fermi_exposure[ROI])

        ####################
        # NPTFit norm scan #
        ####################

        n = nptfit.NPTF(tag='norm_o' + str(self.iobj) + '_E' + str(ebin) + self.mc_tag)
        n.load_data(data, fermi_exposure)
        n.load_mask(analysis_mask)
        n.add_template(dif, self.diff)
        n.add_template(iso, 'iso')
        n.add_template(psc, 'psc')
        n.add_template(DM_template_smoothed, 'DM')
        n.add_poiss_model(self.diff, '$A_\mathrm{dif}$', [0, 10], False)
        n.add_poiss_model('iso', '$A_\mathrm{iso}$', [0, 20], False)
        n.add_poiss_model('psc', '$A_\mathrm{psc}$', [0, 10], False)
        n.add_poiss_model('DM', '$A_\mathrm{DM}$', [0, 1000], False)
        n.configure_for_scan()

        ##########
        # Minuit #
        ##########

        keys = n.poiss_model_keys
        limit_dict = {}
        init_val_dict = {}
        step_size_dict = {}
        for key in keys:
            if key == 'DM':
                limit_dict['limit_' + key] = (0, 1000)
            else:
                limit_dict['limit_' + key] = (0, 50)
            init_val_dict[key] = 0.0
            step_size_dict['error_' + key] = 1.0
        other_kwargs = {'print_level': self.verbose, 'errordef': 1}
        z = limit_dict.copy()
        z.update(other_kwargs)
        z.update(limit_dict)
        z.update(init_val_dict)
        z.update(step_size_dict)
        f = call_ll(len(keys), n.ll, keys)
        m = Minuit(f, **z)
        m.migrad(ncall=30000, precision=1e-14)

        # Output spectra in E^2 dN/dE, in units [GeV/cm^2/s/sr] as the mean in 2 degrees
        # NB: the 'p8' key assumes self.diff == 'p8'
        output_norms[iebin, 0, 0] = m.values['p8'] * dif_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]
        output_norms[iebin, 0, 1] = m.errors['p8'] * dif_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]
        output_norms[iebin, 1, 0] = m.values['iso'] * iso_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]
        output_norms[iebin, 1, 1] = m.errors['iso'] * iso_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]
        output_norms[iebin, 2, 0] = m.values['psc'] * psc_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]
        output_norms[iebin, 2, 1] = m.errors['psc'] * psc_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]
        output_norms[iebin, 3, 0] = m.values['DM'] * DM_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]
        output_norms[iebin, 3, 1] = m.errors['DM'] * DM_mu / exp_mu * self.emid[iebin]**2 / self.de[iebin]

        ###################################
        # NPTFit fixed DM and bkg profile #
        ###################################

        # Make background sum and initiate second scan
        # If there was no data, leave bkg_sum as 0
        bkg_sum = np.zeros(len(data))
        if np.sum(data * np.logical_not(analysis_mask)) != 0:
            for key in keys:
                if key != 'DM':  # Don't add DM in here
                    if m.values[key] != 0:
                        bkg_sum += n.templates_dict[key] * m.values[key]
                    else:  # If zero, use ~parabolic error
                        bkg_sum += n.templates_dict[key] * m.errors[key] / 2.

        nDM = nptfit.NPTF(tag='dm_o' + str(self.iobj) + '_E' + str(ebin) + self.mc_tag)
        nDM.load_data(data, fermi_exposure)
        nDM.add_template(bkg_sum, 'bkg_sum')
        # If there is no data, only go over pixels where DM is non-zero
        if np.sum(data * np.logical_not(analysis_mask)) != 0:
            nDM.load_mask(analysis_mask)
        else:
            nodata_mask = DM_template_smoothed == 0
            nDM.load_mask(nodata_mask)
        nDM.add_poiss_model('bkg_sum', '$A_\mathrm{bkg}$', fixed=True, fixed_norm=1.0)

    np.save(self.save_dir + 'spec_o' + str(self.iobj) + self.mc_tag, output_norms)
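# A small worked sketch of the conversion pattern used for the output spectra
# above: best-fit normalisation x mean template counts / mean exposure, scaled
# by E_mid^2 / dE to express the result as E^2 dN/dE. All numbers are made up.
def _example_e2dnde():
    A_fit = 1.2         # best-fit template normalisation (dimensionless)
    mu_counts = 50.0    # mean template counts per pixel in the 2 deg ROI
    mu_exposure = 3e10  # mean exposure in the ROI [cm^2 s]
    e_mid = 2.0         # bin centre energy [GeV]
    d_e = 0.5           # bin width [GeV]
    return A_fit * mu_counts / mu_exposure * e_mid**2 / d_e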
n_flux_bins = 15
error_range = 0.68  # for plotting uncertainties

###### disk filename
disk_file_name = 'thindiskmodel_ec.fits'  # 'simplediskmodel_ec.fits'

####################
########## Internal code
###################

## Fermi plugin instance
f = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=CTB_start_bin,CTB_en_max=CTB_end_bin+1,nside=nside,eventclass=eventclass,eventtype=eventtype,newstyle=newstyle)
#f.make_ps_mask(mask_type=mask_type,energy_bin=energy_bin_mask,force_energy=force_energy)
f.add_diffuse_newstyle(comp='p6', eventclass=eventclass, eventtype=eventtype)
f.add_bubbles()  # bubbles
f.add_iso()      # iso
f.add_nfw()      # NFW-DM
f.add_template_from_file('disk',disk_file_name)  # we will call this template 'disk'
if mask_type != 'False':
    f.make_ps_mask(mask_type=mask_type,energy_bin=energy_bin_mask,force_energy=force_energy)
f.use_template_normalization_file(norm_file_path,key_suffix='-0')
def main():
    global plot_ll_profile, eachps_dict, eachts_dict, eachps_En_center, TSbkg, TS, ps_norms_loop1, ps_norms

    # Determine PSF using fermi plugin, for use in the ps ordering
    loadforpsf = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=emin,CTB_en_max=emax,nside=nside,eventclass=eventclass,eventtype=eventtype,newstyle=newstyle)
    loadforpsf.load_psf(data_name='p8',fits_file_path='False')
    sigma_PSF_deg = loadforpsf.sigma_PSF_deg[0:-1]

    # Get an ordered list of the l and b values by ranking - cutting those outside of the region
    # The end of the list contains those sources outside the region but within 1 PSF of the border
    larr,barr,n_ps,n_ps_border = trimorderpslist(ps_file,indeg,sigma_PSF_deg)
    n_groups = int(np.ceil(float(n_ps+n_ps_border)/float(n_ps_run)))
    n_last_group = n_ps + n_ps_border - (n_groups-1)*n_ps_run

    # Establish array of norms for the point sources - starting at 1 and adjusted throughout the run
    ps_norms_loop1 = np.ones(n_ps+n_ps_border)
    ps_norms = np.ones(n_ps)

    # The strategy from here is as follows:
    # - Break the point source lists up into groups of n_ps_run, in order of ranking
    # - For each group, float each point source individually and extract its normalisation
    # - Next, create a template combining each of the point sources just fitted
    #   (this is done at the analysis step)
    # - Then repeat this for the next group, but add in a template of all the point sources
    #   already run; repeat this process until all point sources have been run
    # - After this, perform a run with the full ps map to get its spectrum
    # - Then rerun each ps one by one to get its TS while fixing the rest

    print 'Determining initial value for each point source...'
    for i in range(n_groups):
        if i+1 == n_groups:
            n_ps_groupi = n_last_group
        else:
            n_ps_groupi = n_ps_run
        # Setup for scan
        if method == 'minuit' and run == False:
            pass
        else:
            print 'setting up the scan. the method is', method
            setup_for_scan(i=i,n_ps_groupi=n_ps_groupi,larr=larr,barr=barr,psloop1=True)
            print 'finished setting up the scan.'
        # Perform scan / load scan
        if run:
            print 'The nside for this scan is ', b.nside
            if method == 'multinest':
                b.perform_scan(run_tag=run_tag_energy)
                do_analysis(i=i,n_ps_groupi=n_ps_groupi,minuit_new=False,psloop1=True)
            elif method == 'minuit':
                b.perform_scan_minuit(run_tag=run_tag_energy)
                b.save_minuit()
                do_analysis(i=i,n_ps_groupi=n_ps_groupi,minuit_new=True,psloop1=True)

    # Now perform a run to extract the spectrum of all point sources combined
    print 'Determining spectrum of combined point source template...'
    # Setup for scan
    if method == 'minuit' and run == False:
        pass
    else:
        print 'setting up the scan. the method is', method
        setup_for_scan(larr=larr,barr=barr,pscomball=True)
        print 'finished setting up the scan.'
    # Perform scan / load scan
    if run:
        print 'The nside for this scan is ', b.nside
        if method == 'multinest':
            b.perform_scan(run_tag=run_tag_energy)
            do_analysis(pscomball=True,minuit_new=False)
        elif method == 'minuit':
            b.perform_scan_minuit(run_tag=run_tag_energy)
            b.save_minuit()
            do_analysis(pscomball=True,minuit_new=True)

    # Now loop over all point sources, floating one at a time with the others fixed
    print 'Calculating spectrum and significance of each point source in ROI...'
    # From this we want to extract their TS and spectrum
    # Initialise empty dictionaries to put the spectra in
    eachps_dict = {}
    eachts_dict = {}
    # Run through the loop - this must be done twice for each source, once with
    # and once without the point source, in order to determine the TS
    for i in range(n_ps):
        TSbkg = 0.
        TS = 0.
        # Setup for scan (background only)
        if method == 'minuit' and run == False:
            pass
        else:
            print 'setting up the scan. the method is', method
            setup_for_scan(i=i,larr=larr,barr=barr,psloop2bkg=True)
            print 'finished setting up the scan.'
        # Perform scan / load scan
        if run:
            print 'The nside for this scan is ', b.nside
            if method == 'multinest':
                b.perform_scan(run_tag=run_tag_energy)
                do_analysis(i=i,minuit_new=False,psloop2bkg=True)
            elif method == 'minuit':
                b.perform_scan_minuit(run_tag=run_tag_energy)
                b.save_minuit()
                print "MINUIT doesn't currently save TS - need to add this in if want minuit"
                do_analysis(i=i,minuit_new=True,psloop2bkg=True)
        # Setup for scan (with the point source)
        if method == 'minuit' and run == False:
            pass
        else:
            print 'setting up the scan. the method is', method
            setup_for_scan(i=i,larr=larr,barr=barr,psloop2=True)
            print 'finished setting up the scan.'
        # Perform scan / load scan
        if run:
            print 'The nside for this scan is ', b.nside
            if method == 'multinest':
                b.perform_scan(run_tag=run_tag_energy)
                do_analysis(i=i,minuit_new=False,psloop2=True)
            elif method == 'minuit':
                b.perform_scan_minuit(run_tag=run_tag_energy)
                b.save_minuit()
                print "MINUIT doesn't currently save TS - need to add this in if want minuit"
                do_analysis(i=i,minuit_new=True,psloop2=True)

    # Finally save the spectra, norm and ts details
    # Create a norm dictionary and a dictionary where we convert this to a spectrum
    eachnormloop1_dict = {}
    eachnorm_dict = {}
    for i in range(n_ps+n_ps_border):
        eachnormloop1_dict['ps_' + str(i+1)] = ps_norms_loop1[i]
    for i in range(n_ps):
        eachnorm_dict['ps_' + str(i+1)] = ps_norms[i]*ps_norms_loop1[i]
    # NB: norm is from the initial scan, spectra and ts from the final scan
    eachps_savefile = ps_spec_dir + save_spect_label_ps
    eachnormloop1_savefile = ps_spec_dir + save_norm_loop1_label_ps
    eachnorm_savefile = ps_spec_dir + save_norm_label_ps
    eachts_savefile = ps_spec_dir + save_ts_label
    print 'saving spectra, norm and ts spectra'
    spect = [[eachps_En_center, eachps_dict]]
    np.save(eachps_savefile,np.array(spect))
    normloop1 = [[eachps_En_center, eachnormloop1_dict]]
    np.save(eachnormloop1_savefile,np.array(normloop1))
    norm = [[eachps_En_center, eachnorm_dict]]
    np.save(eachnorm_savefile,np.array(norm))
    outts = [[eachps_En_center, eachts_dict]]
    np.save(eachts_savefile,np.array(outts))
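# Hedged sketch of how the files saved above can be read back: each is an
# object array of the form [[En_center, dict]], matching the loadnorm[i][1][key]
# access pattern used elsewhere in this codebase. The 'ps_1' key is just an
# illustrative example; on newer numpy, np.load may need allow_pickle=True.
def _example_load_saved_spectra(savefile):
    import numpy as np
    loaded = np.load(savefile)  # object array of [En_center, dict] entries
    en_center, spec_dict = loaded[0]
    return en_center, spec_dict.get('ps_1')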
band_mask_range = [-band_mask, band_mask]  # measured from the Galactic plane
mask_ring = False
mask_type = 'top300'  # can also be '0.99', 'top300', or 'False'
force_energy = True   # we will force the PS mask from a specific energy bin
energy_bin_mask = 10
norm_file_path = '/tigress/nrodd/2mass2furious/MakeMC/P8UCVA_norm'
spect_file_path = '/tigress/nrodd/2mass2furious/MakeMC/P8UCVA_spec'

f_total = fp.fermi_plugin(maps_dir, fermi_data_dir=fermi_data_dir,
                          CTB_en_min=CTB_start_bin, CTB_en_max=CTB_end_bin + 1,
                          nside=nside, eventclass=eventclass, eventtype=eventtype,
                          newstyle=newstyle, data_July16=data_July16)
f_total.make_ps_mask(mask_type=mask_type, energy_bin=energy_bin_mask, force_energy=force_energy)
f_total.add_diffuse_newstyle(comp='p7', eventclass=eventclass, eventtype=eventtype)  # diffuse
f_total.add_bubbles()  # bubbles
f_total.add_iso()      # iso
f_total.add_ps_model()
parser.add_argument('-d', action='store', dest='tag', type=str)
parser.add_argument('-t', action='store', dest='run_tag', type=str)
parser.add_argument('-m', action='store', dest='maps_dir', type=str)
results = parser.parse_args()
run = results.run
analysis = results.analysis
tag = results.tag
run_tag = results.run_tag
maps_dir = results.maps_dir
print 'maps_dir is ', maps_dir

#########################
# configure for fermi
f = fp.fermi_plugin(maps_dir)
f.make_ps_mask()
f.add_diffuse(comp='p6')
f.add_bubbles()
f.add_iso()
f.add_nfw()
f.add_ps(0,0)

#########################
# setup bayesian scan for fermi
b = bsm.bayesian_scan_NPTF(tag=tag)
b.load_external_data(f.CTB_en_bins,f.CTB_count_maps,f.CTB_exposure_maps)
b.add_new_template(f.template_dict)
b.make_mask_total(ps_mask_array=f.ps_mask_array)
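# Hedged usage sketch for the argparse interface above. The script name is
# hypothetical, and the flags feeding results.run / results.analysis are
# assumed to be defined in the truncated part of the parser setup:
#
#   python run_scan.py -d my_tag -t my_run_tag -m /zfs/bsafdi/data/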
def main():
    keyfile = 'nptf/IG_NDI/FD_key_1.txt.gz'
    datafile = 'nptf/IG_NDI/FD_1.txt.gz'
    jetbase = '/tigress/nrodd/FindPSOutput/psdata_29-11-15-m4-b2_roi/psdata_29-11-15-m4-b2_roi_ca_PT_R0p'
    jetfiles = [jetbase+'2.txt.gz',jetbase+'3.txt.gz',jetbase+'4.txt.gz',jetbase+'5.txt.gz',
                jetbase+'6.txt.gz',jetbase+'7.txt.gz',jetbase+'8.txt.gz',jetbase+'9.txt.gz']
    outname = '29-11-15-m4_b2'
    fake_data_key_path = '/tigress/nrodd/NPTFWorking/FindPS/plots/' + keyfile
    CTB_start_bin = 8
    CTB_end_bin = 16
    nside = 512
    data_type = 'p8'

    npix = hp.nside2npix(nside)
    theta, phi = hp.pix2ang(nside,range(npix))
    larr_full = phi*180/np.pi
    larr_full = ((larr_full + 180) % 360) - 180
    larr_full *= np.pi/180
    barr_full = np.pi/2 - theta

    band_mask_range_plot = [-1,1]
    mask_ring_plot = True
    outer_plot = 10

    f = fp.fermi_plugin(maps_dir,CTB_en_min=CTB_start_bin,CTB_en_max=CTB_end_bin,nside=nside,data_name=data_type)
    f.add_diffuse(comp='p6')  # diffuse
    f.add_bubbles()           # bubbles
    f.add_iso()               # iso
    f.add_nfw()
    f.load_psf()
    sigma_PSF_deg = f.sigma_PSF_deg[0:-1]

    # Create background model in each energy bin
    norm_file = '/tigress/nrodd/NPTFWorking/FindPS/data/spect/findps_norm.npy'
    loadnorm = np.load(norm_file)
    bkg = np.zeros(shape=(CTB_end_bin-CTB_start_bin,npix))
    for i in range(CTB_start_bin,CTB_end_bin):
        comb = f.template_dict['p6'][i-CTB_start_bin]*loadnorm[i][1]['p6-0']
        comb += f.template_dict['iso'][i-CTB_start_bin]*loadnorm[i][1]['iso-0']
        comb += f.template_dict['bubs'][i-CTB_start_bin]*loadnorm[i][1]['bubs-0']
        comb += f.template_dict['nfw'][i-CTB_start_bin]*(10**(loadnorm[i][1]['nfw-0']))
        bkg[i-CTB_start_bin,::] = comb

    # Load the fake data map
    load = np.loadtxt('/tigress/nrodd/NPTFWorking/FindPS/plots/' + datafile)
    fakemap = load[CTB_start_bin:CTB_end_bin,::]

    # Now calculate dndF from the sim key
    fake_data_key_load = np.loadtxt(fake_data_key_path)
    fake_data_key = np.zeros(shape=(len(fake_data_key_load),10))
    fake_data_key[::,0:2] = fake_data_key_load[::,0:2]
    fake_data_key[::,2:10] = fake_data_key_load[::,CTB_start_bin+2:CTB_end_bin+2]

    # Now pick out point sources within the ROI we want to analyse
    # NB: cuts in degrees, larr and barr in radians
    pmfocus = 3
    ringfocus = 10
    barr = np.pi/2 - fake_data_key[::,0]
    larr = fake_data_key[::,1]*180/np.pi
    larr = ((larr + 180) % 360) - 180
    larr *= np.pi/180
    thetaarr = np.arccos(np.cos(larr)*np.cos(barr))
    roi = np.where((np.abs(barr) > pmfocus*np.pi/180) & (thetaarr < ringfocus*np.pi/180))[0]
    fake_data_keyroi = fake_data_key[roi,::]

    pfake3FGL = make_flux_histogram_local(fake_data_keyroi,f.CTB_exposure_maps,band_mask_range=band_mask_range_plot,mask_ring=mask_ring_plot,outer=outer_plot)
    pfake3FGL.make_fake_data_flux_histogram(0.1,5000,50)
    pfake3FGL.plot_fake_data_histogram(fmt='o',color='black',markersize=5,label='Sim PS')

    # Now read in Jet data and create dndF plots from that
    # First load the data and background model as we need these
    jetcolors = ['red','orange','yellow','green','blue','indigo','violet','pink']
    # Loop through the files
    for i in range(8):
        print "At i =",i
        jetload = np.loadtxt(jetfiles[i])
        # extract (l,b) from array
        larr = jetload[::,0]
        phi = larr*180/np.pi
        larr = ((larr + 180) % 360) - 180
        larr *= np.pi/180
        barr = jetload[::,1]
        theta = np.pi/2 - barr

        # Determine which are in the ROI
        thetaarr = np.arccos(np.cos(larr)*np.cos(barr))
        roi = np.where((np.abs(barr) > pmfocus*np.pi/180) & (thetaarr < ringfocus*np.pi/180))[0]

        # Now build up the key for each ps in the ROI
        jet_data_key = np.zeros(shape=(len(roi),10))
        for j in range(len(roi)):
            # First put theta and phi in
            jet_data_key[j,0] = theta[roi[j]]
            jet_data_key[j,1] = phi[roi[j]]
            # Now input the counts - determine this from data-bkg within 1 PSF of the source,
            # then rescale up as this is just the 68% containment radius
            # NB: as written the line below computes bkg - data, not data - bkg
            thetaps = np.arccos(np.cos(larr[roi[j]]-larr_full)*np.cos(barr[roi[j]]-barr_full))
            for k in range(8):
                PSF = sigma_PSF_deg[k]
                psroi = np.where(thetaps < PSF*np.pi/180)[0]
                jet_data_key[j,k+2] = np.sum(bkg[k,psroi]-fakemap[k,psroi])/0.68

        # Now plot
        pfake3FGL = make_flux_histogram_local(jet_data_key,f.CTB_exposure_maps,band_mask_range=band_mask_range_plot,mask_ring=mask_ring_plot,outer=outer_plot)
        pfake3FGL.make_fake_data_flux_histogram(0.1,5000,50)
        pfake3FGL.plot_fake_data_histogram(fmt='o',color=jetcolors[i],markersize=5,label='Jet R0p'+str(i+2))

    plt.yscale('log')
    plt.xscale('log')
    plt.xlim([1e-12,1e-6])
    plt.ylim([1e3,1e11])
    plt.tick_params(axis='x',length=5,width=2,labelsize=18)
    plt.tick_params(axis='y',length=5,width=2,labelsize=18)
    plt.xlabel('$F$ [photons / cm$^2$ / s]', fontsize=18)
    plt.ylabel('$dN/dF$ [photons$^{-1}$ cm$^2$ s deg$^{-2}$]', fontsize=18)
    plt.legend(fontsize=16)
    plt.savefig('./Plots/' + outname + '.pdf')
    plt.close()
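# Sketch of the containment correction used above: if counts are summed within
# the 68% containment radius of the PSF, dividing by 0.68 estimates the total
# counts of the source. Toy numbers only, never called by the script.
def _example_containment_correction():
    counts_within_r68 = 340.0
    total_counts_est = counts_within_r68 / 0.68  # -> 500.0
    return total_counts_est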
def setup_for_scan(larr=[0],barr=[0],add_group_ps=False,ps_number=0,fdn=True,fdrun=False):
    global f, b, new_template_dict, sigma_PSF_deg, sigma_PSF_deg_red, n_ps, newstyle, ps_norms, diffnormfix

    # Load Fermi Plugin and its basic functionality
    f = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=emin,CTB_en_max=emax,nside=nside,eventclass=eventclass,eventtype=eventtype,newstyle=newstyle)
    if mask_type != 'False':
        f.make_ps_mask(mask_type=mask_type,force_energy=force_ps_mask,energy_bin=force_ps_mask_bin)

    # Add appropriate templates
    f.add_diffuse_newstyle(comp=diff, eventclass=eventclass, eventtype=eventtype)
    if extraicstemp != 'False':
        f.add_diffuse_newstyle(comp=extraicstemp, eventclass=eventclass, eventtype=eventtype)
    f.add_bubbles(comp='bubs')
    f.add_iso(comp='iso')
    if add_ps_model:
        f.add_ps_model(comp='ps_model')
    if fdn:
        f.add_nfw(comp='nfw')

    # Throughout we use a King function or a Gaussian for the point sources
    if add_group_ps:
        if use_king:
            f.add_multiple_ps_king_fast(larr[0:ps_number],barr[0:ps_number],rescale=ps_norms[0:ps_number],comp='ps_comb')
        else:
            # Add in Gaussian point sources - need the PSF for this
            f.load_psf(data_name='p8',fits_file_path='False')
            sigma_PSF_deg = f.sigma_PSF_deg[0:-1]
            f.add_multiple_ps(larr[0:ps_number],barr[0:ps_number],sigma_PSF_deg[0],rescale=ps_norms[0:ps_number],comp='ps_comb')

    if norm_file != 'False':
        if len(add_norm_file_for_comps) > 0 and add_norm_file != 'False':
            print 'using first normalization file: ', norm_file
            f.use_template_normalization_file(norm_file,key_suffix='-0',dont_use_keys=add_norm_file_for_comps)
            print 'using second normalization file: ', add_norm_file
            f.use_template_normalization_file(add_norm_file,key_suffix='-0',use_keys=add_norm_file_for_comps)
        else:
            f.use_template_normalization_file(norm_file,key_suffix='-0')

    # Setup Bayesian scan for fermi
    b = bsm.bayesian_scan_NPTF(tag=tag,work_dir=work_dir,psf_dir=psf_dir,nside=nside,nlive=nlive,k_max=k_max)
    if fake_data_file == 'False':
        print 'Using real data'
        count_maps = f.CTB_count_maps
        new_count_maps = f.CTB_count_maps
    else:
        print 'Using fake data from the file', fake_data_file
        count_maps = np.loadtxt(fake_data_file)[fake_data_emin:fake_data_emax]
        new_count_maps = np.zeros((len(count_maps),hp.nside2npix(nside)))
        for i in range(len(count_maps)):
            new_count_maps[i] = hp.ud_grade(count_maps[i],nside,power=-2)
    b.load_external_data(f.CTB_en_bins,new_count_maps,f.CTB_exposure_maps)

    if use_simplified_templates:
        b.add_new_template(new_template_dict)
        if fixed_background:
            b.add_fixed_templates({'back':new_template_dict['back']})
    else:
        b.add_new_template(f.template_dict)

    if mask_type != 'False':
        b.make_mask_total(plane_mask=plane_mask, band_mask_range=[-pmval,pmval], lcut=lcut, lmin=lmin, lmax=lmax, bcut=bcut, bmin=bmin, bmax=bmax, mask_ring=mask_ring, inner=inner, outer=outer, ps_mask_array=f.ps_mask_array)
    else:
        b.make_mask_total(plane_mask=plane_mask, band_mask_range=[-pmval,pmval], lcut=lcut, lmin=lmin, lmax=lmax, bcut=bcut, bmin=bmin, bmax=bmax, mask_ring=mask_ring, inner=inner, outer=outer)
    b.rebin_external_data(1)
    b.compress_templates()

    ### Add in the Templates
    if method != 'minuit':
        if not min_prior_range:
            if not fdrun:
                b.add_fixed_templates({diff:[f.template_dict[diff][0]*diffnormfix]})
            else:
                b.add_poiss_model(diff,'$A_{diff}$',[-5,5],False)
            if extraicstemp != 'False':
                b.add_poiss_model(extraicstemp,'$A_{ics}$',[-5,5],False)
            b.add_poiss_model('iso','$A_{iso}$',[-3,3],False)
            b.add_poiss_model('bubs','$A_{bubs}$',[-3,3],False)
        else:
            # If we already have priors, don't scan over such a large range
            if not fdrun:
                b.add_fixed_templates({diff:f.template_dict[diff]*diffnormfix})
            else:
                b.add_poiss_model(diff,'$A_{diff}$',[0.5,1.5],False)
            if extraicstemp != 'False':
                b.add_poiss_model(extraicstemp,'$A_{ics}$',[0.5,1.5],False)
            if high_lat:
                b.add_poiss_model('iso','$A_{iso}$',[0.5,1.5],False)
            else:
                b.add_poiss_model('iso','$A_{iso}$',[0.8,1.2],False)
            b.add_poiss_model('bubs','$A_{bubs}$',[0.5,1.5],False)
    else:
        if not fdrun:
            b.add_fixed_templates({diff:f.template_dict[diff]*diffnormfix})
        else:
            b.add_poiss_model(diff,'$A_{diff}$',[0,2],False)
        if extraicstemp != 'False':
            b.add_poiss_model(extraicstemp,'$A_{ics}$',[-2,2],False)
        b.add_poiss_model('iso','$A_{iso}$',[0,2],False)
        b.add_poiss_model('bubs','$A_{bubs}$',[0,4],False)
    if add_ps_model:
        if not min_prior_range:
            b.add_poiss_model('ps_model','$A_{ps-model}$',[0,6],False)
        else:
            b.add_poiss_model('ps_model','$A_{ps-model}$',[0.5,1.5],False)
    if fdn:
        b.add_poiss_model('nfw','$l10A_{nfw}$',[-6,6],True)
    if add_group_ps:
        b.add_poiss_model('ps_comb','$A_{ps-comb}$',[-5,5],False)

    ### Configure final details
    def sb_string_mod(mod):
        return ['${S_b^{' + mod + '}}^{' + str(i) + '}$' for i in range(1)]

    b.initiate_poissonian_edep()
    print 'Performing a standard template fit ...'
def main():
    global plot_ll_profile, full_dict, eachps_En_center, n_ps, ps_norms, diffnormfix
    diffnormfix = 1.

    # Determine PSF using fermi plugin, for use in the ps ordering
    loadforpsf = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=emin,CTB_en_max=emax,nside=nside,eventclass=eventclass,eventtype=eventtype,newstyle=newstyle)
    loadforpsf.load_psf(data_name='p8',fits_file_path='False')
    sigma_PSF_deg = loadforpsf.sigma_PSF_deg[0:-1]

    # Get an ordered list of the l and b values by ranking - cutting those outside of the region
    # The end of the list contains those sources outside the region but within 1 PSF of the border
    larr,barr,n_ps,n_ps_border = trimorderpslist(ps_file,indeg,sigma_PSF_deg)

    # Define empty dictionary
    full_dict = {}

    # First run to get the diffuse norm
    # Setup for scan
    if method == 'minuit' and run == False:
        pass
    else:
        print 'setting up the scan. the method is', method
        setup_for_scan(larr=larr,barr=barr,add_group_ps=True,ps_number=n_ps,fdn=fixdiffnfw,fdrun=True)
        print 'finished setting up the scan.'
    # Perform scan / load scan
    if run:
        print 'The nside for this scan is ', b.nside
        if method == 'multinest':
            b.perform_scan(run_tag=run_tag_energy)
            do_analysis(minuit_new=False,add_group_ps=False,ps_number=0,fdn=fixdiffnfw,fdrun=True)
        elif method == 'minuit':
            b.perform_scan_minuit(run_tag=run_tag_energy)
            b.save_minuit()
            do_analysis(minuit_new=True,add_group_ps=False,ps_number=0,fdn=fixdiffnfw,fdrun=True)

    # Run with no point sources added
    # Setup for scan
    if method == 'minuit' and run == False:
        pass
    else:
        print 'setting up the scan. the method is', method
        setup_for_scan(larr=larr,barr=barr,add_group_ps=False,ps_number=0)
        print 'finished setting up the scan.'
    # Perform scan / load scan
    if run:
        print 'The nside for this scan is ', b.nside
        if method == 'multinest':
            b.perform_scan(run_tag=run_tag_energy)
            do_analysis(minuit_new=False,add_group_ps=False,ps_number=0)
        elif method == 'minuit':
            b.perform_scan_minuit(run_tag=run_tag_energy)
            b.save_minuit()
            do_analysis(minuit_new=True,add_group_ps=False,ps_number=0)

    # Now repeat the scan while adding the top 10, top 100, and finally all
    # point sources as a combined template (the same call sequence as the
    # original repeated blocks, expressed as a loop)
    for ps_number in [10, 100, n_ps]:
        # Setup for scan
        if method == 'minuit' and run == False:
            pass
        else:
            print 'setting up the scan. the method is', method
            setup_for_scan(larr=larr,barr=barr,add_group_ps=True,ps_number=ps_number)
            print 'finished setting up the scan.'
        #hp.mollview(np.sum(f.template_dict['ps_comb'],axis=0),max=1)
        #plt.show()
        # Perform scan / load scan
        if run:
            print 'The nside for this scan is ', b.nside
            if method == 'multinest':
                b.perform_scan(run_tag=run_tag_energy)
                do_analysis(minuit_new=False,add_group_ps=True,ps_number=ps_number)
            elif method == 'minuit':
                b.perform_scan_minuit(run_tag=run_tag_energy)
                b.save_minuit()
                do_analysis(minuit_new=True,add_group_ps=True,ps_number=ps_number)

    # Finally save the spectra, norm and ts details
    nfwps_savefile = psnfw_spec_dir + save_nfwps_label
    spect = [[eachps_En_center, full_dict]]
    np.save(nfwps_savefile,np.array(spect))
band_mask_range = [-1.,1.]
mask_ring = True
outer = 30
mask_type = 'top300'  # if you don't want to include this, make it 'False'
force_energy = True
energy_bin_mask = 8

# Setup npix, l and b
npix = hp.nside2npix(nside)
theta, phi = hp.pix2ang(nside,range(npix))
larr = phi
barr = np.pi/2 - theta

# Setup Fermi module, which we need to extract data and determine the background model
f = fp.fermi_plugin('/tigress/smsharma/public/CTBCORE/',CTB_en_min=CTB_start_bin,CTB_en_max=CTB_end_bin,nside=nside,data_name='p8')
f.add_diffuse(comp='p6')  # diffuse
f.add_bubbles()           # bubbles
f.add_iso()               # iso
f.add_nfw()

# Load the real data map
realdata = np.sum(f.CTB_count_maps,axis=0)

# Create the masks
if mask_type != 'False':
    f.make_ps_mask(mask_type=mask_type,energy_bin=energy_bin_mask,force_energy=force_energy)
b = bsm.bayesian_scan_NPTF(nside=nside)
b.load_external_data(f.CTB_en_bins,f.CTB_count_maps,f.CTB_exposure_maps)
b2 = copy.deepcopy(b)
b.make_mask_total(band_mask_range=band_mask_range,mask_ring=mask_ring,outer=outer)
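# Sketch of the healpy angle conventions used above: pix2ang returns colatitude
# theta in [0, pi] and longitude phi in [0, 2*pi), so Galactic latitude is
# b = pi/2 - theta and longitude l = phi (both in radians). Standalone toy
# version, never called by the script.
def _example_pix2ang_to_lb():
    import numpy as np
    import healpy as hp
    nside = 16
    theta, phi = hp.pix2ang(nside, np.arange(hp.nside2npix(nside)))
    b = np.pi/2 - theta  # latitude, in [-pi/2, pi/2]
    l = phi              # longitude, in [0, 2*pi)
    return l, b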
def setup_for_scan(i=0,n_ps_groupi=0,larr=[0],barr=[0],psloop1=False,pscomball=False,psloop2bkg=False,psloop2=False):
    global f, b, new_template_dict, sigma_PSF_deg, sigma_PSF_deg_red, n_ps, newstyle, ps_norms_loop1, ps_norms

    # Load Fermi Plugin and its basic functionality
    f = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=emin,CTB_en_max=emax,nside=nside,eventclass=eventclass,eventtype=eventtype,newstyle=newstyle)
    if mask_type != 'False':
        f.make_ps_mask(mask_type=mask_type,force_energy=force_ps_mask,energy_bin=force_ps_mask_bin)

    # Add appropriate templates
    f.add_diffuse_newstyle(comp=diff, eventclass=eventclass, eventtype=eventtype)
    if extraicstemp != 'False':
        f.add_diffuse_newstyle(comp=extraicstemp, eventclass=eventclass, eventtype=eventtype)
    f.add_bubbles(comp='bubs')
    f.add_iso(comp='iso')
    if add_ps_model:
        f.add_ps_model(comp='ps_model')
    if nfw_dm:
        f.add_nfw(comp='nfw')

    if psloop1:
        # Throughout we use a King function or a Gaussian for the point sources
        if use_king:
            for j in range(n_ps_groupi):
                f.add_ps_king_fast(larr[i*n_ps_run+j],barr[i*n_ps_run+j],comp='ps_' + str(i*n_ps_run+j+1))
            if i != 0:
                f.add_multiple_ps_king_fast(larr[0:i*n_ps_run],barr[0:i*n_ps_run],rescale=ps_norms_loop1[0:i*n_ps_run],comp='ps_comb')
        else:
            # Add in Gaussian point sources - need the PSF for this
            f.load_psf(data_name='p8',fits_file_path='False')
            sigma_PSF_deg = f.sigma_PSF_deg[0:-1]
            for j in range(n_ps_groupi):
                f.add_ps(larr[i*n_ps_run+j],barr[i*n_ps_run+j],sigma_PSF_deg[0],comp='ps_' + str(i*n_ps_run+j+1))
            if i != 0:
                f.add_multiple_ps(larr[0:i*n_ps_run],barr[0:i*n_ps_run],sigma_PSF_deg[0],rescale=ps_norms_loop1[0:i*n_ps_run],comp='ps_comb')
    if pscomball:
        if use_king:
            f.add_multiple_ps_king_fast(larr,barr,rescale=ps_norms_loop1,comp='ps_comb')
        else:
            f.load_psf(data_name='p8',fits_file_path='False')
            sigma_PSF_deg = f.sigma_PSF_deg[0:-1]
            f.add_multiple_ps(larr,barr,sigma_PSF_deg[0],rescale=ps_norms_loop1,comp='ps_comb')
    if psloop2bkg:
        if use_king:
            f.add_multiple_ps_king_fast(larr,barr,rescale=ps_norms_loop1,comp='ps_comb',excluded=i)
        else:
            f.load_psf(data_name='p8',fits_file_path='False')
            sigma_PSF_deg = f.sigma_PSF_deg[0:-1]
            f.add_multiple_ps(larr,barr,sigma_PSF_deg[0],rescale=ps_norms_loop1,comp='ps_comb',excluded=i)
    if psloop2:
        if use_king:
            f.add_ps_king_fast(larr[i],barr[i],rescale=ps_norms_loop1[i],comp='ps_' + str(i+1))
            f.add_multiple_ps_king_fast(larr,barr,rescale=ps_norms_loop1,comp='ps_comb',excluded=i)
        else:
            f.load_psf(data_name='p8',fits_file_path='False')
            sigma_PSF_deg = f.sigma_PSF_deg[0:-1]
            f.add_ps(larr[i],barr[i],sigma_PSF_deg[0],rescale=ps_norms_loop1[i],comp='ps_' + str(i+1))
            f.add_multiple_ps(larr,barr,sigma_PSF_deg[0],rescale=ps_norms_loop1,comp='ps_comb',excluded=i)

    if norm_file != 'False':
        if len(add_norm_file_for_comps) > 0 and add_norm_file != 'False':
            print 'using first normalization file: ', norm_file
            f.use_template_normalization_file(norm_file,key_suffix='-0',dont_use_keys=add_norm_file_for_comps)
            print 'using second normalization file: ', add_norm_file
            f.use_template_normalization_file(add_norm_file,key_suffix='-0',use_keys=add_norm_file_for_comps)
        else:
            f.use_template_normalization_file(norm_file,key_suffix='-0')

    # Setup Bayesian scan for fermi
    b = bsm.bayesian_scan_NPTF(tag=tag,work_dir=work_dir,psf_dir=psf_dir,nside=nside,nlive=nlive,k_max=k_max)
    if fake_data:
        data = np.loadtxt(fake_data_path)
    else:
        data = f.CTB_count_maps
    b.load_external_data(f.CTB_en_bins,data,f.CTB_exposure_maps)

    if use_simplified_templates:
        b.add_new_template(new_template_dict)
        if fixed_background:
            b.add_fixed_templates({'back':new_template_dict['back']})
    else:
        b.add_new_template(f.template_dict)

    if mask_type != 'False':
        b.make_mask_total(plane_mask=plane_mask, band_mask_range=[-pmval,pmval], lcut=lcut, lmin=lmin, lmax=lmax, bcut=bcut, bmin=bmin, bmax=bmax, mask_ring=mask_ring, inner=inner, outer=outer, ps_mask_array=f.ps_mask_array)
    else:
        b.make_mask_total(plane_mask=plane_mask, band_mask_range=[-pmval,pmval], lcut=lcut, lmin=lmin, lmax=lmax, bcut=bcut, bmin=bmin, bmax=bmax, mask_ring=mask_ring, inner=inner, outer=outer)
    b.rebin_external_data(1)
    b.compress_templates()

    ### Add in the Templates
    if method != 'minuit':
        if not min_prior_range:
            b.add_poiss_model(diff,'$A_{diff}$',[-5,5],False)
            if extraicstemp != 'False':
                b.add_poiss_model(extraicstemp,'$A_{ics}$',[-5,5],False)
            b.add_poiss_model('iso','$A_{iso}$',[-3,3],False)
            b.add_poiss_model('bubs','$A_{bubs}$',[-3,3],False)
        else:
            # If we already have priors, don't scan over such a large range
            b.add_poiss_model(diff,'$A_{diff}$',[0.5,1.5],False)
            if extraicstemp != 'False':
                b.add_poiss_model(extraicstemp,'$A_{ics}$',[0.5,1.5],False)
            if high_lat:
                b.add_poiss_model('iso','$A_{iso}$',[0.5,1.5],False)
            else:
                b.add_poiss_model('iso','$A_{iso}$',[0.8,1.2],False)
            b.add_poiss_model('bubs','$A_{bubs}$',[0.5,1.5],False)
    else:
        b.add_poiss_model(diff,'$A_{diff}$',[0,2],False)
        if extraicstemp != 'False':
            b.add_poiss_model(extraicstemp,'$A_{ics}$',[-2,2],False)
        b.add_poiss_model('iso','$A_{iso}$',[0,2],False)
        b.add_poiss_model('bubs','$A_{bubs}$',[0,4],False)
    if add_ps_model:
        if not min_prior_range:
            b.add_poiss_model('ps_model','$A_{ps-model}$',[0,6],False)
        else:
            b.add_poiss_model('ps_model','$A_{ps-model}$',[0.5,1.5],False)
    if nfw_dm:
        # Leave this prior large as the NFW struggles to converge at high E
        b.add_poiss_model('nfw','$l10A_{nfw}$',[-6,6],True)
    if psloop1:
        for j in range(n_ps_groupi):
            b.add_poiss_model('ps_' + str(i*n_ps_run+j+1),'$l10A_{ps' + str(i*n_ps_run+j+1) + '}$',[-10,10],True)
        if i != 0:
            b.add_poiss_model('ps_comb','$A_{ps-comb}$',[0.5,1.5],False)
    if pscomball:
        b.add_poiss_model('ps_comb','$A_{ps-comb}$',[0.5,1.5],False)
    if psloop2bkg:
        b.add_poiss_model('ps_comb','$A_{ps-comb}$',[0.5,1.5],False)
    if psloop2:
        b.add_poiss_model('ps_' + str(i+1),'$A_{ps' + str(i+1) + '}$',[0.5,1.5],False)
        # Ideally we would essentially fix this component, but if the range is
        # made too small minuit crashes
        b.add_poiss_model('ps_comb','$A_{ps-comb}$',[0,3],False)

    ### Configure final details
    def sb_string_mod(mod):
        return ['${S_b^{' + mod + '}}^{' + str(i) + '}$' for i in range(1)]

    b.initiate_poissonian_edep()
    print 'Performing a standard template fit ...'
n_flux_bins = 15
error_range = 0.68  # for plotting uncertainties

###### disk filename
disk_file_name = 'thindiskmodel_ec.fits'  # 'simplediskmodel_ec.fits'

####################
########## Internal code
###################

## Fermi plugin instance
f = fp.fermi_plugin(maps_dir,CTB_en_min=CTB_start_bin,CTB_en_max=CTB_end_bin,nside=nside,data_name=data_type)
#f.make_ps_mask(mask_type=mask_type,energy_bin=energy_bin_mask,force_energy=force_energy)
f.add_diffuse(comp='p6')  # diffuse
f.add_bubbles()           # bubbles
f.add_iso()               # iso
f.add_nfw()               # NFW-DM
f.add_template_from_file('disk',disk_file_name)  # we will call this template 'disk'
if mask_type != 'False':
    f.make_ps_mask(mask_type=mask_type,energy_bin=energy_bin_mask,force_energy=force_energy)
f.use_template_normalization_file(norm_file_path,key_suffix='-0')
def __init__(self,Emin,eventclass=5,eventtype=3,fermi_data_dir='/mnt/hepheno/FermiData/',
             maps_dir='/mnt/hepheno/CTBCORE/',nside=128,mask_type='top300',
             force_energy=False,newstyle=1,diff='p8',data_July16=True):
    self.f = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,CTB_en_min=Emin,
                             CTB_en_max=Emin+1,nside=nside,eventclass=eventclass,
                             eventtype=eventtype,newstyle=newstyle,data_July16=data_July16)
    self._setup_f(mask_type,Emin,force_energy,eventclass,eventtype,diff)
    self.extract()
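# Hedged usage sketch for the constructor above. The enclosing class name
# (here called FermiBin) is hypothetical, and Emin=10 is an arbitrary energy
# bin chosen for illustration:
#
#   bin_10 = FermiBin(10, eventclass=5, eventtype=3, nside=128)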
# NB: map(None, ...) is the Python 2 zip-with-padding idiom
ps_string = ['ps-l-'+str(round(ell_i,3)) + '-b-'+str(round(b_i,3)) for ell_i, b_i in map(None,ell_0,b_0)]
inner = 0

##################
#################
#### figure out the mask
if mask_string != 'False':
    mask = hp.ud_grade(np.array(np.loadtxt(mask_string)), nside)

#########################
# configure for fermi
f = fp.fermi_plugin(maps_dir,work_dir=work_dir,CTB_en_min=emin,CTB_en_max=emax,nside=nside)
if mask_type != 'False':
    f.make_ps_mask(mask_type=mask_type,force_energy=False)
# f.add_diffuse(comp = diff)
f.add_bubbles(comp='bubs')
f.add_iso(comp='iso')

###### Need to figure out the PSF
f.load_psf(data_name='p8',fits_file_path='False')
sigma_PSF_deg = f.sigma_PSF_deg[0]
print 'The psf is', sigma_PSF_deg
######
for ell_i, b_i, ps_string_i in map(None,ell_0,b_0,ps_string):
    f.add_ps(ell_i,b_i,f.sigma_PSF_deg[0],comp=ps_string_i)
############
def scan(self):
    print("Getting into scan")

    ################
    # Fermi plugin #
    ################

    print("Loading Fermi plugin...")
    # Load the Fermi plugin - always load all energy bins and extract what is needed
    f_global = fp.fermi_plugin(maps_dir, fermi_data_dir=fermi_data_dir,
                               work_dir=work_dir, CTB_en_min=0, CTB_en_max=40,
                               nside=self.nside, eventclass=self.eventclass,
                               eventtype=self.eventtype, newstyle=1,
                               data_July16=True)
    print("... done")

    # Load the necessary templates
    f_global.add_diffuse_newstyle(comp=self.diff, eventclass=self.eventclass,
                                  eventtype=self.eventtype)
    f_global.add_iso()
    ps_temp = np.load(work_dir + '/DataFiles/PS-Maps/ps_map.npy')
    f_global.add_template_by_hand(comp='ps_model', template=ps_temp)
    f_global.add_bubbles()

    # If Asimov, normalize the templates and create a summed map
    if self.Asimov:
        norm_file = work_dir + '/DataFiles/Misc/P8UCVA_norm.npy'
        f_global.use_template_normalization_file(norm_file, key_suffix='-0')
        Asimov_data = np.zeros((40, hp.nside2npix(self.nside)))
        for key in f_global.template_dict.keys():
            Asimov_data += np.array(f_global.template_dict[key])

    ###################
    # Get DM halo map #
    ###################

    print("Getting halo map...")
    if not self.randlocs:  # Use the catalog location
        l = self.catalog.l.values[self.iobj]
        b = self.catalog.b.values[self.iobj]
    else:  # Draw a random unmasked location
        ps0p5_mask = np.load(work_dir + '/DataFiles/Misc/mask0p5_3FGL.npy') > 0
        badval = True
        while badval:
            # Isotropic draw: uniform in ell and in cos(theta)
            test_ell = np.random.uniform(0., 2*np.pi)
            test_b = np.arccos(np.random.uniform(-1., 1.)) - np.pi/2.
            test_pixval = hp.ang2pix(self.nside, test_b + np.pi/2, test_ell)
            # Accept only if masked by neither the plane nor the PS mask
            if (np.abs(test_b)*180./np.pi > 20.) & (ps0p5_mask[test_pixval] == 0):
                badval = False
        l = test_ell*180./np.pi
        b = test_b*180./np.pi
        np.savetxt(self.save_dir + "/lb_obj" + str(self.iobj) + ".dat",
                   np.array([l, b]))

    rs = self.catalog.rs.values[self.iobj]*1e-3
    if self.boost:
        J0 = 10**self.catalog.mulog10J_inf.values[self.iobj]
    else:
        J0 = 10**self.catalog.mulog10Jnb_inf.values[self.iobj]
    mk = mkDMMaps.mkDMMaps(z=self.catalog.z[self.iobj], r_s=rs, J_0=J0,
                           ell=l*np.pi/180, b=b*np.pi/180, nside=self.nside,
                           use_boost=self.use_boost, Burkert=self.Burkert)
    DM_template_base = mk.map
    print("...done")

    #########################################
    # Loop over energy bins to get xsec LLs #
    #########################################

    A_ary = 10**np.linspace(-6, 6, 200)
    LL_inten_ary = np.zeros((len(self.ebins)-1, len(A_ary)))
    inten_ary = np.zeros((len(self.ebins)-1, len(A_ary)))

    # 10 degree mask around the halo for the analysis
    analysis_mask = cm.make_mask_total(mask_ring=True, inner=0, outer=10,
                                       ring_b=b, ring_l=l)

    for iebin, ebin in tqdm(enumerate(np.arange(self.emin, self.emax+1)),
                            disable=1-self.verbose):

        ######################
        # Templates and maps #
        ######################

        if self.verbose:
            print "At bin", ebin

        if self.imc != -1:
            data = np.load(mc_dir + 'MC_allhalos_p7_' + self.dm_string + '_v'
                           + str(self.imc) + '.npy')[ebin].astype(np.float64)
        else:
            data = f_global.CTB_count_maps[ebin].astype(np.float64)
        fermi_exposure = f_global.CTB_exposure_maps[ebin]

        DM_template = DM_template_base*fermi_exposure/np.sum(DM_template_base*fermi_exposure)
        print("Loading smoothing class...")
        ksi = ks.king_smooth(maps_dir, ebin, self.eventclass, self.eventtype,
                             threads=1)
        print("...done!")
        print("Beginning to smooth...")
        DM_template_smoothed = ksi.smooth_the_map(DM_template)
        print("...done!")
        DM_intensity_base = np.sum(DM_template_smoothed/fermi_exposure)

        dif = f_global.template_dict[self.diff][ebin]
        iso = f_global.template_dict['iso'][ebin]
        psc = f_global.template_dict['ps_model'][ebin]
        bub = f_global.template_dict['bubs'][ebin]

        # If doing Asimov this first scan is irrelevant, but it takes no time so run it

        ####################
        # NPTFit norm scan #
        ####################

        n = nptfit.NPTF(tag='norm_o'+str(self.iobj)+'_E'+str(ebin)+self.mc_tag)
        n.load_data(data, fermi_exposure)
        n.load_mask(analysis_mask)

        n.add_template(dif, self.diff)
        n.add_template(iso, 'iso')
        n.add_template(psc, 'psc')
        n.add_template(bub, 'bub')

        n.add_poiss_model(self.diff, '$A_\mathrm{dif}$', [0, 10], False)
        n.add_poiss_model('iso', '$A_\mathrm{iso}$', [0, 20], False)
        if np.sum(bub*np.logical_not(analysis_mask)) != 0:
            n.add_poiss_model('bub', '$A_\mathrm{bub}$', [0, 10], False)

        # # Add a PS at the halo location
        # ps_halo_map = np.zeros(hp.nside2npix(self.nside))
        # # ang2pix takes (theta, phi) in radians
        # ps_halo_idx = hp.ang2pix(self.nside, np.pi/2. - b*np.pi/180., l*np.pi/180.)
        # ps_halo_map[ps_halo_idx] = 1.
        # ps_halo_map_smoothed = ksi.smooth_the_map(ps_halo_map)
        # n.add_template(ps_halo_map_smoothed, 'ps_halo')
        # n.add_poiss_model('ps_halo', 'ps_halo', [0, 100], False)

        if self.floatDM:
            if ebin >= 7:
                # Don't float DM in the initial scan below ~1 GeV (bin 7): there
                # the Fermi PSF is so large that the DM template often picks up
                # spurious excesses in MC
                n.add_template(DM_template_smoothed, 'DM')
                n.add_poiss_model('DM', '$A_\mathrm{DM}$', [0, 1000], False)

        if self.float_ps_together:
            n.add_poiss_model('psc', '$A_\mathrm{psc}$', [0, 10], False)
        else:
            # Astropy-formatted coordinates of the cluster
            c2 = SkyCoord(l=[l]*u.deg, b=[b]*u.deg, frame="galactic")
            idx3fgl_10, _, _, _ = c2.search_around_sky(self.c3, 10*u.deg)
            idx3fgl_18, _, _, _ = c2.search_around_sky(self.c3, 18*u.deg)
            ps_map_outer = np.zeros(hp.nside2npix(self.nside))
            for i3fgl in idx3fgl_18:
                ps_file = np.load(ps_indiv_dir + '/ps_temp_128_5_'
                                  + str(self.eventtype) + '_' + str(i3fgl) + '.npy')
                ps_map = np.zeros(hp.nside2npix(self.nside))
                ps_map[np.vectorize(int)(ps_file[:, ebin, 0])] = ps_file[:, ebin, 1]
                if i3fgl in idx3fgl_10:
                    # If within 10 degrees, float individually
                    n.add_template(ps_map, 'ps_'+str(i3fgl))
                    n.add_poiss_model('ps_'+str(i3fgl),
                                      '$A_\mathrm{ps'+str(i3fgl)+'}$', [0, 10], False)
                else:
                    # Otherwise add it to the map of sources floated together
                    ps_map_outer += ps_map
            if np.sum(ps_map_outer) != 0:
                n.add_template(ps_map_outer, 'ps_outer')
                n.add_poiss_model('ps_outer', '$A_\mathrm{ps_outer}$', [0, 10], False)

        n.configure_for_scan()

        ##########
        # Minuit #
        ##########

        # Skip this step if there is no data in the ROI (higher energy bins)
        if np.sum(data*np.logical_not(analysis_mask)) != 0:
            keys = n.poiss_model_keys
            limit_dict = {}
            init_val_dict = {}
            step_size_dict = {}
            for key in keys:
                if key == 'DM':
                    limit_dict['limit_'+key] = (0, 1000)
                else:
                    limit_dict['limit_'+key] = (0, 50)
                init_val_dict[key] = 0.0
                step_size_dict['error_'+key] = 1.0
            other_kwargs = {'print_level': self.verbose, 'errordef': 1}
            z = limit_dict.copy()
            z.update(init_val_dict)
            z.update(step_size_dict)
            z.update(other_kwargs)
            f = call_ll(len(keys), n.ll, keys)
            m = Minuit(f, **z)
            m.migrad(ncall=30000, precision=1e-14)

        ###################################
        # NPTFit fixed DM and bkg profile #
        ###################################

        # Make the background sum and initiate the second scan
        # If there was no data, leave bkg_sum as 0
        bkg_sum = np.zeros(len(data))
        if np.sum(data*np.logical_not(analysis_mask)) != 0:
            for key in keys:
                if key != 'DM':  # Don't add DM into the background sum
                    if m.values[key] != 0:
                        bkg_sum += n.templates_dict[key]*m.values[key]
                    else:
                        # If the best fit is zero, use the ~parabolic error instead
                        bkg_sum += n.templates_dict[key]*m.errors[key]/2.
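        # Illustrative aside, not part of the scan: the keyword dict passed to
        # Minuit above follows the iminuit < 2.0 convention (assumed here to
        # match the version this code was written against), where each
        # parameter <name> takes its initial value from <name>, its bounds
        # from limit_<name>, and its initial step size from error_<name>.
        # A standalone toy version of the same pattern:
        #
        #     from iminuit import Minuit
        #     def toy_chi2(a, b):
        #         return (a - 1.)**2 + (b - 2.)**2
        #     m = Minuit(toy_chi2, a=0., b=0., error_a=1., error_b=1.,
        #                limit_a=(0., 50.), limit_b=(0., 50.),
        #                errordef=1, print_level=0)
        #     m.migrad()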
        nDM = nptfit.NPTF(tag='dm_o'+str(self.iobj)+'_E'+str(ebin)+self.mc_tag)
        if self.Asimov:
            # Use the background expectation for the data
            nDM.load_data(Asimov_data[ebin], fermi_exposure)
            nDM.add_template(Asimov_data[ebin], 'bkg_sum')
        else:
            nDM.load_data(data, fermi_exposure)
            nDM.add_template(bkg_sum, 'bkg_sum')

        # If there is no data, only go over pixels where DM is non-zero
        if np.sum(data*np.logical_not(analysis_mask)) != 0:
            nDM.load_mask(analysis_mask)
        else:
            nodata_mask = DM_template_smoothed == 0
            nDM.load_mask(nodata_mask)
        nDM.add_poiss_model('bkg_sum', '$A_\mathrm{bkg}$', fixed=True,
                            fixed_norm=1.0)

        #####################
        # Get intensity LLs #
        #####################

        for iA, A in enumerate(A_ary):
            new_n2 = copy.deepcopy(nDM)
            new_n2.add_template(A*DM_template_smoothed, 'DM')
            new_n2.add_poiss_model('DM', 'DM', False, fixed=True, fixed_norm=1.0)
            new_n2.configure_for_scan()
            max_LL = new_n2.ll([])
            LL_inten_ary[iebin, iA] = max_LL
            inten_ary[iebin, iA] = DM_intensity_base*A

    np.savez(self.save_dir + 'LL_inten_o' + str(self.iobj) + self.mc_tag,
             LL=LL_inten_ary, intens=inten_ary)
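# An illustrative aside: a minimal sketch of how the likelihood profile saved
# above might be converted into a one-sided 95% upper limit on the DM
# intensity, using the standard profile-likelihood prescription where the
# limit sits where 2*Delta(lnL) drops 2.71 below the best fit. The file keys
# match the np.savez call above; the linear scan (rather than interpolation)
# is an assumption made for brevity.
import numpy as np

def intensity_limit_95(ll_file, iebin):
    """95% upper limit on the DM intensity in energy bin iebin."""
    saved = np.load(ll_file)
    LL, intens = saved['LL'][iebin], saved['intens'][iebin]
    TS = 2.*(LL - LL.max())  # test statistic relative to the best fit
    imax = np.argmax(LL)
    # Scan upwards from the best-fit intensity until the TS crosses -2.71
    for iA in range(imax, len(TS)):
        if TS[iA] < -2.71:
            return intens[iA]
    return intens[-1]  # never crossed; the limit lies beyond the scanned range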
xsec = results.xsec

# IC file name
force_mask_at_bin_number = emin  # hard-coded for now

# For the scan
run_tag_energy = run_tag  # + '-' + str(emin) + '-' + str(emax)
print 'maps_dir is ', maps_dir
newstyle = 1  # Keyword to activate the newstyle codes while the old ones are kept around for legacy reasons

# Configure the Fermi plugin
f = fp.fermi_plugin(maps_dir, fermi_data_dir=fermi_data_dir, work_dir=work_dir,
                    CTB_en_min=emin, CTB_en_max=emax, nside=nside,
                    eventclass=eventclass, eventtype=eventtype,
                    newstyle=newstyle)
if mask_type != 'False':
    f.make_ps_mask(mask_type=mask_type, force_energy=True,
                   energy_bin=force_mask_at_bin_number)
# f.add_diffuse_newstyle(comp = diff, eventclass = eventclass, eventtype = eventtype)
f.add_bubbles(comp='bubs')
f.add_iso(comp='iso')
if ps_model:
    f.add_ps_model(comp='ps_model')
# f.add_ps(0,0,0.15)
if norm_file != 'False':
    f.use_template_normalization_file(norm_file)

# PSF
f.load_psf(data_name='p8', fits_file_path='False', eventclass=eventclass,
           eventtype=eventtype)
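# An illustrative aside on force_energy above: a minimal sketch, assuming
# make_ps_mask(force_energy=True, energy_bin=i) builds the point-source mask
# at bin i and reuses it in every energy bin (useful since the PSF, and hence
# the mask size, shrinks with energy). The plugin internals may differ;
# per_bin_masks here is a hypothetical (n_ebins, npix) boolean array.
import numpy as np

def force_mask_to_bin(per_bin_masks, energy_bin):
    """Replace every energy bin's PS mask with the mask from a single bin."""
    return np.tile(per_bin_masks[energy_bin], (len(per_bin_masks), 1))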
def main():
    global plot_ll_profile, eachps_dict, eachps_En_center

    # We need the PSF a number of times in this calculation. Rather than
    # continually recalculating it, do it once and for all here using the
    # Fermi plugin
    # NB: will later want to compute the King function parameters here too

    # Determine the PSF using the Fermi plugin
    loadforpsf = fp.fermi_plugin(maps_dir, fermi_data_dir=fermi_data_dir,
                                 work_dir=work_dir, CTB_en_min=emin,
                                 CTB_en_max=emax, nside=nside,
                                 eventclass=eventclass, eventtype=eventtype,
                                 newstyle=newstyle)
    loadforpsf.load_psf(data_name='p8', fits_file_path='False',
                        eventclass=eventclass, eventtype=eventtype)
    sigma_PSF_deg = loadforpsf.sigma_PSF_deg[0:-1]

    # The strategy from here is as follows:
    # - Break the point-source list up into groups of n_ps_run, in order of ranking
    # - For each group, float each point source individually and extract its
    #   normalisation (this is done at the analysis step)
    # - Next, create a template combining all of the point sources just fitted
    # - Repeat for the next group, adding in a template of all the point
    #   sources already run, until all point sources have been run
    # - After this, perform a run with the full ps map to get its spectrum
    # - Finally, rerun each ps one by one, fixing the rest, to get its TS

    for i in range(n_groups):
        if i+1 == n_groups:
            n_ps_groupi = n_last_group
        else:
            n_ps_groupi = n_ps_run

        # Setup for scan
        if method == 'minuit' and not run:
            pass
        else:
            print 'setting up the scan. the method is', method
            setup_for_scan(i=i, n_ps_groupi=n_ps_groupi, larr=larr, barr=barr,
                           psloop1=True)
            print 'finished setting up the scan.'

        # Perform / load the scan
        if run:
            print 'The nside for this scan is ', b.nside
            if method == 'multinest':
                b.perform_scan(run_tag=run_tag_energy)
                do_analysis(i=i, n_ps_groupi=n_ps_groupi, minuit_new=False,
                            psloop1=True)
            elif method == 'minuit':
                b.perform_scan_minuit(run_tag=run_tag_energy)
                b.save_minuit()
                do_analysis(i=i, n_ps_groupi=n_ps_groupi, minuit_new=True,
                            psloop1=True)

    # Now perform a run to extract the spectrum of all point sources combined
    # Setup for scan
    if method == 'minuit' and not run:
        pass
    else:
        print 'setting up the scan. the method is', method
        setup_for_scan(pscomball=True)
        print 'finished setting up the scan.'

    # Perform / load the scan
    if run:
        print 'The nside for this scan is ', b.nside
        if method == 'multinest':
            b.perform_scan(run_tag=run_tag_energy)
            do_analysis(pscomball=True, minuit_new=False)
        elif method == 'minuit':
            b.perform_scan_minuit(run_tag=run_tag_energy)
            b.save_minuit()
            do_analysis(pscomball=True, minuit_new=True)

    # Now loop over all point sources, floating one at a time with the others
    # fixed; from this we extract their TS and spectrum
    # Initialise an empty dictionary to put the spectra in
    eachps_dict = {}

    for i in range(n_ps):
        # Setup for scan
        if method == 'minuit' and not run:
            pass
        else:
            print 'setting up the scan. the method is', method
            setup_for_scan(i=i, larr=larr, barr=barr, psloop2=True)
            print 'finished setting up the scan.'

        # Perform / load the scan
        if run:
            print 'The nside for this scan is ', b.nside
            if method == 'multinest':
                b.perform_scan(run_tag=run_tag_energy)
                do_analysis(i=i, minuit_new=False, psloop2=True)
            elif method == 'minuit':
                b.perform_scan_minuit(run_tag=run_tag_energy)
                b.save_minuit()
                do_analysis(i=i, minuit_new=True, psloop2=True)

    # Finally save the spectra dictionary
    eachps_savefile = ps_spec_dir + save_spect_label_ps
    if os.path.isfile(eachps_savefile + '.npy'):
        print 'appending to eachps spectrum ...'
        spect = list(np.load(eachps_savefile + '.npy'))
        spect += [[eachps_En_center, eachps_dict]]
        np.save(eachps_savefile, np.array(spect))
    else:
        print 'saving eachps spectra for first time'
        spect = [[eachps_En_center, eachps_dict]]
        np.save(eachps_savefile, np.array(spect))
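# An illustrative aside: a minimal sketch of reading the spectra back. Each
# entry of the saved array is an [eachps_En_center, eachps_dict] pair, so the
# object array needs allow_pickle=True under numpy >= 1.16.3 (an assumption
# about the numpy version in use; older releases load pickled arrays by default).
import numpy as np

def load_eachps_spectra(savefile):
    """Return the list of (En_center, spectra_dict) pairs saved above."""
    return list(np.load(savefile + '.npy', allow_pickle=True))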