def main(): """Test module """ plt.figure() leg, lab = ref_igrb_band() igrb, lab2 = ref_igrb_noFGsub() plt.xscale('log') plt.yscale('log') plt.legend([leg, igrb], [lab, lab2]) plt.show()
def maps_view(**kwargs):
    """Viewer interface for HEALPix maps.
    """
    input_file = kwargs['infile']
    if not os.path.exists(input_file):
        abort("Map %s not found!" % input_file)
    healpix_maps = hp.read_map(input_file, field=kwargs['field'])
    t = os.path.basename(input_file)
    plt.figure(figsize=(10, 7), dpi=80)
    nside_out = kwargs['udgrade']
    logger.info('Returning a map with NSIDE=%i' % nside_out)
    if kwargs['field'] == 0:
        if kwargs['counts'] == True:
            # Counts maps are down-graded with power=-2 so that the total
            # number of counts is preserved.
            healpix_maps = hp.pixelfunc.ud_grade(healpix_maps, nside_out,
                                                 pess=True, power=-2)
        else:
            healpix_maps = hp.pixelfunc.ud_grade(healpix_maps, nside_out,
                                                 pess=True)
        if kwargs['optimized'] == True:
            logger.info('Optimizing...')
            hp.mollview(healpix_maps, title=t.replace('.fits', ''),
                        coord='G', min=1e-7, max=1e-4, norm='log')
        else:
            hp.mollview(healpix_maps, title=t.replace('.fits', ''),
                        coord='G')
        hp.graticule()
        overlay_tag(color='silver', x=0.45)
        save_current_figure(t.replace('.fits', '.png'))
    else:
        # Down-grade once, outside the loop, then draw one figure per field.
        healpix_maps = hp.pixelfunc.ud_grade(healpix_maps, nside_out,
                                             pess=True)
        for i, maps in enumerate(healpix_maps):
            if kwargs['optimized'] == True:
                logger.info('Optimizing...')
                hp.mollview(maps, title=t.replace('.fits', '_%i' % i),
                            coord='G', min=1e-7, max=1e-4, norm='log')
                hp.graticule()
                overlay_tag(color='silver', x=0.45)
            else:
                hp.mollview(maps, title=t.replace('.fits', '_%i' % i),
                            coord='G')
                hp.graticule()
                overlay_tag(color='silver', x=0.05)
            save_current_figure(t.replace('.fits', '_%i.png' % i))
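# Hedged usage sketch (not part of the original module): the keyword names
# mirror the kwargs read above; the input path is hypothetical.
# maps_view(infile='output/flux_map_1GeV.fits',  # hypothetical path
#           field=0,
#           udgrade=128,
#           counts=False,
#           optimized=True)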
def main(): """Test module """ leg, lab = ref_cp_band() plt.legend() plt.xscale('log') plt.yscale('log') plt.show() plt.figure() leg, lab = ref_igrb_band() igrb, lab2 = ref_igrb_noFGsub() plt.xscale('log') plt.yscale('log') plt.legend([leg, igrb], [lab, lab2]) plt.show()
emin, emax, emean = [], [], []
cls_tocompare, clerrs_tocompare = [], []
for f in Cl_FILES:
    emin, emax, emean, cls, clerrs = cl_parse(f)
    cls_tocompare.append(cls)
    clerrs_tocompare.append(clerrs)

from GRATools.utils.gWindowFunc import get_psf_ref
psf_ref = get_psf_ref(psf_ref_file)
ymin, ymax = -1e-15, 1e-15
for i in range(0, len(cls_tocompare[0])):
    psf_en = psf_ref(emean[i])
    l_max = _l_max[i]  # min(500, 1.9*(np.pi/np.radians(psf_en)))
    l_min = _l_min[i]  # min(60, max(50-i*5, 10))
    plt.figure(figsize=(10, 7), dpi=80)
    for j, f in enumerate(Cl_FILES):
        _l = np.arange(1, len(cls_tocompare[j][i]))
        _l_rebin, _cls_rebin, _clerrs_rebin = [], [], []
        xerrL, xerrR = [], []
        for bmin, bmax in zip(rebinning[:-1], rebinning[1:]):
            # Geometric mean as the bin center, with asymmetric x errors
            # spanning the bin.
            _l_rebin.append(np.sqrt(bmin*bmax))
            xerrL.append(abs(np.sqrt(bmin*bmax) - bmin))
            xerrR.append(abs(np.sqrt(bmin*bmax) - bmax))
            _index = np.where(np.logical_and(_l >= bmin, _l < bmax))
            clmean = np.average(cls_tocompare[j][i][_index])
            clmeanerr = np.sqrt(np.sum(clerrs_tocompare[j][i][_index]**2))/\
                np.sqrt(len(cls_tocompare[j][i][_index]))
            _cls_rebin.append(clmean)
            _clerrs_rebin.append(clmeanerr)
        _l_rebin = np.array(_l_rebin)
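# Hedged illustration (not part of the original script): a minimal,
# self-contained version of the multipole rebinning used above, with
# hypothetical bin edges. Bin centers are geometric means of the edges and
# the per-bin error combines the single-l errors in quadrature, scaled by
# sqrt(N), mirroring the script above.
import numpy as np

def rebin_cl(l, cl, clerr, edges):
    """Rebin an APS over the multipole bins defined by 'edges'."""
    lr, clr, err_r = [], [], []
    for bmin, bmax in zip(edges[:-1], edges[1:]):
        idx = np.where((l >= bmin) & (l < bmax))
        lr.append(np.sqrt(bmin*bmax))
        clr.append(np.average(cl[idx]))
        err_r.append(np.sqrt(np.sum(clerr[idx]**2))/np.sqrt(len(idx[0])))
    return np.array(lr), np.array(clr), np.array(err_r)

# Toy example (hypothetical numbers):
# l = np.arange(1, 1000)
# lr, clr, er = rebin_cl(l, np.full(999, 1e-15), np.full(999, 1e-16),
#                        edges=np.array([10, 30, 100, 300, 1000]))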
def fit_foreground_poisson(fore_map, data_map, mask_map=None, n_guess=1.,
                           c_guess=0.1, exp=None, smooth=False, show=False):
    """Performs the Poissonian fit, recursively computing the log likelihood
       (using poisson_likelihood) for a grid of values of the fit parameters
       around the guess. Returns the values of the parameters which minimize
       the log likelihood, together with the 1-sigma errors.

       n_guess : float
          initial guess for the normalization parameter
       c_guess : float
          initial guess for the constant parameter
       fore_map : numpy array
          HEALPix map of the foreground model
       data_map : numpy array
          HEALPix map of the data. It can be either a counts map or a flux
          map. If a counts map is given, an exposure map must be given too
          (see the 'exp' parameter).
       mask_map : numpy array or None
          HEALPix map of the mask; if None, a default mask (30-deg Galactic
          plane cut plus 2-deg source disks) is used.
       exp : numpy array or None
          HEALPix map of the exposure. Must be given if the data map is in
          counts (because the foreground map is in flux units by default and
          needs to be turned into counts to be fitted). If the data map is
          in flux units, leave this parameter to its default value of None.
       smooth : bool
          not implemented yet...
       show : bool
          if True, some useful plots are shown to check whether the fit is
          working.
    """
    #show=True
    logger.info('Performing Poissonian fit...')
    norm_guess = n_guess
    igrb_guess = c_guess
    nside_out = 64
    mask = 0.
    if mask_map is None:
        logger.info('Fit outside default mask: 30deg gp, 2 deg srcs.')
        mask_f = os.path.join(GRATOOLS_CONFIG, 'fits/Mask64_src2_gp30.fits')
        mask = hp.read_map(mask_f)
    else:
        logger.info('Fit outside mask given in config file.')
        mask = mask_map
    logger.info('Downgrading...')
    fore_repix = np.array(hp.ud_grade(fore_map, nside_out=nside_out))
    data_repix = np.array(hp.ud_grade(data_map, nside_out=nside_out,
                                      power=-2))
    mask_repix = np.array(hp.ud_grade(mask, nside_out=nside_out,
                                      power=-2))
    mask_repix[np.where(mask_repix != np.amax(mask_repix))[0]] = 0
    mask_repix[np.where(mask_repix == np.amax(mask_repix))[0]] = 1
    _unmask = np.where(mask_repix > 1e-30)[0]
    norm_list = np.linspace(norm_guess*0.3, norm_guess*1.5, 50)
    igrb_list = np.linspace(igrb_guess*0.01, igrb_guess*10., 200)
    logger.info('Minimization likelihood run1...')
    lh_list = []
    combinations = list(product(norm_list, igrb_list))
    if exp is not None:
        exposure = np.array(hp.ud_grade(exp, nside_out=nside_out))
        areapix = 4*np.pi/(len(data_repix))
        for i, j in product(norm_list, igrb_list):
            lh = poisson_likelihood(i, j, fore_repix[_unmask],
                                    data_repix[_unmask],
                                    exp=exposure[_unmask],
                                    sr=areapix)
            lh_list.append(lh)
    else:
        for i, j in product(norm_list, igrb_list):
            lh = poisson_likelihood(i, j, fore_repix[_unmask],
                                    data_repix[_unmask])
            lh_list.append(lh)
    lh_min = np.argmin(np.array(lh_list))
    (norm_min, igrb_min) = combinations[lh_min]
    logger.info('Run1 results: n=%.3f c=%.1e' % (norm_min, igrb_min))
    norm_list = np.linspace(norm_min*0.7, norm_min*1.2, 51)
    igrb_list = np.linspace(igrb_min*0.5, igrb_min*1.5, 101)
    logger.info('Minimization likelihood run2...')
    lh_list = []
    combinations = np.array(list(product(norm_list, igrb_list)))
    if exp is not None:
        exposure = np.array(hp.ud_grade(exp, nside_out=nside_out))
        areapix = 4*np.pi/(len(data_repix))
        for i, j in product(norm_list, igrb_list):
            lh = poisson_likelihood(i, j, fore_repix[_unmask],
                                    data_repix[_unmask],
                                    exp=exposure[_unmask],
                                    sr=areapix)
            lh_list.append(lh)
    else:
        for i, j in product(norm_list, igrb_list):
            lh = poisson_likelihood(i, j, fore_repix[_unmask],
                                    data_repix[_unmask])
            lh_list.append(lh)
    lh_list = np.array(lh_list)
    lh_min = np.argmin(lh_list)
    (norm_min, igrb_min) = combinations[lh_min]
    logger.info('Run2 results: n=%.3f c=%e' % (norm_min, igrb_min))
    # 1-sigma contour for 2 fit parameters: Delta(-logL) = 2.3
    lh_delta = np.array(lh_list)[lh_min] + 2.3
    index = np.where(np.array(lh_list) < lh_delta)[0]
    _norm = np.array([x[0] for x in combinations[index]])
    logger.info('Norm err: %.4f - %.4f' % (_norm[0], _norm[-1]))
    _igrb = np.array([x[1] for x in combinations[index]])
    logger.info('Igrb err: %.e - %.e' % (np.amin(_igrb), np.amax(_igrb)))
    if show:
        n = np.array([x[0] for x in combinations])
        plt.figure(facecolor='white')
        plt.plot(n, lh_list, 'o', color='coral', alpha=0.3)
        plt.plot(norm_min, lh_list[lh_min], 'r*')
        plt.plot([_norm[0], _norm[-1]], [lh_delta, lh_delta], 'r-')
        plt.xlabel('Normalization')
        plt.ylabel('-Log(Likelihood)')
        igrb = np.array([x[1] for x in combinations])
        plt.figure(facecolor='white')
        plt.plot(igrb, lh_list, 'o', color='coral', alpha=0.3)
        plt.plot(igrb_min, lh_list[lh_min], 'r*')
        plt.plot([np.amin(_igrb), np.amax(_igrb)], [lh_delta, lh_delta],
                 'r-')
        plt.xlabel('Constant')
        plt.ylabel('-Log(Likelihood)')
        fig = plt.figure(facecolor='white')
        z = lh_list
        zmin = lh_list[lh_min]
        z.shape = (len(norm_list), len(igrb_list))
        ax = fig.add_subplot(111)
        cax = ax.matshow(z, origin='lower', cmap='Spectral', aspect='auto')
        plt.xlabel('$C$ $[cm^{-2}s^{-1}sr^{-1}]$')
        plt.ylabel('$N$')
        #plt.title('$\Phi_{data}=N\cdot\Phi_{model}+C$')
        x_ticks = np.linspace(np.amin(igrb_list), np.amax(igrb_list), 6)
        formatting_function = np.vectorize(lambda f: format(f, '6.1E'))
        x_ticks = list(formatting_function(x_ticks))
        y_ticks = list(np.around(np.linspace(np.amin(norm_list),
                                             np.amax(norm_list), 6),
                                 decimals=3))
        ax.set_yticklabels(['']+y_ticks)
        ax.set_xticklabels(['']+x_ticks)
        ax.xaxis.set_ticks_position('bottom')
        cb = plt.colorbar(cax, format='$%.1e$')
        cb.set_label('-Log(Likelihood)', rotation=90)
        norm_min_ind = list(norm_list).index(norm_min)
        igrb_min_ind = list(igrb_list).index(igrb_min)
        _norm_ind = []
        _igrb_ind = []
        for i in range(0, len(index)):
            _norm_ind.append(list(norm_list).index(_norm[i]))
            _igrb_ind.append(list(igrb_list).index(_igrb[i]))
        _norm_ind = np.array(_norm_ind)
        _igrb_ind = np.array(_igrb_ind)
        # 1-, 2- and 3-sigma contours for 2 parameters
        plt.contourf(z, [zmin, zmin+2.3, zmin+4.61, zmin+5.99], colors='w',
                     origin='lower', alpha=0.3)
        plt.scatter(igrb_min_ind, norm_min_ind, s=45, c='w', marker='+')
        plt.show()
    return norm_min, igrb_min, _norm[0], _norm[-1], np.amin(_igrb), \
        np.amax(_igrb)
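# Hedged usage sketch (not part of the original module), with hypothetical
# file names. With flux maps, 'exp' stays None; with counts maps an exposure
# map must be passed as well.
# fore = hp.read_map('fits/fore_model_1GeV.fits')  # hypothetical path
# data = hp.read_map('fits/flux_map_1GeV.fits')    # hypothetical path
# n, c, n_lo, n_hi, c_lo, c_hi = fit_foreground_poisson(fore, data,
#                                                       n_guess=1.,
#                                                       c_guess=0.1,
#                                                       show=True)
# logger.info('N = %.3f (%.3f - %.3f)' % (n, n_lo, n_hi))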
def fit_fore_src_poisson(fore_map, data_map, srctempl_map, mask_map=None,
                         n1_guess=1., c_guess=1, n2_guess=1., exp=None,
                         smooth=False, show=False):
    """Performs the Poissonian fit, recursively computing the log likelihood
       (using poisson_likelihood_2) for a grid of values of the fit
       parameters around the guess. Returns the values of the parameters
       which minimize the log likelihood, together with the 1-sigma errors.

       n1_guess : float
          initial guess for the foreground normalization parameter
       n2_guess : float
          initial guess for the source-template normalization parameter
       c_guess : float
          initial guess for the constant parameter
       fore_map : numpy array
          HEALPix map of the foreground model
       data_map : numpy array
          HEALPix map of the data. It can be either a counts map or a flux
          map. If a counts map is given, an exposure map must be given too
          (see the 'exp' parameter).
       srctempl_map : numpy array
          HEALPix map of the source template
       exp : numpy array or None
          HEALPix map of the exposure. Must be given if the data map is in
          counts (because the foreground map is in flux units by default and
          needs to be turned into counts to be fitted). If the data map is
          in flux units, leave this parameter to its default value of None.
       smooth : bool
          not implemented yet...
       show : bool
          if True, some useful plots are shown to check whether the fit is
          working.
    """
    #show=True
    logger.info('Performing Poissonian fit...')
    norm1_guess = n1_guess
    norm2_guess = n2_guess
    igrb_guess = c_guess
    nside_out = 64
    mask = 0.
    if mask_map is None:
        logger.info('Fit outside default mask: 30deg gp, 2 deg srcs.')
        mask_f = os.path.join(GRATOOLS_CONFIG, 'fits/Mask64_src2_gp30.fits')
        mask = hp.read_map(mask_f)
    else:
        logger.info('Fit outside mask given in config file.')
        mask = mask_map
    logger.info('Downgrading...')
    fore_repix = np.array(hp.ud_grade(fore_map, nside_out=nside_out))
    data_repix = np.array(hp.ud_grade(data_map, nside_out=nside_out,
                                      power=-2))
    srct_repix = np.array(hp.ud_grade(srctempl_map, nside_out=nside_out))
    mask_repix = np.array(hp.ud_grade(mask, nside_out=nside_out, power=-2))
    mask_repix[np.where(mask_repix != np.amax(mask_repix))[0]] = 0
    mask_repix[np.where(mask_repix == np.amax(mask_repix))[0]] = 1
    _unmask = np.where(mask_repix > 1e-30)[0]
    logger.info('Initial guesses: n1=%.1e, n2=%.1e, c=%.1e'
                % (norm1_guess, norm2_guess, igrb_guess))
    norm1_list = np.linspace(norm1_guess*0.3, norm1_guess*2, 21)
    norm2_list = np.linspace(norm2_guess*0.1, norm2_guess*10, 21)
    print(norm2_list)
    print(norm1_list)
    igrb_list = np.linspace(igrb_guess*0.01, igrb_guess*10., 101)
    logger.info('Minimization likelihood run1...')
    lh_list = []
    combinations = list(product(norm1_list, norm2_list, igrb_list))
    if exp is not None:
        exposure = np.array(hp.ud_grade(exp, nside_out=nside_out))
        areapix = 4*np.pi/(len(data_repix))
        for i, j, k in combinations:
            lh = poisson_likelihood_2(i, j, k, fore_repix[_unmask],
                                      data_repix[_unmask],
                                      srct_repix[_unmask],
                                      exp=exposure[_unmask],
                                      sr=areapix)
            lh_list.append(lh)
    else:
        for i, j, k in combinations:
            lh = poisson_likelihood_2(i, j, k, fore_repix[_unmask],
                                      data_repix[_unmask],
                                      srct_repix[_unmask])
            lh_list.append(lh)
    lh_min = np.argmin(np.array(lh_list))
    (norm1_min, norm2_min, igrb_min) = combinations[lh_min]
    logger.info('Run1 results: n1=%.3f n2=%.2e c=%.2e'
                % (norm1_min, norm2_min, igrb_min))
    norm1_list = np.linspace(norm1_min*0.9, norm1_min*1.7, 21)
    norm2_list = np.linspace(norm2_min*0.5, norm2_min*5, 21)
    print(norm2_list)
    print(norm1_list)
    igrb_list = np.linspace(igrb_min*0.5, igrb_min*1.5, 101)
    logger.info('Minimization likelihood run2...')
    lh_list = []
    combinations = np.array(list(product(norm1_list, norm2_list,
                                         igrb_list)))
    if exp is not None:
        exposure = np.array(hp.ud_grade(exp, nside_out=nside_out))
        areapix = 4*np.pi/(len(data_repix))
        for i, j, k in product(norm1_list, norm2_list, igrb_list):
            lh = poisson_likelihood_2(i, j, k, fore_repix[_unmask],
                                      data_repix[_unmask],
                                      srct_repix[_unmask],
                                      exp=exposure[_unmask],
                                      sr=areapix)
            lh_list.append(lh)
    else:
        for i, j, k in product(norm1_list, norm2_list, igrb_list):
            lh = poisson_likelihood_2(i, j, k, fore_repix[_unmask],
                                      data_repix[_unmask],
                                      srct_repix[_unmask])
            lh_list.append(lh)
    lh_list = np.array(lh_list)
    lh_min = np.argmin(lh_list)
    (norm1_min, norm2_min, igrb_min) = combinations[lh_min]
    logger.info('Run2 results: n1=%.3f n2=%.2e c=%.2e'
                % (norm1_min, norm2_min, igrb_min))
    # Parameter values within Delta(-logL) < 2.3 of the minimum define the
    # quoted error intervals.
    lh_delta = np.array(lh_list)[lh_min] + 2.3
    index = np.where(np.array(lh_list) < lh_delta)[0]
    _norm1 = np.array([x[0] for x in combinations[index]])
    logger.info('Norm1 err: %.4f - %.4f' % (_norm1[0], _norm1[-1]))
    n1_err = (_norm1[0], _norm1[-1])
    _norm2 = np.array([x[1] for x in combinations[index]])
    logger.info('Norm2 err: %.2e - %.2e' % (_norm2[0], _norm2[-1]))
    n2_err = (_norm2[0], _norm2[-1])
    _igrb = np.array([x[2] for x in combinations[index]])
    logger.info('Igrb err: %.2e - %.2e' % (np.amin(_igrb), np.amax(_igrb)))
    igrb_err = (np.amin(_igrb), np.amax(_igrb))
    if show:
        n1 = np.array([x[0] for x in combinations])
        n2 = np.array([x[1] for x in combinations])
        c = np.array([x[2] for x in combinations])
        plt.figure(facecolor='white')
        plt.plot(n2, lh_list, 'o', color='coral', alpha=0.3)
        plt.plot(norm2_min, lh_list[lh_min], 'r*')
        plt.plot([_norm2[0], _norm2[-1]], [lh_delta, lh_delta], 'r-')
        plt.xlabel('Normalization')
        plt.ylabel('-Log(Likelihood)')
        plt.title('norm src')
        plt.figure(facecolor='white')
        plt.plot(n1, lh_list, 'o', color='coral', alpha=0.3)
        plt.plot(norm1_min, lh_list[lh_min], 'r*')
        plt.plot([_norm1[0], _norm1[-1]], [lh_delta, lh_delta], 'r-')
        plt.xlabel('Normalization')
        plt.ylabel('-Log(Likelihood)')
        plt.title('norm fore')
        plt.figure(facecolor='white')
        plt.plot(c, lh_list, 'o', color='coral', alpha=0.3)
        plt.plot(igrb_min, lh_list[lh_min], 'r*')
        plt.xlabel('Constant')
        plt.ylabel('-Log(Likelihood)')
        plt.show()
    return norm1_min, norm2_min, igrb_min, n1_err, n2_err, igrb_err
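# Hedged usage sketch (not part of the original module), with hypothetical
# flux maps; with flux maps 'exp' stays None.
# fore = hp.read_map('fits/fore_model_1GeV.fits')    # hypothetical path
# data = hp.read_map('fits/flux_map_1GeV.fits')      # hypothetical path
# srct = hp.read_map('fits/src_template_1GeV.fits')  # hypothetical path
# n1, n2, c, n1_err, n2_err, c_err = fit_fore_src_poisson(fore, data, srct,
#                                                         show=True)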
def pol_cov_parse(pol_cov_out_file, wl_array=None, rebin=False, show=False):
    """Created to parse and return the FITS output file of PolSpice, which
       contains the covariance matrix of the angular power spectra (APS).

       pol_cov_out_file : str
          .fits file containing the covariance matrix created by PolSpice
       wl_array : numpy array (or spline)
          array (or the spline) of the Wbeam function as a function of l,
          integrated in an energy bin.
       rebin : bool
          if True, a multipole rebinning of the APS is done.
          ATT: the multipole bins are hardcoded; if you want to change them
          you must modify the 'rebinning' variable defined at the beginning
          of GRATools/utils/gPolSpice.py
       show : bool
          if True, a png image of the covariance matrix is saved in
          GRATools/output/figures
    """
    hdu = pf.open(pol_cov_out_file)
    _cov = hdu[0].data[0]
    hdu.close()
    _l = np.arange(len(_cov))
    if wl_array is not None:
        wl = wl_array
        _l = np.arange(len(wl_array))
        _cov = np.array([_cov[i][:len(wl)] for i in range(0, len(wl))])
        # Deconvolve the beam on both axes: the broadcast division corrects
        # the columns, the loop corrects the rows, so that in the end
        # _cov[i][j] is divided by wl[i]**2 * wl[j]**2.
        _cov = _cov/(wl**2)
        for l in _l:
            _cov[l] = _cov[l]/(wl[l]**2)
    if rebin:
        _covr = []
        _lr = []
        for imin, imax in zip(rebinning[:-1], rebinning[1:]):
            _covrj = []
            _lmean = np.sqrt(imin*imax)
            _lr.append(_lmean)
            for jmin, jmax in zip(rebinning[:-1], rebinning[1:]):
                _covrj.append(np.mean(_cov[imin:imax, jmin:jmax]))
            _covr.append(np.array(_covrj))
        _cov = np.array(_covr)
        _l = np.array(_lr)
    else:
        pass
    pic.dump(_cov, open(pol_cov_out_file.replace('.fits', '.pkl'), 'wb'))
    if show:
        _cov2ploti = []
        for i in range(0, len(_l)):
            sigii = _cov[i][i]
            _cov2plotj = []
            for j in range(0, len(_l)):
                sigjj = _cov[j][j]
                sigij = _cov[j][i]
                if sigij < 0:
                    sigij = 1e-100
                _cov2plotj.append(np.sqrt(sigij/np.sqrt(sigii*sigjj)))
            _cov2ploti.append(_cov2plotj)
        _cov2ploti = np.array(_cov2ploti)
        fig = plt.figure(facecolor='white')
        ax = fig.add_subplot(111)
        cax = ax.matshow(np.log10(np.abs(_cov)), origin='lower',
                         aspect='auto', cmap='Spectral')
        en_tick = list(np.logspace(0, np.log10(1500), 6).astype(int))
        ax.set_yticklabels(['']+en_tick)
        ax.set_xticklabels(['']+en_tick)
        plt.title('Covariance matrix')
        plt.xlabel('$l_{i}$')
        plt.ylabel('$l_{j}$')
        cb = plt.colorbar(cax, format='$%i$')
        plt.grid()
        save_current_figure(os.path.basename(pol_cov_out_file).replace(
            '.fits', ''))
    return _cov
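# Hedged usage sketch (not part of the original module), assuming a
# hypothetical PolSpice output file and a Wbeam array 'wl' already built
# with the window-function utilities.
# cov = pol_cov_parse('output/pol_cov_1GeV.fits',  # hypothetical path
#                     wl_array=wl,
#                     rebin=True,
#                     show=True)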
def main(): """Test module """ """ plt.figure(figsize=(10, 7), dpi=80) _l = np.arange(0, 10, 2) for l in _l: pl_th = get_pl_vs_th(l, np.arange(-1, 1, 0.00001)) plt.plot(np.arange(-1, 1, 0.00001), pl_th, '.', label='l = %i'%l) plt.legend() #plt.show() """ #out_wbeam_txt = 'output/Wbeam_P8R2_ULTRACLEANVETO_V6_56.txt' #wb = get_wbeam(out_wbeam_txt) #wb.plot() """ NSIDE = 1 NPIX = hp.nside2npix(NSIDE) iii = np.arange(NPIX) dec, ra = IndexToDeclRa(NSIDE, iii) index = np.where(abs(dec)>10) ra = ra[index] dec = dec[index] #ra = ra #dec = dec plt.figure(figsize=(10, 7), dpi=80) hp.mollview(iii, title="Mollview image RING") plt.figure(figsize=(10, 7), dpi=80) lab, plots = [], [] for i in range(0, len(ra)): print ra[i], dec[i] out_wbeam_txt = 'output/%i_prova.txt'%i psf_file = 'output/%i_prova.fits'%i dict_gtpsf = {'expcube':'/data1/data/FT-files/output/output_gtltcube'+\ '/Allyrs_filtered_gti_ltcube.fits', 'outfile': psf_file, 'irfs': 'P8R2_ULTRACLEANVETO_V6', 'evtype': 3, 'ra': ra[i], 'dec': dec[i], 'emin': 500, 'emax': 600000, 'nenergies': 10, 'thetamax': 30, 'ntheta': 300} from GRATools.utils.ScienceTools_ import gtpsf gtpsf(dict_gtpsf) _l = np.arange(0, 1000, 4) psf = get_psf(psf_file) if not os.path.exists(out_wbeam_txt): wb = build_wbeam(psf, _l, out_wbeam_txt) else: wb = get_wbeam(out_wbeam_txt) wb_1GeV = wb.hslice(1000) wl = wb_1GeV.plot(show=False, label='RA:%i, Dec:%i'%(ra[i],dec[i])) #lab.append('%i-%i'%(ra[i],dec[i])) #plots.append(wl) """ out_wbeam_txt = 'output/Wbeam_P8R2_ULTRACLEANVETO_V6_56.txt' wb = get_wbeam(out_wbeam_txt) plt.figure(figsize=(10, 7), dpi=80) _l = np.arange(0, 1000, 4) wb_500MeV = wb.hslice(500) plt.plot(wb_500MeV.x, wb_500MeV.y**2, label='0.5 GeV') wb_5GeV = wb.hslice(5000) plt.plot(wb_5GeV.x, wb_5GeV.y**2, label='5 GeV') wb_50GeV = wb.hslice(50000) plt.plot(wb_50GeV.x, wb_50GeV.y**2, label='50 GeV') wb_100GeV = wb.hslice(100000) plt.plot(wb_100GeV.x, wb_100GeV.y**2, label='100 GeV') wb_300GeV = wb.hslice(300000) plt.plot(wb_300GeV.x, wb_300GeV.y**2, label='300 GeV') plt.legend() plt.xlabel('Energy [MeV]') plt.ylabel('W$^{2}$$_{beam}$') plt.show()