def test_correlated_alm():
    lmax = 2000
    ells = np.arange(0, lmax, 1)

    def get_cls(ells, index, amplitude):
        cls = amplitude * ells.astype(np.float32)**index
        cls[ells < 2] = 0
        return cls

    Clf1f1 = get_cls(ells, -1, 1)
    Clf2f2 = get_cls(ells, -1.3, 2)
    Clf1f2 = get_cls(ells, -1.4, 0.5)
    alm_f1 = hp.synalm(Clf1f1, lmax=lmax - 1)
    alm_f2 = deltag.generate_correlated_alm(alm_f1, Clf1f1, Clf2f2, Clf1f2)
    f1f1 = hp.alm2cl(alm_f1, alm_f1)
    f2f2 = hp.alm2cl(alm_f2, alm_f2)
    f1f2 = hp.alm2cl(alm_f1, alm_f2)
    pl = io.Plotter(xyscale='linlog', scalefn=lambda x: x)
    pl.add(ells, f1f1, color="C0", alpha=0.4)
    pl.add(ells, f2f2, color="C1", alpha=0.4)
    pl.add(ells, f1f2, color="C2", alpha=0.4)
    pl.add(ells, Clf1f1, label="f1f1", color="C0", ls="--", lw=3)
    pl.add(ells, Clf2f2, label="f2f2", color="C1", ls="--", lw=3)
    pl.add(ells, Clf1f2, label="f1f2", color="C2", ls="--", lw=3)
    pl.done()
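# The deltag.generate_correlated_alm call above is the piece under test. For context,
# here is a minimal sketch of the standard constrained-realization construction such a
# function presumably implements: a scaled copy of alm_f1 plus a random realization of
# the residual power. The function name and the zero-power guards are assumptions, not
# the tested library code.
import numpy as np
import healpy as hp

def generate_correlated_alm_sketch(alm_f1, Clf1f1, Clf2f2, Clf1f2):
    # Correlated part: filter alm_f1 by Clf1f2/Clf1f1 (zero where Clf1f1 vanishes).
    ratio = np.divide(Clf1f2, Clf1f1, out=np.zeros_like(Clf1f2), where=Clf1f1 > 0)
    alm_corr = hp.almxfl(alm_f1, ratio)
    # Uncorrelated part: draw a realization of the residual power Clf2f2 - Clf1f2^2/Clf1f1.
    Cl_resid = Clf2f2 - np.divide(Clf1f2**2, Clf1f1,
                                  out=np.zeros_like(Clf1f2), where=Clf1f1 > 0)
    Cl_resid[Cl_resid < 0] = 0  # guard against numerical noise
    alm_uncorr = hp.synalm(Cl_resid, lmax=hp.Alm.getlmax(alm_f1.size))
    return alm_corr + alm_uncorr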
def calculate(self):
    """
    Calculate various properties of the Gaussian, fnl, gnl maps,
    i.e. compute
    * Ais
    * As
    * Cls
    * dipoles using remove_dipole
    """
    inpCls = self.inputCls[:self.lmax + 1]

    # "is not None" instead of "!= None": comparing a numpy array to None
    # elementwise makes the if-test ambiguous.
    if self.gausmap0 is not None:
        self.gausCls0 = hp.alm2cl(self.gausalm0)[0:self.lmax + 1]
        self.gausCls1 = hp.alm2cl(self.gausalm1)[0:self.lmax + 1]

        self.gausA0 = get_A0(self.gausCls0[1:], inpCls[1:])
        self.gausAi = Ais(self.gausmap1, self.lmax); self.gausA = AistoA(self.gausAi)
        self.gausAi2 = Ais(self.gausmap0, self.lmax); self.gausA2 = AistoA(self.gausAi2)
        self.gausmp = hp.remove_monopole(self.gausmap0, fitval=True)[1]
        self.gausdipole = get_dipole(self.gausmap1)

    if self.fnlmaps0 is not None:
        self.fnlCls0 = []; self.fnlCls1 = []; self.fnlA0 = []; self.fnlAi = []; self.fnlAi2 = []
        self.fnlmp = []; self.fnldipole = []; self.fnlA = []; self.fnlA2 = []

        for i in range(self.Nfnls):
            self.fnlCls0.append(hp.anafast(self.fnlmaps0[i], nspec=self.lmax))
            self.fnlCls1.append(hp.anafast(self.fnlmaps1[i], nspec=self.lmax))
            self.fnlA0.append(get_A0(self.fnlCls0[i][1:], inpCls[1:]))
            self.fnlAi.append(Ais(self.fnlmaps1[i], self.lmax)); self.fnlA.append(AistoA(self.fnlAi[i]))
            self.fnlAi2.append(Ais(self.fnlmaps0[i], self.lmax)); self.fnlA2.append(AistoA(self.fnlAi2[i]))
            self.fnlmp.append(hp.remove_monopole(self.fnlmaps0[i], fitval=True)[1])
            self.fnldipole.append(get_dipole(self.fnlmaps1[i]))
def get_spectra(tube, omap, nside, res, white, w2):
    zeros = []
    if nside is not None:
        mlmax = nside * 2
        alms = [
            hp.map2alm(omap[i, 0, ...], lmax=mlmax, pol=True) / np.sqrt(w2)
            for i in range(2)
        ]
    else:
        mlmax = int(4096 * 2 / res)  # cast to int: true division would give a float lmax
        alms = [
            cs.map2alm(omap[i, 0, ...], lmax=mlmax, spin=[0, 2]) / np.sqrt(w2)
            for i in range(2)
        ]
    for i in range(2):
        for j in range(i, 2):
            for p in range(3):
                for q in range(p + 1, 3):
                    zeros.append(hp.alm2cl(alms[i][p], alms[j][q]))
    for i in range(1, 3):
        zeros.append(hp.alm2cl(alms[0][i], alms[1][i]))
    c11 = [hp.alm2cl(alms[0][i], alms[0][i]) for i in range(3)]
    c22 = [hp.alm2cl(alms[1][i], alms[1][i]) for i in range(3)]
    cross = hp.alm2cl(alms[0][0], alms[1][0])
    if white or tube[0] == "S":
        zeros.append(cross)
        c12 = []
    else:
        c12 = [cross]
    ls = np.arange(c11[0].size)
    return ls, c11, c22, c12, zeros
def check_EBlm2d(nu1=100,nu2=143, lmax=300, maskfield=2, source_maskfield=0, label_loc='lower right', xmax=None): map_name = 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(nu1) Q1,U1 =hp.read_map(data_path + map_name, field=(1,2)) map_name = 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(nu2) Q2,U2 =hp.read_map(data_path + map_name, field=(1,2)) mask=hp.read_map(data_path + 'HFI_Mask_GalPlane-apo0_2048_R2.00.fits', field=maskfield) smask=hp.read_map(data_path + 'HFI_Mask_PointSrc_2048_R2.00.fits', field=source_maskfield) mask *= smask hdulist = fits.open(data_path + 'HFI_RIMO_Beams-100pc_R2.00.fits') beam1 = hdulist[beam_index['{}P'.format(nu1)]].data.NOMINAL[0][:lmax+1] beam2 = hdulist[beam_index['{}P'.format(nu2)]].data.NOMINAL[0][:lmax+1] elm1,blm1 = get_ElmBlm(lmax=lmax, Qmap=Q1, Umap=U1, mask=mask, healpy_format=False, recalc=True, div_beam=beam1) elm_hp1,blm_hp1 = get_ElmBlm(lmax=lmax, Qmap=Q1, Umap=U1, mask=mask, healpy_format=True, recalc=True, div_beam=beam1) elm2,blm2 = get_ElmBlm(lmax=lmax, Qmap=Q2, Umap=U2, mask=mask, healpy_format=False, recalc=True, div_beam=beam2) elm_hp2,blm_hp2 = get_ElmBlm(lmax=lmax, Qmap=Q2, Umap=U2, mask=mask, healpy_format=True, recalc=True, div_beam=beam2) clee = cl_alm2d(alm1=elm1, alm2=elm2, lmax=lmax) clbb = cl_alm2d(alm1=blm1,alm2=blm2, lmax=lmax) l = np.arange(len(clee)) clee_hp = hp.alm2cl(elm_hp1,elm_hp2, lmax=lmax) clbb_hp = hp.alm2cl(blm_hp1,blm_hp2, lmax=lmax) l_hp = np.arange(len(clee_hp)) clplanck = np.loadtxt(data_path + 'bf_base_cmbonly_plikHMv18_TT_lowTEB_lmax4000.minimum.theory_cl') clee_planck = clplanck[:,3] clbb_planck = clplanck[:,4] l_planck = clplanck[:,0] pl.figure() pl.title('EE check') pl.plot(l, clee*l*(l+1)/2./np.pi*1e12, label='2d') pl.plot(l,clee_hp*l_hp*(l_hp+1)/2./np.pi*1e12, label='healpy') pl.plot(l_planck, clee_planck, label='planck best fit') pl.legend(loc=label_loc) if xmax is None: pl.xlim(xmax=lmax) else: pl.xlim(xmax=xmax) pl.figure() pl.title('BB check') pl.plot(l, clbb*l*(l+1)/2./np.pi*1e12, label='2d') pl.plot(l_hp,clbb_hp*l_hp*(l_hp+1)/2./np.pi*1e12, label='healpy') pl.plot(l_planck, clbb_planck, label='planck best fit') pl.legend(loc=label_loc) if xmax is None: pl.xlim(xmax=lmax) else: pl.xlim(xmax=xmax)
def cleaned_map():
    weights = w_ell()[0]
    for ell in np.arange(lmax + 1):
        print('ell>>>', ell)
        for m in np.arange(ell + 1):
            for i in np.arange(map_num):
                alm_comb[i, hp.Alm.getidx(lmax=lmax, l=ell, m=m)] = alm_comb[
                    i, hp.Alm.getidx(lmax=lmax, l=ell, m=m)] * weights[ell, i]
            for j in np.arange(n_realization):
                alm_comb_n[:, j, hp.Alm.getidx(lmax=lmax, l=ell, m=m)] = alm_comb_n[
                    :, j, hp.Alm.getidx(lmax=lmax, l=ell, m=m)] * weights[ell]
    cleaned_alm = np.sum(alm_comb, axis=0)
    noise_bias = np.sum(alm_comb_n, axis=0)
    Cl_noise = np.sum(hp.alm2cl(noise_bias, nspec=n_realization),
                      axis=0) / n_realization
    np.save('Cl_noise_sim', Cl_noise)
    print('shape_cleaned_alm', np.shape(cleaned_alm))
    cleaned_Cl = hp.alm2cl(cleaned_alm)
    Cl_debias = cleaned_Cl - Cl_noise
    cleaned_map = hp.alm2map(cleaned_alm, nside=512)
    ell = np.arange(len(cleaned_Cl))
    cleaned_Dl = ell * (ell + 1) / 2 / np.pi * cleaned_Cl
    cleaned_Dl_debias = ell * (ell + 1) / 2 / np.pi * Cl_debias
    return cleaned_map, cleaned_Dl, cleaned_Dl_debias
def get_power(map_list, ivar_list, a, b, mask, N=20): """ Calculate the average coadded flattened power spectrum P_{ab} used to generate simulation for the splits. Inputs: map_list: list of source free splits ivar_list: list of the inverse variance maps splits a: 0,1,2 for I,Q,U respectively b:0,1,2 for I,Q,U, respectively N: window to smooth the power spectrum by in the rolling average. mask: apodizing mask Output: 1D power spectrum accounted for w2 from 0 to 10000 """ pmap = enmap.pixsizemap(map_list[0].shape, map_list[0].wcs) cl_ab = [] n = len(map_list) #calculate the coadd maps if a != b: coadd_a = coadd_mapnew(map_list, ivar_list, a) coadd_b = coadd_mapnew(map_list, ivar_list, b) else: coadd_a = coadd_mapnew(map_list, ivar_list, a) for i in range(n): print(i) if a != b: d_a = map_list[i][a] - coadd_a noise_a = d_a * np.sqrt(ivar_eff(i, ivar_list) / pmap) * mask alm_a = cs.map2alm(noise_a, lmax=10000) d_b = map_list[i][b] - coadd_b noise_b = d_b * np.sqrt(ivar_eff(i, ivar_list) / pmap) * mask alm_b = cs.map2alm(noise_b, lmax=10000) cls = hp.alm2cl(alm_a, alm_b) cl_ab.append(cls) else: d_a = map_list[i][a] - coadd_a noise_a = d_a * np.sqrt(ivar_eff(i, ivar_list) / pmap) * mask print("generating alms") alm_a = cs.map2alm(noise_a, lmax=10000) cls = hp.alm2cl(alm_a) cl_ab.append(cls) cl_ab = np.array(cl_ab) sqrt_ivar = np.sqrt(ivar_eff(0, ivar_list) / pmap) mask_ivar = sqrt_ivar * 0 + 1 mask_ivar[sqrt_ivar <= 0] = 0 mask = mask * mask_ivar mask[mask <= 0] = 0 w2 = np.sum((mask**2) * pmap) / np.pi / 4. power = 1 / n / (n - 1) * np.sum(cl_ab, axis=0) ls = np.arange(len(power)) power[~np.isfinite(power)] = 0 power = rolling_average(power, N) bins = np.arange(len(power)) power = maps.interp(bins, power)(ls) return power / w2
def run_alm2spec(path_name, overw):
    # TODO to process spectrum, need to know the beamfunction? is it 5arcmin?
    # beamf = {'12345-12345': hp.gauss_beam()}
    # C_lS_unsc = trsf_s.apply_beamf(C_lS_unsc, cf, ['12345-12345'], speccomb, beamf)
    # io.alert_cached(io.signal_sc_path_name)
    cmb_tlm, cmb_elm, cmb_blm = cslib.load_alms('cmb', csu.sim_id)
    C_lS_unsc = np.array(
        [hp.alm2cl([cmb_tlm, cmb_elm, cmb_blm])[:, :csu.cf['pa']['lmax'] + 1]])
    C_lS = trsf_s.apply_scale(C_lS_unsc, csu.cf['pa']["Spectrum_scale"])
    io.save_data(C_lS, path_name)
    C_lS = hp.alm2cl([cmb_tlm, cmb_elm, cmb_blm])
def calc_IP2_equil(Imap, Qmap, Umap, lmax=100):
    Tlm = hp.map2alm(Imap)
    Elm, Blm = hp.map2alm_spin((Qmap, Umap), 2)
    TEE = hp.alm2cl(Tlm, Elm**2)
    TBB = hp.alm2cl(Tlm, Blm**2)
    TEB = hp.alm2cl(Tlm, Elm * Blm)
    ls = np.arange(len(TEE))
    return ls, TEE, TBB, TEB
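# Note that calc_IP2_equil correlates Tlm with elementwise products of the polarization
# alms (a quadratic statistic), not with the E/B alms themselves. For comparison, a
# minimal sketch of the ordinary two-point cross-spectra computed from the same inputs;
# the function name is illustrative and not part of the original module.
import numpy as np
import healpy as hp

def calc_TE_TB_EB(Imap, Qmap, Umap, lmax=100):
    Tlm = hp.map2alm(Imap, lmax=lmax)
    Elm, Blm = hp.map2alm_spin((Qmap, Umap), 2, lmax=lmax)
    clte = hp.alm2cl(Tlm, Elm)
    cltb = hp.alm2cl(Tlm, Blm)
    cleb = hp.alm2cl(Elm, Blm)
    ls = np.arange(len(clte))
    return ls, clte, cltb, cleb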
def on_epoch_end(self, batch, logs={}): self.epochs_waited += 1 if self.epochs_waited == self.patience: X_val, y_val = self.validation_data.T[0].T, self.validation_data.T[ 1].T if self.is_3d: X_val = np.expand_dims(X_val, axis=-1) y_predict = self.model.predict(X_val) trans = [] res = [] for i in range(np.squeeze(y_predict).T.shape[0]): y_pred = np.squeeze(y_predict).T[i].T.flatten( ) # choose given freq band, then flatten out y_pred = y_pred[self.rearr] y_pred = hp.map2alm(y_pred) y_pred = hp.alm2cl(y_pred) # Get Cls for COSMO spectrum y_v = np.squeeze(y_val) y_v = y_v.T[i].T.flatten() y_v = y_v[self.rearr] y_v = hp.map2alm(y_v) y_v = hp.alm2cl(y_v) # compute transfer trans.append(np.sqrt((y_pred / y_v))[1:]) # compute residual power spec res.append(np.abs(((y_v - y_pred) / y_v))[1:]) res_avg = np.mean(np.array(res)) trans_avg = np.mean(np.array(trans)) self._data['val_avg_transfer'].append(trans_avg) self._data['val_avg_res'].append(res_avg) if self.record_spectra: self._data['val_transfer'].append(np.array(trans)) self._data['val_res'].append(np.array(res)) print(" - val avg transfer: %0.4f" % (trans_avg)) print(" - val avg res: %0.4f" % (res_avg)) # reset number of epochs waited self.epochs_waited = 0 else: # increase num epochs waited self.epochs_waited += 1 return
def _harmonic_ilc_alm(components, instrument, alms, lbins=None, fsky=None): cl_in = np.array([hp.alm2cl(alm) for alm in alms]) # Multipoles for the ILC bins lmax = hp.Alm.getlmax(alms.shape[-1]) ell = hp.Alm.getlm(lmax)[0] if lbins is not None: ell = np.digitize(ell, lbins) # NOTE: use lmax for indexing alms, ell.max() is the maximum bin index # Make alms real alms = np.asarray(alms, order='C') alms = alms.view(np.float64) alms[..., np.arange(1, 2 * (lmax + 1), 2)] = hp.UNSEEN # Mask imaginary m = 0 ell = np.stack((ell, ell), axis=-1).reshape(-1) if alms.ndim > 2: # TEB -> ILC indipendently on each Stokes n_stokes = alms.shape[1] assert n_stokes in [1, 3], "Alms must be either T only or T E B" alms[:, 1:, [0, 2, 2 * lmax + 2, 2 * lmax + 3]] = hp.UNSEEN # EB for ell < 2 ell = np.stack([ell] * n_stokes) # Replicate ell for every Stokes ell += np.arange(n_stokes).reshape(-1, 1) * (ell.max() + 1 ) # Add offset res = ilc(components, instrument, alms, ell) # Craft output res.s[res.s == hp.UNSEEN] = 0. res.s = np.asarray(res.s, order='C').view(np.complex128) cl_out = np.array([hp.alm2cl(alm) for alm in res.s]) res.cl_in = cl_in res.cl_out = cl_out if fsky: res.cl_in /= fsky res.cl_out /= fsky res.fsky = fsky lrange = np.arange(lmax + 1) ldigitized = np.digitize(lrange, lbins) with np.errstate(divide='ignore', invalid='ignore'): res.l_ref = (np.bincount(ldigitized, lrange * 2 * lrange + 1) / np.bincount(ldigitized, 2 * lrange + 1)) res.freq_cov *= 2 # sqrt(2) missing between complex-real alm conversion if res.s.ndim > 2: res.freq_cov = res.freq_cov.reshape(n_stokes, -1, *res.freq_cov.shape[1:]) res.W = res.W.reshape(n_stokes, -1, *res.W.shape[1:]) return res
def b_cov_T353_E143_B143(cl_file=pf.PLANCK_DATA_PATH + 'bf_base_cmbonly_plikHMv18_TT_lowTEB_lmax4000.minimum.theory_cl',
                         lmax=100):
    Imap = hp.read_map(pf.PLANCK_DATA_PATH + 'HFI_SkyMap_353_2048_R2.02_full.fits')
    Tlm = hp.map2alm(Imap, lmax=lmax)
    cltt = hp.alm2cl(Tlm, lmax=lmax)
    mask = pf.get_planck_mask(psky=70)
    Qmap, Umap = hp.read_map(pf.PLANCK_DATA_PATH + 'HFI_SkyMap_143_2048_R2.02_full.fits',
                             field=(1, 2))
    Elm, Blm = hp.map2alm_spin((Qmap * mask, Umap * mask), 2, lmax=lmax)
    clee = hp.alm2cl(Elm, lmax=lmax)
    clbb = hp.alm2cl(Blm, lmax=lmax)
    cov = calc_b_cov_TEB(cltt, clee, clbb)
    return cov
def compare_power(input_file1, input_file2, nside=2048, lmax=2000, read_inputs=True, object1=None, object2=None, display_plots=True): ''' simple power spectrum comparison ''' map_dens1 = create_healpix_map(input_file1, nside, read_inputs=read_inputs, object1=object1) map_dens2 = create_healpix_map(input_file2, nside, read_inputs=read_inputs, object1=object2) # scaling to remove monopole - this helps with power spectrum computation. mask_octant = compute_mask(nside=nside, octant="upper") map_dens1 = scale_map(map_dens1, mask_octant, nside=nside) map_dens2 = scale_map(map_dens2, mask_octant, nside=nside) print('comparing density maps') hp.mollview(map_dens1) hp.mollview(map_dens2) hp.mollzoom(map_dens1 - map_dens2) if display_plots: plt.show() alm_1 = hp.map2alm(map_dens1, lmax=lmax) alm_2 = hp.map2alm(map_dens2, lmax=lmax) cl_1 = hp.alm2cl(alm_1) cl_2 = hp.alm2cl(alm_2) print('computed power spectra') l = np.arange(lmax + 1) fsky = 1. / 8 plt.figure() plt.plot(l, cl_1 * l * (l + 1) / (2. * np.pi) / fsky) plt.plot(l, cl_2 * l * (l + 1) / (2. * np.pi) / fsky) plt.savefig('cls_' + input_file1 + '_' + input_file2 + '.png', dpi=500) if display_plots: plt.show() plt.figure() plt.plot(l, np.abs(cl_1 - cl_2) / cl_1) plt.savefig('cls_fractional_' + input_file1 + '_' + input_file2 + '.png', dpi=500) if display_plots: plt.show() return map_dens1, map_dens2, cl_1, cl_2
def b_cov_TEB(lmax=100, frequency=353):
    """This one is map-based."""
    Imap, Qmap, Umap = hp.read_map(
        pf.PLANCK_DATA_PATH + 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(frequency),
        field=(0, 1, 2))
    mask = pf.get_planck_mask()
    Tlm = hp.map2alm(Imap * mask, lmax=lmax)
    cltt = hp.alm2cl(Tlm, lmax=lmax)
    Elm, Blm = hp.map2alm_spin((Qmap * mask, Umap * mask), 2, lmax=lmax)
    clee = hp.alm2cl(Elm, lmax=lmax)
    clbb = hp.alm2cl(Blm, lmax=lmax)
    # hs = get_hs(lmax=100)
    cov = calc_b_cov_TEB(cltt, clee, clbb)  # /hs
    return cov
def __call__(self, alm1, alm2):
    assert alm1.lmaxt == alm2.lmaxt, (alm1.lmaxt, alm2.lmaxt)
    assert alm1.lmaxe == alm2.lmaxe, (alm1.lmaxe, alm2.lmaxe)
    assert alm1.lmaxb == alm2.lmaxb, (alm1.lmaxb, alm2.lmaxb)
    ret = np.sum(
        hp.alm2cl(alm1.tlm, alm2.tlm) * (2. * np.arange(0, alm1.lmaxt + 1) + 1))
    ret += np.sum(
        hp.alm2cl(alm1.elm, alm2.elm) * (2. * np.arange(0, alm1.lmaxe + 1) + 1))
    ret += np.sum(
        hp.alm2cl(alm1.blm, alm2.blm) * (2. * np.arange(0, alm1.lmaxb + 1) + 1))
    return ret
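# The sum of (2l+1)*Cl over T, E, B returned by __call__ is just the real dot product
# of the two alm vectors, with m > 0 entries counted twice. A small self-contained
# check of that identity for a single spin-0 alm (a sketch, independent of the class
# above; the unit input spectrum is arbitrary).
import numpy as np
import healpy as hp

lmax = 64
cl = np.ones(lmax + 1)
alm1 = hp.synalm(cl, lmax=lmax)
alm2 = hp.synalm(cl, lmax=lmax)
ells = np.arange(lmax + 1)
lhs = np.sum(hp.alm2cl(alm1, alm2) * (2. * ells + 1.))
m = hp.Alm.getlm(lmax)[1]
weights = np.where(m == 0, 1.0, 2.0)  # m > 0 modes appear twice in the sum over m
rhs = np.sum(weights * (alm1 * np.conj(alm2)).real)
assert np.isclose(lhs, rhs)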
def CG_algo(Matrix, b, data_start, i_max, eps):
    """
    Cf. algorithm B2 from Shewchuk 94 (page 50)
    Matrix is the function to apply the matrix on a vector (eg A_matrix_func)
    data_start is a data_class class, with real alms
    """
    i = 0
    x = data_start.alm.copy()
    cl_th = data_start.cl_th
    beam = data_start.beam
    sigma = data_start.sigma
    invvar = data_start.invvar
    lmax = data_start.lmax
    nside = data_start.nside
    out = data_class([0, cl_th, beam, sigma, invvar, lmax, nside])
    r = b - Matrix(data_start)
    d = data_class([0, cl_th, beam, sigma, invvar, lmax, nside])
    d.alm = r.copy()
    delt_n = np.dot(r.T, r)
    delt_0 = delt_n.copy()
    iter_out_map = []
    iter_out_cl = []
    res = []
    x_list = []
    while i < i_max and delt_n > (eps ** 2 * delt_0):
        q = Matrix(d)
        alph = float(delt_n) / np.dot(d.alm.T, q)  # float(), np.float is removed in recent numpy
        x = x + alph * d.alm
        if i % 10 == 0:
            # Recompute the residual exactly every 10 iterations
            dat_temp = data_class([0, cl_th, beam, sigma, invvar, lmax, nside])
            dat_temp.alm = x
            r = b - Matrix(dat_temp)
        else:
            r = r - alph * q
        delt_old = delt_n.copy()
        delt_n = np.dot(r.T, r)
        bet = delt_n / delt_old
        d.alm = r + bet * d.alm
        i += 1
        out.alm = x
        res.append(
            hp.alm2cl(real2complex_alm(r - (b - Matrix(data_start))))
            / hp.alm2cl(real2complex_alm(b - Matrix(data_start)))
        )
        # x_list.append(real2complex_alm(x))
        iter_out_map.append(return_map(out)[1])
        iter_out_cl.append(return_map(out)[0])
    return iter_out_map, iter_out_cl, res  # x_list
def get_window_power_cl(self, corr={}, indxs={}):
    win = {}
    if not self.use_window:
        win = {'cl': self.f_sky, 'M': self.coupling_M, 'xi': 1, 'xi_b': 1}
        return win
    m1m2 = np.absolute(self.m1_m2s[(corr[0], corr[1])]).flatten()
    # print(m1m2[0], self.wig_3j)
    wig_3j_1 = self.wig_3j[m1m2[0]]
    wig_3j_2 = self.wig_3j[m1m2[1]]
    z_bin1 = self.z_bins[corr[0]][indxs[0]]
    z_bin2 = self.z_bins[corr[1]][indxs[1]]
    alm1 = z_bin1['window_alm']
    alm2 = z_bin2['window_alm']
    win['cl'] = hp.alm2cl(alms1=alm1, alms2=alm2,
                          lmax_out=self.window_lmax)  # This is f_sky*cl.
    win['M'] = self.coupling_matrix_large(
        win['cl'], wig_3j_1, wig_3j_2) * (2 * self.l[:, None] + 1)  # FIXME: check ordering
    # Note that this matrix leads to pseudo-cl, which differs by a factor of f_sky from the true cl
    if self.do_xi:
        th, win['xi'] = self.HT.projected_correlation(l_cl=self.window_l,
                                                      m1_m2=(0, 0), cl=win['cl'])
        win['xi_b'] = self.binning.bin_1d(xi=win['xi'],
                                          bin_utils=self.xi_bin_utils[(0, 0)])
    del alm1
    del alm2
    return win
def calculate(self):
    """
    Calculate various properties of the Gaussian, fnl, gnl maps,
    i.e. compute
    * Ais
    * As
    * Cls
    * dipoles using remove_dipole
    """
    inpCls = self.inputCls[:self.lmax + 1]

    # "is not None" instead of "!= None": comparing a numpy array to None
    # elementwise makes the if-test ambiguous.
    if self.gausmap is not None:
        self.gausCls = hp.alm2cl(self.gausalm)[0:self.lmax + 1]
        self.gausA0 = get_A0(self.gausCls[1:], inpCls[1:])
        self.gausAi = Ais(self.gausmap, self.lmax); self.gausA = AistoA(self.gausAi)
        self.gausmp = hp.remove_monopole(self.gausmap, fitval=True)[1]
        self.gausdipole = get_dipole(self.gausmap)

    if self.gnlmaps is not None:
        self.gnlCls = []; self.gnlA0 = []; self.gnlAi = []
        self.gnlmp = []; self.gnldipole = []; self.gnlA = []

        for i in range(self.Ngnls):
            self.gnlCls.append(hp.anafast(self.gnlmaps[i], nspec=self.lmax))
            self.gnlA0.append(get_A0(self.gnlCls[i][1:], inpCls[1:]))
            self.gnlAi.append(Ais(self.gnlmaps[i], self.lmax)); self.gnlA.append(AistoA(self.gnlAi[i]))
            self.gnlmp.append(hp.remove_monopole(self.gnlmaps[i], fitval=True)[1])
            self.gnldipole.append(get_dipole(self.gnlmaps[i]))
def VisualizeAlm(alm, figno=1, max_l=None, annot=''):
    """Visualize a healpy a_lm vector."""
    lmax = hp.Alm.getlmax(alm.size)  # was f_lm.size, which is undefined in this scope
    l, m = hp.Alm.getlm(lmax)
    mag = np.zeros([lmax + 1, lmax + 1])
    phs = np.zeros([lmax + 1, lmax + 1])
    a_lm = np.zeros([lmax + 1, lmax + 1], dtype='complex128')
    mag[m, l] = np.abs(alm)
    phs[m, l] = np.angle(alm)
    a_lm[m, l] = alm
    cl = hp.alm2cl(alm)
    # Decide the range of l to plot
    if max_l is not None:
        max_l = (max_l if (max_l <= lmax) else lmax)
    else:
        max_l = lmax
    print(max_l)
    plt.figure(figno, figsize=(4, 4))
    plt.clf()
    plt.subplot(211)
    plt.imshow(mag[0:max_l, 0:max_l], interpolation='nearest', origin='lower')
    plt.xlabel(r'$\ell$')
    plt.ylabel(r'$m$')
    plt.colorbar()
    plt.title(annot + 'Magnitude')
    plt.subplot(212)
    plt.imshow(phs[0:max_l, 0:max_l], interpolation='nearest', origin='lower')
    plt.xlabel(r'$\ell$')
    plt.ylabel(r'$m$')
    plt.colorbar()
    plt.title(annot + 'Phase')
    # plt.subplot(313)
    # plt.semilogy(cl[0:max_l])
    return {'mag': mag, 'phs': phs, 'cl': cl, 'a_lm': a_lm}
def getsigmap(self, sigtype):
    """Generate a healpix CMB map with TT, EE, and BB."""
    fn = self.inputmap.replace('rxxxx', 'r{:04d}'.format(self.rlz))
    if 'EnoB' in self.sigtype:
        fn = fn.replace('camb_planck2013', 'camb_planck2013_EnoB')

    # Load
    self.hmap = np.array(hp.read_map('input_maps/' + fn, field=(0, 1, 2)))
    self.Nside = hp.npix2nside(self.hmap[0].size)

    # Band limit. Technically we should not band limit and just simulate
    # with beam rolloff, but the map should not have power beyond the
    # Nyquist limit of the beam postage stamp pixelization, which for the
    # case of our sidelobes is quite coarse (0.2 deg)
    alm = hp.map2alm(self.hmap)
    cl = hp.alm2cl(alm[0])
    fl = np.zeros_like(cl)
    l = np.arange(len(fl))
    l0 = 500
    l1 = 600
    fl[0:l0] = 1.0
    ll = np.arange(l1 - l0)
    ll = ll * np.pi / np.max(ll)
    fl[l0:l1] = 0.5 * (np.cos(ll) + 1)
    alm2 = [hp.almxfl(k, fl) for k in alm]
    self.hmap = np.array(hp.alm2map(alm2, self.Nside))

    del alm
    del alm2

    if 'EnoB' in self.sigtype:
        # no T->P for EnoB sims
        self.hmap[0, :] = 0
def fisher_single(par, v):  # v -> seen pixels
    nb = len(par.bins) - 1
    npix = hp.nside2npix(par.nside)
    npix_seen = len(par.ip_seen)
    lmax = 3 * par.nside - 1
    larr = np.arange(lmax + 1)
    fisher = np.zeros([nb, nb])
    pixsize = 4 * np.pi / hp.nside2npix(par.nside)
    v_map = np.zeros(npix); v_map[par.ip_seen] = v
    vcm1 = invert_covar(par, v_map)
    v_lm = hp.map2alm(v_map, iter=0)
    vcm1_lm = hp.map2alm(vcm1, iter=0)
    for iba in np.arange(nb):
        # print " Row %d" % iba
        transfer = np.zeros(lmax + 1); transfer[par.bins[iba]:par.bins[iba + 1]] = 1.
        v_map2 = hp.alm2map(hp.almxfl(v_lm, transfer), par.nside, verbose=False) / pixsize  # Q_a * v
        v_map2cm1 = invert_covar(par, v_map2)  # C^-1 * Q_a * v
        va_lm = hp.map2alm(v_map2cm1, iter=0)
        cl_vcm1_va = (2 * larr + 1) * hp.alm2cl(vcm1_lm, alms2=va_lm)
        for ibb in np.arange(nb - iba) + iba:
            fisher[iba, ibb] = np.sum(cl_vcm1_va[par.bins[ibb]:par.bins[ibb + 1]]) / pixsize**2
            if iba != ibb:
                fisher[ibb, iba] = fisher[iba, ibb]
    return fisher
def getSsim(ell, Cl, lmax=100, cutSky=False):
    """
    Purpose:
        create simulated S_{1/2} from input power spectrum
    Note:
        this calculates Jmn every time it is run so should not be used for ensembles
    Procedure:
        simulates full sky CMB, measures S_{1/2}
    Inputs:
        ell: the l values for the power spectrum
        Cl: the power spectrum
        lmax: the maximum ell value to use in calculation
            Default: 100
        cutSky: set to True to convert to real space, apply mask, etc.
            Default: False
            Note: True option not yet implemented
    Returns:
        simulated S_{1/2}
    """
    # get Jmn matrix for harmonic space S_{1/2} calc.
    myJmn = getJmn(lmax=lmax)[2:, 2:]  # do not include monopole, dipole

    # alm_prim, alm_late = hp.synalm((primCl, lateCl, crossCl), lmax=lmax, new=True)
    almSim = hp.synalm(Cl, lmax=lmax)  # question: does this need to start at ell[0]=1?
    ClSim = hp.alm2cl(almSim)

    return np.dot(ClSim[2:], np.dot(myJmn, ClSim[2:]))
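# The cutSky branch is flagged above as not yet implemented. A minimal sketch of what it
# might look like: synthesize a map, apply a mask, and use a roughly fsky-corrected
# pseudo-Cl in place of ClSim. The mask argument, nside, and the fsky correction are
# assumptions for illustration, not the author's implementation.
import numpy as np
import healpy as hp

def getSsim_cutsky_sketch(Cl, mask, lmax=100, nside=128):
    myJmn = getJmn(lmax=lmax)[2:, 2:]          # reuses the helper from the snippet above
    mapSim = hp.synfast(Cl, nside=nside, lmax=lmax)
    fsky = np.mean(mask**2)                    # crude pseudo-Cl normalization
    ClSim = hp.anafast(mapSim * mask, lmax=lmax) / fsky
    return np.dot(ClSim[2:], np.dot(myJmn, ClSim[2:]))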
def _get_cls_fg(self):
    print('======= COMPUTATION OF CL_FGS =======')
    if self.n_stokes == 3:
        d_spectra = self.d_fgs
    else:
        # Only P is provided, add T for map2alm
        d_spectra = np.zeros((self.n_freqs, 3, self.d_fgs.shape[2]),
                             dtype=self.d_fgs.dtype)
        d_spectra[:, 1:] = self.d_fgs

    # Compute cross-spectra
    almBs = [
        hp.map2alm(freq_map, lmax=self.lmax, iter=10)[2]
        for freq_map in d_spectra
    ]
    Cl_fgs = np.zeros((self.n_freqs, self.n_freqs, self.lmax + 1),
                      dtype=self.d_fgs.dtype)
    for f1 in range(self.n_freqs):
        for f2 in range(self.n_freqs):
            if f1 > f2:
                Cl_fgs[f1, f2] = Cl_fgs[f2, f1]
            else:
                Cl_fgs[f1, f2] = hp.alm2cl(almBs[f1], almBs[f2], lmax=self.lmax)

    Cl_fgs = Cl_fgs[..., self.lmin:] / self.fsky
    return Cl_fgs
def VisualizeAlm(alm, figno=1, max_l=None):
    """Visualize a healpy a_lm vector."""
    lmax = hp.Alm.getlmax(alm.size)  # was f_lm.size, which is undefined in this scope
    l, m = hp.Alm.getlm(lmax)
    mag = np.zeros([lmax + 1, lmax + 1])
    phs = np.zeros([lmax + 1, lmax + 1])
    mag[m, l] = np.abs(alm)
    phs[m, l] = np.angle(alm)
    cl = hp.alm2cl(alm)
    # Decide the range of l to plot
    if max_l is not None:
        max_l = (max_l if (max_l <= lmax) else lmax)
    else:
        max_l = lmax
    print(max_l)
    plt.figure(figno)
    plt.clf()
    plt.subplot(211)
    plt.imshow(mag[0:max_l, 0:max_l], interpolation='nearest', origin='lower')
    plt.colorbar()
    plt.subplot(212)
    plt.imshow(phs[0:max_l, 0:max_l], interpolation='nearest', origin='lower')
    plt.colorbar()
    # plt.subplot(313)
    # plt.semilogy(cl[0:max_l])
    return {'mag': mag, 'phs': phs, 'cl': cl}
def map2power(iqu, mpibox):
    from orphics.tools.stats import bin2D, bin1D

    bin_edges = np.arange(200, 4000, 40)
    print("Map 2 alm...")
    alm = curvedsky.map2alm(iqu.astype("float64"), lmax=5000)
    del iqu
    cls = hp.alm2cl(alm)
    del alm
    fineells = np.arange(0, cls.shape[1], 1)
    print("Binning...")
    lbinner = bin1D(bin_edges)

    def b(cls):
        ells, cl1d = lbinner.binned(fineells, fineells * cls)
        ells, norm = lbinner.binned(fineells, fineells)
        cl1d /= norm
        return ells, cl1d

    ells, cltt = b(cls[0, :])
    ells, clee = b(cls[1, :])
    ells, clbb = b(cls[2, :])
    ells, clte = b(cls[3, :])
    ells, cleb = b(cls[4, :])
    ells, cltb = b(cls[5, :])
    mpibox.add_to_stats("TT", cltt)
    mpibox.add_to_stats("EE", clee)
    mpibox.add_to_stats("BB", clbb)
    mpibox.add_to_stats("TE", clte)
    mpibox.add_to_stats("EB", cleb)
    mpibox.add_to_stats("TB", cltb)
    return ells
def check_Tlm2d(nu=100, lmax=300, experiment='planck', maskfield=2, source_maskfield=0, label_loc='lower right', xmax=None): if experiment == 'planck': Imap_name = PLANCK_DATA_PATH + 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format( nu) Imap = hp.read_map(Imap_name) mask = hp.read_map(PLANCK_DATA_PATH + 'HFI_Mask_GalPlane-apo0_2048_R2.00.fits', field=maskfield) smask = hp.read_map(PLANCK_DATA_PATH + 'HFI_Mask_PointSrc_2048_R2.00.fits', field=source_maskfield) mask *= smask hdulist = fits.open(PLANCK_DATA_PATH + 'HFI_RIMO_Beams-100pc_R2.00.fits') beam = hdulist[BEAM_INDEX['{}'.format(nu)]].data.NOMINAL[0][:lmax + 1] tlm = get_Tlm(lmax=lmax, Imap=Imap, mask=mask, healpy_format=False, recalc=True, div_beam=beam) tlm_hp = get_Tlm(lmax=lmax, Imap=Imap, mask=mask, healpy_format=True, recalc=True, div_beam=beam) cl = cl_alm2d(alm1=tlm, lmax=lmax) l = np.arange(len(cl)) cl_hp = hp.alm2cl(tlm_hp, lmax=lmax) l_hp = np.arange(len(cl_hp)) clplanck = np.loadtxt( data_path + 'bf_base_cmbonly_plikHMv18_TT_lowTEB_lmax4000.minimum.theory_cl') cl_planck = clplanck[:, 1] l_planck = clplanck[:, 0] pl.figure() pl.title('TT check') pl.plot(l, cl * l * (l + 1) / 2. / np.pi * 1e12, label='2d') pl.plot(l_hp, cl_hp * l_hp * (l_hp + 1) / 2. / np.pi * 1e12, label='healpy') pl.plot(l_planck, cl_planck, label='planck best fit') pl.legend(loc=label_loc) if xmax is None: pl.xlim(xmax=lmax) else: pl.xlim(xmax=xmax)
def psd_unseen_helper(x, Nside):
    """Compute the Power Spectral Density for healpy maps (incomplete data)."""
    if len(x.shape) == 2 and x.shape[1] > 1:
        return np.stack([psd_unseen(x[ind, ]) for ind in range(len(x))])
    y = np.zeros(shape=[hp.nside2npix(Nside)])
    y[:] = hp.UNSEEN
    y[:len(x)] = x
    hatx = hp.map2alm(hp.reorder(y, n2r=True))
    return hp.alm2cl(hatx)
def return_map(map_class):
    """We solve for C^{-1/2}x; this recovers x."""
    Shalf = np.sqrt(map_class.cl_th[:map_class.lmax + 1])
    alm_out = hp.almxfl(real2complex_alm(map_class.alm), Shalf)
    cl_out = hp.alm2cl(alm_out)
    map_out = hp.alm2map(alm_out, map_class.nside)
    return cl_out, map_out
def get_spectra(emap1, emap2=None, lmax=5000):
    atol = np.min(np.array(emap1.pixshape() / eutils.arcmin))
    alm1 = curvedsky.map2alm(emap1, lmax=lmax, atol=atol).astype(np.complex128)
    alm2 = alm1 if emap2 is None else curvedsky.map2alm(emap2, lmax=lmax, atol=atol).astype(np.complex128)
    cl = hp.alm2cl(alm1, alm2, lmax=lmax)
    l = np.arange(len(cl))
    return (l, cl)
def make_sim(self,seed): with bench.show("Lensing operation...") if self.rank==0 else ignore(): full,kappa = lensing.rand_map(self.fshape, self.fwcs, self.ps, lmax=self.lmax, maplmax=self.lmax, seed=seed, verbose=True if self.rank==0 else False, dtype=self.dtype,output="lk") alms = curvedsky.map2alm(full,lmax=self.lmax) ps_data = hp.alm2cl(alms.astype(np.complex128)) del alms self.mpibox.add_to_stats("fullsky_ps",ps_data) south = full.submap(self.pos_south) equator = full.submap(self.pos_eq) ksouth = kappa.submap(self.pos_south) kequator = kappa.submap(self.pos_eq) del full del kappa if self.count==0: self.shape['s'], self.wcs['s'] = south.shape, south.wcs self.shape['e'], self.wcs['e'] = equator.shape, equator.wcs for m in ['s','e']: self.taper[m],self.w2[m] = fmaps.get_taper(self.shape[m],taper_percent = 18.0,pad_percent = 4.0,weight=None) self.w4[m] = np.mean(self.taper[m]**4.) self.w3[m] = np.mean(self.taper[m]**3.) self.rotator = fmaps.MapRotatorEquator(self.shape['s'],self.wcs['s'],self.wdeg,self.hdeg,width_multiplier=0.6, height_multiplier=1.2,downsample=True,verbose=True if self.rank==0 else False, pix_target_override_arcmin=self.pix_intermediate) self.taper['r'] = self.rotator.rotate(self.taper['s']) self.w2['r'] = np.mean(self.taper['r']**2.) self.w4['r'] = np.mean(self.taper['r']**4.) self.w3['r'] = np.mean(self.taper['r']**3.) self.shape['r'], self.wcs['r'] = self.rotator.shape_final, self.rotator.wcs_final self.fc = {} self.binner = {} self.modlmap = {} for m in ['s','e','r']: self.fc[m] = enmap.FourierCalc(self.shape[m],self.wcs[m]) self.modlmap[m] = enmap.modlmap(self.shape[m],self.wcs[m]) self.binner[m] = bin2D(self.modlmap[m],self.bin_edges) self.cents = self.binner['s'].centers self._init_qests() self.count += 1 south *= self.taper['s'] equator *= self.taper['e'] ksouth *= self.taper['s'] kequator *= self.taper['e'] return south,equator,ksouth,kequator
def calculate(self):
    """
    Calculate various properties of the Gaussian, fnl, gnl maps,
    i.e. compute
    * Ais
    * As
    * Cls
    * dipoles using remove_dipole
    """
    inpCls = self.inputCls[:self.lmax + 1]

    # "is not None" instead of "!= None": comparing a numpy array to None
    # elementwise makes the if-test ambiguous.
    if self.gausmap0 is not None:
        self.gausCls0 = hp.alm2cl(self.gausalm0)[0:self.lmax + 1]
        self.gausCls1 = hp.alm2cl(self.gausalm1)[0:self.lmax + 1]

        self.gausA0 = get_A0(self.gausCls0[1:], inpCls[1:])
        self.gausAi = Ais(self.gausmap1, self.lmax)
        self.gausA = AistoA(self.gausAi)
        self.gausAi2 = Ais(self.gausmap0, self.lmax)
        self.gausA2 = AistoA(self.gausAi2)
        self.gausmp = hp.remove_monopole(self.gausmap0, fitval=True)[1]
        self.gausdipole = get_dipole(self.gausmap1)

    if self.fnlmaps1 is not None:
        self.fnlCls0 = []
        self.fnlCls1 = []
        self.fnlA0 = []
        self.fnlAi = []
        self.fnlAi2 = []
        self.fnlmp = []
        self.fnldipole = []
        self.fnlA = []
        self.fnlA2 = []

        for i in range(self.Nfnls):
            #self.fnlCls0.append(hp.anafast(self.fnlmaps0[i], nspec=self.lmax))
            self.fnlCls1.append(hp.anafast(self.fnlmaps1[i], nspec=self.lmax))
            #self.fnlA0.append(get_A0(self.fnlCls0[i][1:], inpCls[1:]))
            self.fnlAi.append(Ais(self.fnlmaps1[i], self.lmax))
            self.fnlA.append(AistoA(self.fnlAi[i]))
            #self.fnlAi2.append(Ais(self.fnlmaps0[i], self.lmax)); self.fnlA2.append(AistoA(self.fnlAi2[i]))
            #self.fnlmp.append(hp.remove_monopole(self.fnlmaps0[i], fitval=True)[1])
            self.fnldipole.append(get_dipole(self.fnlmaps1[i]))
def get_sim_cl(self, key, idx):
    assert len(key) == 2, key
    f1, f2 = key
    i = self.fields.index(f1)
    j = self.fields.index(f2)
    if i > j:
        return self.get_sim_cl(f2 + f1, idx)
    fname = self.lib_dir + '/cl%s_%04d.dat' % (key, idx)
    if not os.path.exists(fname):
        alm1 = self.get_sim_alm(idx, f1)
        alm2 = alm1 if f2 == f1 else self.get_sim_alm(idx, f2)
        np.savetxt(fname, hp.alm2cl(alm1, alms2=alm2))
    return np.loadtxt(fname)
def test_field_get_alms():
    nside = 32
    npix = hp.nside2npix(nside)
    mp = np.random.randn(3, npix)
    msk = np.ones(npix)

    # Spin 0
    f = nmt.NmtField(msk, [mp[0]], n_iter=0)
    alm = f.get_alms()[0]
    cl_tt_nmt = hp.alm2cl(alm)

    # Spin 2
    f = nmt.NmtField(msk, mp[1:], n_iter=0)
    alm = f.get_alms()
    cl_ee_nmt = hp.alm2cl(alm[0])
    cl_bb_nmt = hp.alm2cl(alm[1])

    cl_tt, cl_ee, cl_bb, cl_te, cl_eb, cl_tb = hp.anafast(mp, iter=0, pol=True)
    assert (np.all(np.fabs(cl_tt_nmt / cl_tt - 1) < 1E-10))
    assert (np.all(np.fabs(cl_ee_nmt[2:] / cl_ee[2:] - 1) < 1E-10))
    assert (np.all(np.fabs(cl_bb_nmt[2:] / cl_bb[2:] - 1) < 1E-10))
def check_simulation(a, b, map_list, sim_list, ivar_list, mask): """ Check whether simulated power spectrum P_{ab} is consistent with data. Returns list of (split_sim-coadd,split_data-coadd) weighted by the mask*effective_ivar. """ shape = ivar_list[0].shape wcs = ivar_list[0].wcs pmap = enmap.pixsizemap(shape, wcs) sim_coadd = [] data_coadd = [] for i in range(len(sim_list)): dsim = sim_list[i] - coadd_map(sim_list, ivar_list) dsim = dsim * mask * ivar_eff(i, ivar_list) / pmap testalm = cs.map2alm(dsim, lmax=10000) testalm = testalm.astype(np.complex128) testcl = hp.alm2cl(testalm) sim_coadd.append(testcl) if a == b: for i in range(len(map_list)): dataco = map_list[i][a] - coadd_mapnew(map_list, ivar_list, a) dataco = dataco * mask * ivar_eff(i, ivar_list) / pmap testalm = cs.map2alm(dataco, lmax=10000) testalm = testalm.astype(np.complex128) testcl = hp.alm2cl(testalm) data_coadd.append(testcl) else: for i in range(len(map_list)): data_a = map_list[i][a] - coadd_mapnew(map_list, ivar_list, a) data_a = data_a * mask * ivar_eff(i, ivar_list) / pmap data_b = map_list[i][b] - coadd_mapnew(map_list, ivar_list, b) data_b = data_b * mask * ivar_eff(i, ivar_list) / pmap testalm_a = cs.map2alm(data_a, lmax=10000) testalm_a = testalm_a.astype(np.complex128) testalm_b = cs.map2alm(data_b, lmax=10000) testalm_b = testalm_b.astype(np.complex128) testcl = hp.alm2cl(testalm_a, testalm_b) data_coadd.append(testcl) sim_coadd = np.array(sim_coadd) data_coadd = np.array(data_coadd) return (sim_coadd, data_coadd)
def test_alm2cl(self):
    nside = 32
    lmax = 64
    lmax_out = 100
    seed = 12345
    np.random.seed(seed)
    # Input power spectrum and alm
    alm_syn = hp.synalm(self.cla, lmax=lmax)
    cl_out = hp.alm2cl(alm_syn, lmax_out=lmax_out - 1)
    np.testing.assert_array_almost_equal(cl_out, self.cla[:lmax_out], decimal=4)
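# A hypothetical companion test (not part of the original suite) exercising the
# two-argument form of hp.alm2cl: the cross-spectrum of an alm with itself must
# reproduce its auto-spectrum. It reuses the same self.cla fixture assumed above.
def test_alm2cl_cross(self):
    lmax = 64
    np.random.seed(12345)
    alm_syn = hp.synalm(self.cla, lmax=lmax)
    cl_auto = hp.alm2cl(alm_syn)
    cl_cross = hp.alm2cl(alm_syn, alms2=alm_syn)
    np.testing.assert_allclose(cl_cross, cl_auto)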
def check_TE2d(nu=100, lmax=300, maskfield=2, source_maskfield=0, label_loc='lower right', xmax=None): map_name = 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(nu) I,Q,U =hp.read_map(data_path + map_name, field=(0,1,2)) mask=hp.read_map(data_path + 'HFI_Mask_GalPlane-apo0_2048_R2.00.fits', field=maskfield) smask=hp.read_map(data_path + 'HFI_Mask_PointSrc_2048_R2.00.fits', field=source_maskfield) mask *= smask hdulist = fits.open(data_path + 'HFI_RIMO_Beams-100pc_R2.00.fits') beamP = hdulist[beam_index['{}P'.format(nu)]].data.NOMINAL[0][:lmax+1] beam = hdulist[beam_index['{}'.format(nu)]].data.NOMINAL[0][:lmax+1] #tlm = get_Tlm(lmax=lmax, Imap=I, mask=mask, # healpy_format=False, recalc=True, div_beam=beam) #elm,blm = get_ElmBlm(lmax=lmax, Qmap=Q, Umap=U, mask=mask, # healpy_format=False, recalc=True, div_beam=beamP) tlm_hp = get_Tlm(lmax=lmax, Imap=I, mask=mask, healpy_format=True, recalc=True, div_beam=beam) elm_hp,blm_hp = get_ElmBlm(lmax=lmax, Qmap=Q, Umap=U, mask=mask, healpy_format=True, recalc=True, div_beam=beamP) #cltt = cl_alm2d(tlm, lmax) #clee = cl_alm2d(elm, lmax) #clbb = cl_alm2d(blm, lmax) #l = np.arange(len(clee)) clte_hp = hp.alm2cl(tlm_hp, elm_hp, lmax=lmax) #clee_hp = hp.alm2cl(elm_hp, lmax=lmax) #clbb_hp = hp.alm2cl(blm_hp, lmax=lmax) l_hp = np.arange(len(clte_hp)) clplanck = np.loadtxt(data_path + 'bf_base_cmbonly_plikHMv18_TT_lowTEB_lmax4000.minimum.theory_cl') clte_planck = clplanck[:,2] #clee_planck = clplanck[:,3] #clbb_planck = clplanck[:,4] l_planck = clplanck[:,0] pl.figure() pl.title('TE check') #pl.plot(l, clee*l*(l+1)/2./np.pi*1e12, label='2d') pl.plot(l_hp,clte_hp*l_hp*(l_hp+1)/2./np.pi*1e12, label='healpy') pl.plot(l_planck, clte_planck, label='planck best fit') pl.legend(loc=label_loc) if xmax is None: pl.xlim(xmax=lmax) else: pl.xlim(xmax=xmax)
def test_no_stokes(self):
    NFREQ = 3
    NSIDE = 2
    np.random.seed(0)
    alms = [
        hp.map2alm(np.random.normal(size=(12 * NSIDE**2)))
        for i in range(NFREQ)
    ]
    res = _empirical_harmonic_covariance(alms)
    ref = np.empty_like(res)
    for f1 in range(NFREQ):
        for f2 in range(NFREQ):
            ref[f1, f2] = hp.alm2cl(alms[f1], alms[f2])
    aac(ref, res)
def test_utils_contract_almxblm_cl(self):
    # Check if contraction matches hp.alm2cl.
    alm = np.ones(10, dtype=np.complex128)
    alm += 1j * np.ones(10, dtype=np.complex128)
    alm[:4] = 1
    lmax = 3
    ells = np.asarray([0, 1, 2, 3])
    cl = hp.alm2cl(alm)
    ans_exp = np.sum(cl * (2 * ells + 1))
    ans = utils.contract_almxblm(alm, np.conj(alm))
    self.assertEqual(ans, ans_exp)
def correlation2pt(map1, map2=None, npoints=100):
    if map2 is None:
        map2 = map1
    alm1 = hp.map2alm(map1)
    alm2 = hp.map2alm(map2)
    clcross = hp.alm2cl(alm1, alm2)
    thetas = np.linspace(0., np.pi, npoints)
    corr = np.zeros(npoints)
    for i, theta in enumerate(thetas):
        for l, cl in enumerate(clcross):
            lfactor = (2 * l + 1.) / (4. * np.pi) * scipy.special.lpmv(0, l, np.cos(theta))
            corr[i] += lfactor * cl
    return thetas, corr
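# The double loop above costs npoints x lmax Legendre evaluations. The same sum can be
# computed with a single Legendre-series call per set of angles via
# numpy.polynomial.legendre.legval. A sketch assuming the same normalization as
# correlation2pt; the function name is illustrative, not part of the original module.
import numpy as np
import healpy as hp
from numpy.polynomial import legendre

def correlation2pt_fast(map1, map2=None, npoints=100):
    if map2 is None:
        map2 = map1
    clcross = hp.alm2cl(hp.map2alm(map1), hp.map2alm(map2))
    ell = np.arange(len(clcross))
    coeffs = (2 * ell + 1.) / (4. * np.pi) * clcross   # Legendre coefficients of the sum
    thetas = np.linspace(0., np.pi, npoints)
    corr = legendre.legval(np.cos(thetas), coeffs)     # sum_l coeffs[l] * P_l(cos theta)
    return thetas, corr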
def wiener_filter_for_alm(alm, lmax=None, fwhm=0.0, f_sky=1.0, sky_prior=None):
    if lmax is None:
        lmax = hp.Alm.getlmax(len(alm), None)

    if sky_prior is None:
        spectra_th = np.load("/global/homes/b/banerji/simulation/spectra/r_001/lensedtot_cls.npy")[0, :lmax + 1]
    else:
        spectra_th = estimate_cl(sky_prior, lmax, fwhm=fwhm, pol=False)

    Bl = hp.gauss_beam(fwhm=fwhm, lmax=lmax, pol=False)

    spectra_ob = hp.alm2cl(alm, lmax_out=lmax)
    spectra_ob /= f_sky * Bl**2

    filter_response = spectra_ob / spectra_th
    filter_response[:2] = 1.0
    filter_response = np.sqrt(filter_response)

    return filter_response
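# A short usage sketch (hypothetical caller, not part of the original module): the
# returned array is an ell-space filter, so it would typically be applied back to the
# alm with hp.almxfl; "alm" here stands for whatever observed alm the caller holds.
import healpy as hp

filt = wiener_filter_for_alm(alm, fwhm=0.0, f_sky=1.0)
alm_filtered = hp.almxfl(alm, filt)
cl_filtered = hp.alm2cl(alm_filtered)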
def check_Tlm2d(nu=100, lmax=300, maskfield=2, source_maskfield=0,
                label_loc='lower right', xmax=None):
    Imap_name = 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(nu)
    Imap = hp.read_map(data_path + Imap_name)
    mask = hp.read_map(data_path + 'HFI_Mask_GalPlane-apo0_2048_R2.00.fits',
                       field=maskfield)
    smask = hp.read_map(data_path + 'HFI_Mask_PointSrc_2048_R2.00.fits',
                        field=source_maskfield)
    mask *= smask

    hdulist = fits.open(data_path + 'HFI_RIMO_Beams-100pc_R2.00.fits')
    beam = hdulist[beam_index['{}'.format(nu)]].data.NOMINAL[0][:lmax + 1]

    tlm = get_Tlm(lmax=lmax, Imap=Imap, mask=mask,
                  healpy_format=False, recalc=True, div_beam=beam)
    tlm_hp = get_Tlm(lmax=lmax, Imap=Imap, mask=mask,
                     healpy_format=True, recalc=True, div_beam=beam)

    cl = cl_alm2d(alm1=tlm, lmax=lmax)
    l = np.arange(len(cl))
    cl_hp = hp.alm2cl(tlm_hp, lmax=lmax)
    l_hp = np.arange(len(cl_hp))

    clplanck = np.loadtxt(data_path + 'bf_base_cmbonly_plikHMv18_TT_lowTEB_lmax4000.minimum.theory_cl')
    cl_planck = clplanck[:, 1]
    l_planck = clplanck[:, 0]

    pl.figure()
    pl.title('TT check')
    pl.plot(l, cl * l * (l + 1) / 2. / np.pi * 1e12, label='2d')
    pl.plot(l_hp, cl_hp * l_hp * (l_hp + 1) / 2. / np.pi * 1e12, label='healpy')
    pl.plot(l_planck, cl_planck, label='planck best fit')
    pl.legend(loc=label_loc)
    if xmax is None:
        pl.xlim(xmax=lmax)
    else:
        pl.xlim(xmax=xmax)
def plot_tests():
    spec_mf_b = hp.alm2cl(hp.almxfl(mf_lm_new, bl))
    spec_mf = hp.alm2cl(mf_lm_new)
    spec = hp.alm2cl(dlm)
    diff = hp.alm2cl(dlm - hp.almxfl(mf_lm_new, bl))
    fluc_l = hp.alm2cl(fluc)
    nnn = hp.alm2cl(dlm - hp.almxfl(mf_lm_new, bl))

    plt.figure()
    plt.plot(spec_mf, label=r'mean field ($\hat{s}$)')
    plt.plot(spec_mf_b, label=r'beamed mean field ($A\hat{s}$)')
    plt.plot(spec, label='data (d)')
    plt.plot(spec - spec_mf_b, label=r'($d_\ell - (A\hat{s})_\ell$)')
    plt.plot(diff, label=r'($d - (A\hat{s})$)$_\ell$')
    plt.plot(fluc_l, "-.", label=r'($\hat{f}$)$_\ell$')
    plt.plot(nl, "-.", label="noise")
    plt.plot(cl, "-.", label="trial spectrum")
    plt.yscale("log")
    plt.legend(loc='best')
def calibrate_fsky(mode, nsim=1, smear=True,apo=2, psky=70, f=353, nside=2048,lmax=1000, visual_check=False, beam_file=pf.PLANCK_DATA_PATH+'HFI_RIMO_Beams-100pc_R2.00.fits', mask_sources=True, put_mask=True): """No noise treatment here""" fsky_correction = [] TTm = np.zeros(lmax + 1) EEm = np.zeros(lmax + 1) BBm = np.zeros(lmax + 1) if put_mask: print 'reading mask...' mask = pf.get_planck_mask(psky=psky, mask_sources=mask_sources, apodization=apo) else: mask = None print 'reading beams...' hdulist = pf.fits.open(beam_file) beam = hdulist[pf.BEAM_INDEX['{}'.format(f)]].data.NOMINAL[0][:lmax+1] beamP = hdulist[pf.BEAM_INDEX['{}P'.format(f)]].data.NOMINAL[0][:lmax+1] beam = beam[:lmax+1] beamP = beamP[:lmax+1] if mode == 'fg': ls, cls_theory = get_theory_fg(f=f, lmax=lmax, psky=psky, apo=apo) if mode == 'cmb': ls, cls_theor = pf.get_theory_cmb(lmax=lmax, mode='cl') factor = ls*(1+ls) for i in np.arange(nsim): print 'sim #{}...'.format(i+1) I, Q, U = pf.simulate_cmb_map(nside=nside, lmax=lmax, frequency=f,smear=smear, cls_theory=cls_theory, beam=beam, beamP=beamP, save=False, beam_file=None) print 'Cl #{}...'.format(i+1) Tlm, Elm, Blm = pf.calc_alm(I, Q, U, mask=mask, lmax=lmax, add_beam=None,add_beamP=None, div_beam=beam,div_beamP=beamP, healpy_format=True) TT = hp.alm2cl(Tlm) EE = hp.alm2cl(Elm) BB = hp.alm2cl(Blm) #ls, TT, EE, BB, TE, TB, EB = measure_dlcl(mask=mask, Imap=I, Qmap=Q, Umap=U, # beam=beam, beamP=beamP, # mode='cl',frequency=f, # lmax=lmax,lmin=0, # put_mask=put_mask, # psky=psky, # mask_sources=mask_sources, # apodization=apo) TTm += TT/nsim; EEm += EE/nsim; BBm += BB/nsim if visual_check: hp.mollview(I) hp.mollview(Q) hp.mollview(U) fsky_correction.append(cls_theory[0] / TTm) fsky_correction.append(cls_theory[1] / EEm) fsky_correction.append(cls_theory[2] / BBm) fsky_correction = np.array(fsky_correction) fsky_correction[np.isnan(fsky_correction)] = 0. fsky_correction[fsky_correction==np.inf] = 0. return ls, fsky_correction, TTm, EEm, BBm, cls_theory
lmax = 3 * nside - 1
l, m = hp.Alm.getlm(lmax)
n_lm = len(l)
ell = np.arange(0, lmax + 1)
below_horizon = np.where(theta > np.pi / 2.)[0]
nu = np.linspace(100, 200, num=203) * u.MHz

# Define primary beam
# A = np.exp(-np.power(theta,2)/(2.*0.1))
# A = np.exp(-np.power(theta,2)/(2.*0.1))
A = np.ones_like(theta)
A[below_horizon] = 0.
a_lm = hp.map2alm(A, lmax=lmax)
Cl_A = hp.alm2cl(a_lm, lmax=lmax)
Cl_G = hp.gauss_beam(np.radians(90.), lmax)
A_nu = np.outer(A, np.ones_like(nu))

# apod = np.ones_like(theta)
# apod = np.exp(-np.power(theta,2)/(2.*0.5))
# disc = hp.query_disc(nside,[0,0,1],np.radians(80.))
# apod[disc] = 0.
# apod = 1-np.exp(-np.power(theta,2)/(2.*0.5))
# apod =
# apod = np.power(np.sin(phi),2)

# Define fringe
bmag = 30.
bvec = np.array([0, 1, 0]) * bmag
b = np.outer(bvec, np.ones(npix)) * u.m
bdots = np.sum(b * s, axis=0)
fringe = np.exp(-2. * np.pi * 1j * np.outer(bdots.value, nu.to(u.Hz).value) / c.c.value)

if True:
    fringe[below_horizon] = 0.
    beam[below_horizon] = 0.

## V_G(nu) = s_00(u) T(nu)
# Average compensates for any nside changes, normalized to beam solid angle (?)
omega_nu = 4. * np.pi * beam_nu.sum(axis=0) / npix
T_nu = np.average(beam_nu * fringe, axis=0)
window = np.hanning(len(T_nu))
dtrans_T_nu = np.fft.fftshift(np.fft.fft(window * T_nu))
Ptau_T_nu = np.abs(dtrans_T_nu)

f_lm = hp.map2alm(fringe[:, 100])
C_f = hp.alm2cl(f_lm)
a_lm = hp.map2alm(beam_nu[:, 100])
C_a = hp.alm2cl(a_lm)
C_fa = hp.alm2cl(f_lm * a_lm)

figno = 5
title = 'Gah'  # 'Sin^16(phi)'
filename = ''
plt.figure(figno)
plt.clf()
plt.subplot(231)
plt.plot(nu, T_nu.real, 'b')
plt.plot(nu, T_nu.real - T_nu.mean(), 'b--')
plt.plot(nu, window * T_nu.real, 'g')
else:
    no_cache = []
for i, file in enumerate(args):
    print file
    name = file.split('/')[-1]
    try:
        if not name in no_cache and not ('all' in no_cache):
            s_cl = n.loadtxt(name + '.cl')
            print "loading", name + '.cl'
    except(IOError):
        no_cache.append(name)
    if name in no_cache or 'all' in no_cache:
        s = hp.read_map(file)
        print "computing alms"
        s_alm = hp.map2alm(s)
        s_cl = hp.alm2cl(s_alm)
        n.savetxt(name + '.cl', s_cl)
    cls[file] = s_cl * 0.002**(opts.conv[i])

figure(88)
for cl in cls:
    s_cl = cls[cl]
    loglog(s_cl * n.linspace(0, len(s_cl), len(s_cl))**2, label=cl)
legend(loc='lower right')
xlabel('$\ell$')
ylabel('$\ell^2 C_\ell$ $[K^2]$')

figure(89)
for cl in cls:
    s_cl = cls[cl]
def main(i_sim=0): ''' MPI Setup ''' o_comm = MPI.COMM_WORLD i_rank = o_comm.Get_rank() # current core number -- e.g., i in arange(i_size) i_size = o_comm.Get_size() # number of cores assigned to run this program o_status = MPI.Status() i_work_tag = 0 i_die_tag = 1 ''' Loading and calculating power spectrum components ''' # Get run parameters s_fn_params = 'data/params.pkl' (i_lmax, i_nside, s_fn_map, s_map_name, s_fn_mask, s_fn_mll, s_fn_beam, s_fn_alphabeta, s_fn_cltt) = get_params(s_fn_params) s_fn_cltt = ('sims/na_cltt_sim_%i.npy' % i_sim) if (i_rank == 0): f_t1 = time.time() print "" print "Run parameters:" print "(Using %i cores)" % i_size print "lmax: %i, nside: %i, map name: %s" % (i_lmax, i_nside, s_map_name) print "beam: %s, alpha_beta: %s, cltt: %s" % (s_fn_beam, s_fn_alphabeta, s_fn_cltt) print "" print "Loading ell, r, dr, alpha, beta, cltt, and beam..." na_l, na_r, na_dr, na_alpha, na_beta = np.loadtxt(s_fn_alphabeta, usecols=(0,1,2,3,4), unpack=True, skiprows=3) na_l = np.unique(na_l) na_r = np.unique(na_r)[::-1] na_l = na_l[:i_lmax] i_num_ell = len(na_l) i_num_r = len(na_r) na_alpha = na_alpha.reshape(i_num_ell, i_num_r) na_beta = na_beta.reshape(i_num_ell, i_num_r) na_dr = na_dr.reshape(i_num_ell, i_num_r) na_dr = na_dr[0] if (i_rank == 0): print "(sizes from file load)" print "i_num_r: %i, i_num_ell: %i" % (i_num_r, i_num_ell) if (len(sys.argv) > 2): i_lmax_run = int(sys.argv[2]) else: i_lmax_run = i_lmax if (len(sys.argv) > 3): i_num_r_run = int(sys.argv[3]) else: i_num_r_run = i_num_r i_lmax_run = min(i_lmax_run, len(na_l)) i_num_r_run = min(i_num_r, i_num_r_run) i_r_steps = i_num_r / i_num_r_run na_mask = hp.read_map(s_fn_mask) s_fn_mll = 'output/na_mll_%i_lmax.npy' % i_lmax_run na_mll = np.load(s_fn_mll) na_mll_inv = np.linalg.inv(na_mll) if (i_rank == 0): print "(sizes for run)" print "i_num_r_run: %i, i_lmax_run: %i" % (i_num_r_run, i_lmax_run) na_l = na_l[:i_lmax_run] na_r = na_r[::i_r_steps] na_dr = na_dr[::i_r_steps] na_alpha = na_alpha[:i_lmax_run, ::i_r_steps] na_beta = na_beta[:i_lmax_run, ::i_r_steps] na_cltt = np.load(s_fn_cltt) na_cltt = na_cltt[:i_lmax_run] na_bl = np.load(s_fn_beam) na_bl = na_bl[:i_lmax_run] # f_t2 = time.time() if (i_rank == 0): print "" print "Calculating full kurtosis power spectra..." na_alm = hp.synalm(na_cltt, lmax=i_lmax_run, verbose=False) # f_t3 = time.time() na_work = np.zeros(2, dtype='i') na_result = np.zeros((2,i_lmax_run), dtype='d') li_dims = [i_num_r_run, i_num_r_run] # master loop if (i_rank == 0): na_kl22_data = np.zeros(i_lmax_run) na_kl31_data = np.zeros(i_lmax_run) # send initial jobs for i_rank_out in range(1, i_size): na_work = np.array(cart_index(i_rank_out-1, li_dims), dtype='i') o_comm.Send([na_work, MPI.INT], dest=i_rank_out, tag=i_work_tag) na_work = np.array(cart_index(i_size-1, li_dims), dtype='i') i_r1_start = na_work[0] i_r2_start = na_work[1] for i_r1 in range(i_r1_start, i_num_r_run): if (i_r1 % (i_num_r / 10) == 0): print "Finished %i%% of jobs... 
(%.2f s)" % (i_r1 * 100 / i_num_r_run, time.time() - f_t1) for i_r2 in range(i_r2_start, i_num_r_run): na_work = np.array([i_r1, i_r2], dtype='i') o_comm.Recv([na_result, MPI.DOUBLE], source=MPI.ANY_SOURCE, status=o_status, tag=MPI.ANY_TAG) #print "received results from core %i" % o_status.Get_source() o_comm.Send([na_work,MPI.INT], dest=o_status.Get_source(), tag=i_work_tag) na_kl22_data += na_result[0] na_kl31_data += na_result[1] for i_rank_out in range(1, i_size): o_comm.Recv([na_result, MPI.DOUBLE], source=MPI.ANY_SOURCE, status=o_status, tag=MPI.ANY_TAG) na_kl22_data += na_result[0] na_kl31_data += na_result[1] o_comm.Send([np.array([9999], dtype='i'), MPI.INT], dest=o_status.Get_source(), tag=i_die_tag) #slave loop: else: while(1): o_comm.Recv([na_work, MPI.INT], source=0, status=o_status, tag=MPI.ANY_TAG) if (o_status.Get_tag() == i_die_tag): break i_r1 = na_work[0] i_r2 = na_work[1] #print "doing work for r = %i on core %i" % (i_r, i_rank) na_Almr1 = hp.almxfl(na_alm, na_alpha[:,i_r1] / na_cltt * na_bl) na_Blmr1 = hp.almxfl(na_alm, na_beta[:,i_r1] / na_cltt * na_bl) na_Almr2 = hp.almxfl(na_alm, na_alpha[:,i_r2] / na_cltt * na_bl) na_Blmr2 = hp.almxfl(na_alm, na_beta[:,i_r2] / na_cltt * na_bl) # f_t4 = time.time() #all da maps na_Ar1n = hp.alm2map(na_Almr1, nside=i_nside, fwhm=0.00145444104333, verbose=False) na_Br1n = hp.alm2map(na_Blmr1, nside=i_nside, fwhm=0.00145444104333, verbose=False) na_Ar2n = hp.alm2map(na_Almr2, nside=i_nside, fwhm=0.00145444104333, verbose=False) na_Br2n = hp.alm2map(na_Blmr2, nside=i_nside, fwhm=0.00145444104333, verbose=False) na_Ar1n = na_Ar1n * na_mask na_Br1n = na_Br1n * na_mask na_Ar2n = na_Ar2n * na_mask na_Br2n = na_Br2n * na_mask # f_t5 = time.time() #print "starting map2alm for r = %i on core %i" % (i_r, i_rank) na_ABlmr1 = hp.map2alm(na_Ar1n*na_Br1n, lmax=i_lmax_run) if i_r1 == i_r2: na_B2lmr1 = hp.map2alm(na_Br1n*na_Br1n, lmax=i_lmax_run) na_AB2lmr1 = hp.map2alm(na_Ar1n*na_Br1n*na_Br1n, lmax=i_lmax_run) na_ABAlmr1 = hp.map2alm(na_Ar1n*na_Br1n*na_Ar1n, lmax=i_lmax_run) na_ABlmr2 = hp.map2alm(na_Ar2n*na_Br2n, lmax=i_lmax_run) na_B2lmr2 = hp.map2alm(na_Br2n*na_Br2n, lmax=i_lmax_run) #print "finished map2alm for r = %i on core %i" % (i_r, i_rank) # f_t6 = time.time() na_Jl_ABA_B = hp.alm2cl(na_ABAlmr1, na_Blmr2, lmax=i_lmax_run) na_Jl_AB_AB = hp.alm2cl(na_ABlmr1, na_ABlmr2, lmax=i_lmax_run) na_Jl_ABA_B = na_Jl_ABA_B[1:] na_Jl_AB_AB = na_Jl_AB_AB[1:] if i_r1 == i_r2: na_Ll_AB2_B = hp.alm2cl(na_AB2lmr1, na_Blmr1, lmax=i_lmax_run) na_Ll_AB_B2 = hp.alm2cl(na_ABlmr1, na_B2lmr1, lmax=i_lmax_run) na_Ll_AB2_B = na_Ll_AB2_B[1:] na_Ll_AB_B2 = na_Ll_AB_B2[1:] #f_t7 = time.time() na_result = np.zeros((2,i_lmax_run), dtype='d') if i_r1 == i_r2: na_result[0] += ((5./3.)**2. * na_Jl_AB_AB * na_r[i_r1]**2. * na_dr[i_r1] * na_r[i_r2]**2. * na_dr[i_r2] + 2. * na_Ll_AB_B2 * na_r[i_r1]**2. * na_dr[i_r1]) #kl22 na_result[1] += ((5./3.)**2. * na_Jl_ABA_B * na_r[i_r1]**2. * na_dr[i_r1] * na_r[i_r2]**2. * na_dr[i_r2] + 2. * na_Ll_AB2_B * na_r[i_r1]**2. * na_dr[i_r1]) #kl31 else: na_result[0] += ((5./3.)**2. * na_Jl_AB_AB * na_r[i_r1]**2. * na_dr[i_r1] * na_r[i_r2]**2. * na_dr[i_r2]) #kl22 na_result[1] += ((5./3.)**2. * na_Jl_ABA_B * na_r[i_r1]**2. * na_dr[i_r1] * na_r[i_r2]**2. * na_dr[i_r2]) #kl31 #print "finished work for r = %i on core %i" % (i_r, i_rank) o_comm.Send([na_result,MPI.DOUBLE], dest=0, tag=1) # print "Load time: %.2f s" % (f_t2 - f_t1) # print "synalm time: %.2f s" % (f_t3 - f_t2) # print "almxfl time: %.2f s" % ((f_t4 - f_t3) / 2.) 
# print "alm2map time: %.2f s" % ((f_t5 - f_t4) / 2.) # print "map2alm time: %.2f s" % ((f_t6 - f_t5) / 2.) # print "alm2cl time: %.2f s" % ((f_t7 - f_t6) / 2.) f_t8 = time.time() if (i_rank == 0): s_fn_kl22_data_no_mll = 'output/na_kl22_data_g_sim_%i_%i_rsteps_%i_lmax_no_mll.dat' % (i_sim, i_num_r_run, i_lmax_run) s_fn_kl31_data_no_mll = 'output/na_kl31_data_g_sim_%i_%i_rsteps_%i_lmax_no_mll.dat' % (i_sim, i_num_r_run, i_lmax_run) print "" print "Saving power spectrum to %s (not mll corrected)" % s_fn_kl22_data_no_mll print "Saving power spectrum to %s (not mll corrected)" % s_fn_kl31_data_no_mll np.savetxt(s_fn_kl22_data_no_mll, na_kl22_data) np.savetxt(s_fn_kl31_data_no_mll, na_kl31_data) s_fn_kl22_data = 'output/na_kl22_data_g_sim_%i_%i_rsteps_%i_lmax.dat' % (i_sim, i_num_r_run, i_lmax_run) s_fn_kl31_data = 'output/na_kl31_data_g_sim_%i_%i_rsteps_%i_lmax.dat' % (i_sim, i_num_r_run, i_lmax_run) print "" print "Saving power spectrum to %s" % s_fn_kl22_data print "Saving power spectrum to %s" % s_fn_kl31_data na_kl22_data = np.dot(na_mll_inv, na_kl22_data) na_kl31_data = np.dot(na_mll_inv, na_kl31_data) np.savetxt(s_fn_kl22_data, na_kl22_data) np.savetxt(s_fn_kl31_data, na_kl31_data) # print "Finished in %.2f s" % (f_t8 - f_t1) # # print "Load time: %.2f s" % (f_t2 - f_t1) # # print "synalm time: %.2f s" % (f_t3 - f_t2) # # print "almxfl time: %.2f s" % ((f_t4 - f_t3) / 2.) # # print "alm2map time: %.2f s" % ((f_t5 - f_t4) / 2.) # # print "map2alm time: %.2f s" % ((f_t6 - f_t5) / 2.) # # print "alm2cl time: %.2f s" % ((f_t7 - f_t6) / 2.) return
def main(): ''' MPI Setup ''' o_comm = MPI.COMM_WORLD i_rank = o_comm.Get_rank() # current core number -- e.g., i in arange(i_size) i_size = o_comm.Get_size() # number of cores assigned to run this program o_status = MPI.Status() i_work_tag = 0 i_die_tag = 1 ''' Loading and calculating power spectrum components ''' # Get run parameters s_fn_params = 'data/params.pkl' (i_lmax, i_nside, s_fn_map, s_map_name, s_fn_mask, s_fn_mll, s_fn_beam, s_fn_alphabeta, s_fn_cltt) = get_params(s_fn_params) #s_fn_cltt = 'sims/cl_fnl_0.dat' if (i_rank == 0): s_fn_cl21_data = 'output/cl21_data.dat' s_fn_cl21_data_no_mll = 'output/cl21_data_no_mll.dat' #s_fn_cl21_data = 'output/cl21_ps_smica.dat' #s_fn_cl21_data_no_mll = 'output/cl21_ps_smica_no_mll.dat' f_t1 = time.time() print "" print "Run parameters:" print "(Using %i cores)" % i_size print "lmax: %i, nside: %i, map name: %s" % (i_lmax, i_nside, s_map_name) print "beam: %s, alpha_beta: %s, cltt: %s" % (s_fn_beam, s_fn_alphabeta, s_fn_cltt) print "" print "Loading ell, r, dr, alpha, beta, cltt, and beam..." na_mask = hp.read_map(s_fn_mask) #s_fn_mll = 'output/na_mll_%i_lmax.npy' % i_lmax s_fn_mll = 'output/na_mll_1499_lmax.npy' na_mll = np.load(s_fn_mll) na_mll_inv = np.linalg.inv(na_mll) na_l, na_r, na_dr, na_alpha, na_beta = np.loadtxt(s_fn_alphabeta, usecols=(0,1,2,3,4), unpack=True, skiprows=3) na_l = np.unique(na_l) na_r = np.unique(na_r)[::-1] i_num_r = len(na_r) try: na_cltt = np.load(s_fn_cltt) except: na_cltt = np.loadtxt(s_fn_cltt) na_bl = np.load(s_fn_beam) na_alpha = na_alpha.reshape(len(na_l), i_num_r) na_beta = na_beta.reshape(len(na_l), i_num_r) na_dr = na_dr.reshape(len(na_l), i_num_r) na_dr = na_dr[0] i_num_ell = min(len(na_l), len(na_cltt), len(na_bl), i_lmax) na_l = na_l[:i_num_ell] na_cltt = na_cltt[:i_num_ell] na_bl = na_bl[:i_num_ell] na_alpha = na_alpha[:i_num_ell,:] na_beta = na_beta[:i_num_ell,:] if (i_rank == 0): print "i_num_r: %i, i_num_ell: %i" % (i_num_r, i_num_ell) # f_t2 = time.time() if (i_rank == 0): print "" print "Calculating full skewness power spectrum..." s_fn_alm = 'output/na_alm_data.fits' #s_fn_alm = 'data/ps_sim/alm_ps_smica_ell_2000.fits' na_alm = hp.read_alm(s_fn_alm) na_alm = na_alm[:hp.Alm.getsize(i_num_ell)] # f_t3 = time.time() na_cl21_data = np.zeros(i_num_ell) na_work = np.zeros(1, dtype='i') na_result = np.zeros(i_num_ell, dtype='d') # master loop if (i_rank == 0): # send initial jobs for i_rank_out in range(1,i_size): na_work = np.array([i_rank_out-1], dtype='i') o_comm.Send([na_work, MPI.INT], dest=i_rank_out, tag=i_work_tag) for i_r in range(i_size-1,i_num_r): if (i_r % (i_num_r / 10) == 0): print "Finished %i%% of jobs... 
(%.2f s)" % (i_r * 100 / i_num_r, time.time() - f_t1) na_work = np.array([i_r], dtype='i') o_comm.Recv([na_result, MPI.DOUBLE], source=MPI.ANY_SOURCE, status=o_status, tag=MPI.ANY_TAG) #print "received results from core %i" % o_status.Get_source() o_comm.Send([na_work,MPI.INT], dest=o_status.Get_source(), tag=i_work_tag) na_cl21_data += na_result for i_rank_out in range(1,i_size): o_comm.Recv([na_result, MPI.DOUBLE], source=MPI.ANY_SOURCE, status=o_status, tag=MPI.ANY_TAG) na_cl21_data += na_result print "cl21_data = %.6f, na_result = %.6f" % (np.average(na_cl21_data), np.average(na_result)) o_comm.Send([np.array([9999], dtype='i'), MPI.INT], dest=o_status.Get_source(), tag=i_die_tag) #slave loop: else: while(1): o_comm.Recv([na_work, MPI.INT], source=0, status=o_status, tag=MPI.ANY_TAG) if (o_status.Get_tag() == i_die_tag): break i_r = na_work[0] #print "doing work for r = %i on core %i" % (i_r, i_rank) na_Alm = hp.almxfl(na_alm, na_alpha[:,i_r] / na_cltt * na_bl) na_Blm = hp.almxfl(na_alm, na_beta[:,i_r] / na_cltt * na_bl) # print ("doing work for r=%i, alpha=(%.2f), beta=(%.2f)" % # (int(na_r[i_r]), na_alpha[0,i_r], na_beta[0,i_r])) # f_t4 = time.time() na_An = hp.alm2map(na_Alm, nside=i_nside, fwhm=0.00145444104333, verbose=False) na_Bn = hp.alm2map(na_Blm, nside=i_nside, fwhm=0.00145444104333, verbose=False) # *REMBER TO MULTIPLY BY THE MASK!* -- already doing this in cltt.py... na_An = na_An * na_mask na_Bn = na_Bn * na_mask # f_t5 = time.time() #print "starting map2alm for r = %i on core %i" % (i_r, i_rank) na_B2lm = hp.map2alm(na_Bn*na_Bn, lmax=i_num_ell) na_ABlm = hp.map2alm(na_An*na_Bn, lmax=i_num_ell) #print "finished map2alm for r = %i on core %i" % (i_r, i_rank) # f_t6 = time.time() na_clAB2 = hp.alm2cl(na_Alm, na_B2lm, lmax=i_num_ell) na_clABB = hp.alm2cl(na_ABlm, na_Blm, lmax=i_num_ell) #na_clAB2 = na_clAB2[:-1] # just doing this to make things fit... #na_clABB = na_clABB[:-1] # just doing this to make things fit... na_clAB2 = na_clAB2[1:] na_clABB = na_clABB[1:] #f_t7 = time.time() na_result = np.zeros(i_num_ell, dtype='d') na_result += (na_clAB2 + 2 * na_clABB) * na_r[i_r]**2. * na_dr[i_r] print ("finished work for r=%i, avg(alpha)=%.2f, avg(beta)=%.2f, avg(result)=%.4g" % (int(na_r[i_r]), np.average(na_alpha[:,i_r]), np.average(na_beta[:,i_r]), np.average(na_result))) #print "finished work for r = %i on core %i" % (i_r, i_rank) o_comm.Send([na_result,MPI.DOUBLE], dest=0, tag=1) # print "Load time: %.2f s" % (f_t2 - f_t1) # print "synalm time: %.2f s" % (f_t3 - f_t2) # print "almxfl time: %.2f s" % ((f_t4 - f_t3) / 2.) # print "alm2map time: %.2f s" % ((f_t5 - f_t4) / 2.) # print "map2alm time: %.2f s" % ((f_t6 - f_t5) / 2.) # print "alm2cl time: %.2f s" % ((f_t7 - f_t6) / 2.) f_t8 = time.time() if (i_rank == 0): print "" print ("Saving power spectrum to %s (not mll corrected)" % s_fn_cl21_data_no_mll) np.savetxt(s_fn_cl21_data_no_mll, na_cl21_data) print "" print "Saving power spectrum to %s (mll corrected)" % s_fn_cl21_data na_cl21_data = np.dot(na_mll_inv, na_cl21_data) np.savetxt(s_fn_cl21_data, na_cl21_data) # print "Finished in %.2f s" % (f_t8 - f_t1) # # print "Load time: %.2f s" % (f_t2 - f_t1) # # print "synalm time: %.2f s" % (f_t3 - f_t2) # # print "almxfl time: %.2f s" % ((f_t4 - f_t3) / 2.) # # print "alm2map time: %.2f s" % ((f_t5 - f_t4) / 2.) # # print "map2alm time: %.2f s" % ((f_t6 - f_t5) / 2.) # # print "alm2cl time: %.2f s" % ((f_t7 - f_t6) / 2.) return
def main(run_type='data', nsim=0, fnl=0): ''' MPI Setup ''' o_comm = MPI.COMM_WORLD i_rank = o_comm.Get_rank() # current core number -- e.g., i in arange(i_size) i_size = o_comm.Get_size() # number of cores assigned to run this program o_status = MPI.Status() i_work_tag = 0 i_die_tag = 1 ''' Loading and calculating power spectrum components ''' if (run_type == 'fnl'): nl = 1024 else: nl = 1499 if (run_type == 'data'): fn_map = h._fn_map elif (run_type == 'sim'): fn_map = 'output/map_sim_%i.fits' % nsim elif (run_type == 'fnl'): #print "fnl value: %i" % fnl fn_map = 'data/fnl_sims/map_fnl_%i_sim_%i.fits' % (int(fnl), nsim) # (1) read map (either map_data or map_sim), mask, mll, and create mll_inv; map_in = hp.read_map(fn_map) mask = hp.read_map(h._fn_mask) if (run_type == 'fnl'): mask = 1. fn_mll = 'output/na_mll_%i_lmax.npy' % nl mll = np.load(fn_mll) if (run_type == 'fnl'): mll = np.identity(nl) mll_inv = np.linalg.inv(mll) nside = hp.get_nside(map_in) if (i_rank == 0): if (run_type == 'data'): fn_cl21 = 'output/cl21_data.dat' fn_cl21_no_mll = 'output/cl21_data_no_mll.dat' elif (run_type == 'sim'): fn_cl21 = 'output/cl21_sim_%i.dat' % nsim fn_cl21_no_mll = 'output/cl21_no_mll_%i.dat' % nsim elif (run_type == 'fnl'): fn_cl21 = 'output/cl21_fnl_%i_sim_%i.dat' % (int(fnl), nsim) fn_cl21_no_mll = 'output/cl21_fnl_%i_sim_%i_no_mll.dat' % (int(fnl), nsim) f_t1 = time.time() print "" print "Run parameters:" print "(Using %i cores)" % i_size print "nl: %i, nside: %i, map: %s" % (nl, nside, fn_map) print "beam: %s, alpha_beta: %s, cltt: %s" % (h._fn_beam, h._fn_alphabeta, h._fn_cltt) print "" print "Loading ell, r, dr, alpha, beta, cltt, and beam..." # (2) normalize, remove mono-/dipole, and mask map to create map_masked; map_in /= (1e6 * 2.7) map_in = hp.remove_dipole(map_in) map_masked = map_in * mask # (3) create alm_masked (map2alm on map_masked), cltt_masked (anafast on # map_masked), and cltt_corrected (dot cltt_masked with mll_inv) if (run_type == 'data' or run_type == 'sim'): alm_masked = hp.map2alm(map_masked) elif (run_type == 'fnl'): fn_almg = ('data/fnl_sims/alm_l_%04d_v3.fits' % (nsim,)) almg = hp.read_alm(fn_almg) fn_almng = ('data/fnl_sims/alm_nl_%04d_v3.fits' % (nsim,)) almng = hp.read_alm(fn_almng) alm = almg + fnl * almng alm_masked = alm cltt_masked = hp.anafast(map_masked) cltt_masked = cltt_masked[:nl] cltt_corrected = np.dot(mll_inv, cltt_masked) # stuff with alpha, beta, r, dr, and beam l, r, dr, alpha, beta = np.loadtxt(h._fn_alphabeta, usecols=(0,1,2,3,4), unpack=True, skiprows=3) l = np.unique(l) r = np.unique(r)[::-1] nr = len(r) if (run_type == 'data' or run_type == 'sim'): cltt_denom = np.load('output/cltt_theory.npy') #replace with 'output/na_cltt.npy' cltt_denom = cltt_denom[:nl] elif (run_type == 'fnl'): cltt_denom = np.loadtxt('joe/cl_wmap5_bao_sn.dat', usecols=(1,), unpack=True) alpha = alpha.reshape(len(l), nr) beta = beta.reshape(len(l), nr) dr = dr.reshape(len(l), nr) dr = dr[0] if (run_type != 'fnl'): beam = np.load(h._fn_beam) else: #beam = np.ones(len(cltt_denom)) beam = np.load(h._fn_beam) noise = np.zeros(len(cltt_denom)) nlm = hp.synalm(noise, lmax=nl) ####### TEMPORARY -- change beam ####### #beam = np.ones(len(cltt_denom)) #noise = np.zeros(len(cltt_denom)) #nlm = hp.synalm(noise, lmax=nl) ######################################## l = l[:nl] beam = beam[:nl] alpha = alpha[:nl,:] beta = beta[:nl,:] if (i_rank == 0): print "nr: %i, nl: %i" % (nr, nl) f_t2 = time.time() if (i_rank == 0): print "" print "Time to create alms from maps: %.2f s" % (f_t2 - f_t1) 
print "Calculating full skewness power spectrum..." cl21 = np.zeros(nl) work = np.zeros(1, dtype='i') result = np.zeros(nl, dtype='d') # master loop if (i_rank == 0): # send initial jobs for i_rank_out in range(1,i_size): work = np.array([i_rank_out-1], dtype='i') o_comm.Send([work, MPI.INT], dest=i_rank_out, tag=i_work_tag) for i_r in range(i_size-1,nr): if (i_r % (nr / 10) == 0): print "Finished %i%% of jobs... (%.2f s)" % (i_r * 100 / nr, time.time() - f_t2) work = np.array([i_r], dtype='i') o_comm.Recv([result, MPI.DOUBLE], source=MPI.ANY_SOURCE, status=o_status, tag=MPI.ANY_TAG) #print "received results from core %i" % o_status.Get_source() o_comm.Send([work,MPI.INT], dest=o_status.Get_source(), tag=i_work_tag) cl21 += result for i_rank_out in range(1,i_size): o_comm.Recv([result, MPI.DOUBLE], source=MPI.ANY_SOURCE, status=o_status, tag=MPI.ANY_TAG) cl21 += result print "cl21 = %.6f, result = %.6f" % (np.average(cl21), np.average(result)) o_comm.Send([np.array([9999], dtype='i'), MPI.INT], dest=o_status.Get_source(), tag=i_die_tag) #slave loop: else: while(1): o_comm.Recv([work, MPI.INT], source=0, status=o_status, tag=MPI.ANY_TAG) if (o_status.Get_tag() == i_die_tag): break i_r = work[0] #print ("i_r: %i" % i_r) #print ("alm_masked: ", alm_masked) #print ("cltt_denom: ", cltt_denom) #print ("beam: ", beam) #print ("mask: ", mask) #print ("fn_map: ", fn_map) # create Alm, Blm (almxfl with alm_masked and beta / cltt_denom # * beam, etc.) Alm = np.zeros(alm_masked.shape[0],complex) Blm = np.zeros(alm_masked.shape[0],complex) clAB2 = np.zeros(nl+1) clABB = np.zeros(nl+1) #Alm = hp.almxfl(alm_masked, alpha[:,i_r] / cltt_denom * beam) #Blm = hp.almxfl(alm_masked, beta[:,i_r] / cltt_denom * beam) #for li in xrange(2,nl): # I = hp.Alm.getidx(nl,li,np.arange(min(nl,li)+1)) # Alm[I]=alpha[li-2][i_r]*(alm_masked[I]*beam[li]+nlm[I])/(cltt_denom[li]*beam[li]**2+noise[li]) # Blm[I]=beta[li-2][i_r]*(alm_masked[I]*beam[li]+nlm[I])/(cltt_denom[li]*beam[li]**2+noise[li]) if (run_type == 'fnl'): for li in xrange(2,nl): I = hp.Alm.getidx(nl,li,np.arange(min(nl,li)+1)) Alm[I]=alpha[li-2][i_r]*(alm_masked[I]*beam[li]+nlm[I])/(cltt_denom[li]*beam[li]**2+noise[li]) Blm[I]=beta[li-2][i_r]*(alm_masked[I]*beam[li]+nlm[I])/(cltt_denom[li]*beam[li]**2+noise[li]) else: for li in xrange(2,nl): I = hp.Alm.getidx(nl,li,np.arange(min(nl,li)+1)) Alm[I]=alpha[li-2][i_r]*(alm_masked[I])/cltt_denom[li]*beam[li] Blm[I]=beta[li-2][i_r]*(alm_masked[I])/cltt_denom[li]*beam[li] ############################# DEBUG ################################ if i_r == 0: cltt_Alm = hp.alm2cl(Alm) cltt_Blm = hp.alm2cl(Blm) np.savetxt('debug2/cltt_%s_Alm.dat' % run_type, cltt_Alm) np.savetxt('debug2/cltt_%s_Blm.dat' % run_type, cltt_Blm) #################################################################### #An = hp.alm2map(Alm, nside=nside, fwhm=0.00145444104333, # verbose=False) #Bn = hp.alm2map(Blm, nside=nside, fwhm=0.00145444104333, # verbose=False) An = hp.alm2map(Alm, nside=nside) Bn = hp.alm2map(Blm, nside=nside) ############################# DEBUG ################################ if i_r == 0: cltt_An = hp.anafast(An) cltt_Bn = hp.anafast(Bn) np.savetxt('debug2/cltt_%s_An.dat' % run_type, cltt_An) np.savetxt('debug2/cltt_%s_Bn.dat' % run_type, cltt_Bn) #################################################################### An = An * mask Bn = Bn * mask ############################# DEBUG ################################ #if i_r == 0: # print "saving alpha, beta for %i" % i_r # np.savetxt('debug2/alpha_ir_%i' % i_r, alpha[:,i_r]) # 
np.savetxt('debug2/beta_ir_%i' % i_r, beta[:,i_r]) # print "(An * Bn)[:10] == An[:10] * Bn[:10]:", (An * Bn)[:10] == An[:10] * Bn[:10] #################################################################### B2lm = hp.map2alm(Bn*Bn, lmax=nl) ABlm = hp.map2alm(An*Bn, lmax=nl) ############################# DEBUG ################################ if i_r == 0: cltt_B2lm = hp.alm2cl(B2lm) cltt_ABlm = hp.alm2cl(ABlm) np.savetxt('debug2/cltt_%s_B2lm.dat' % run_type, cltt_B2lm) np.savetxt('debug2/cltt_%s_ABlm.dat' % run_type, cltt_ABlm) #################################################################### #clAB2 = hp.alm2cl(Alm, B2lm, lmax=nl) #clABB = hp.alm2cl(ABlm, Blm, lmax=nl) for li in xrange(2,nl+1): I = hp.Alm.getidx(nl,li,np.arange(min(nl,li)+1)) clAB2[li] = (Alm[I[0]]*B2lm[I[0]].conj() +2.*sum(Alm[I[1:]]*B2lm[I[1:]].conj()))/(2.0*li+1.0) clABB[li] = (Blm[I[0]]*ABlm[I[0]].conj() +2.*sum(Blm[I[1:]]*ABlm[I[1:]].conj()))/(2.0*li+1.0) ############################# DEBUG ################################ if i_r == 0: np.savetxt('debug2/clAB2_%s.dat' % run_type, clAB2) np.savetxt('debug2/clABB_%s.dat' % run_type, clABB) #################################################################### clAB2 = clAB2[1:] clABB = clABB[1:] result = np.zeros(nl, dtype='d') result += (clAB2 + 2 * clABB) * r[i_r]**2. * dr[i_r] ############################# DEBUG ################################ np.savetxt('debug2/cl21_%s.dat' % run_type, result) #################################################################### print ("finished work for r=%i, dr=%.2f, avg(alpha)=%.2f, avg(beta)=%.2f, avg(result)=%.4g" % (int(r[i_r]), dr[i_r], np.average(alpha[:,i_r]), np.average(beta[:,i_r]), np.average(result))) ############################# DEBUG ################################ #if i_r == 0: # print "finished debug -- goodbye!" # exit() #################################################################### o_comm.Send([result,MPI.DOUBLE], dest=0, tag=1) f_t8 = time.time() if (i_rank == 0): print "" print ("Saving power spectrum to %s (not mll corrected)" % fn_cl21_no_mll) np.savetxt(fn_cl21_no_mll, cl21) print "" print "Saving power spectrum to %s (mll corrected)" % fn_cl21 cl21 = np.dot(mll_inv, cl21) np.savetxt(fn_cl21, cl21) return
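# The explicit m-loop above implements the cross-spectrum estimator
# C_l = [Re(a_l0 b_l0*) + 2 sum_{m>0} Re(a_lm b_lm*)] / (2l + 1), which matches
# what hp.alm2cl returns.  A quick consistency check on toy alms
# (flat toy spectrum assumed):
import numpy as np
import healpy as hp

lmax = 32
cl = np.ones(lmax + 1) * 1e-10
alm1 = hp.synalm(cl, lmax=lmax)
alm2 = hp.synalm(cl, lmax=lmax)

cl_manual = np.zeros(lmax + 1)
for ell in range(lmax + 1):
    idx = hp.Alm.getidx(lmax, ell, np.arange(ell + 1))
    terms = alm1[idx] * np.conj(alm2[idx])
    cl_manual[ell] = (terms[0].real + 2.0 * terms[1:].real.sum()) / (2.0 * ell + 1.0)

print(np.allclose(cl_manual, hp.alm2cl(alm1, alm2)))  # expected True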
paper_rich = pb.bradley_paper_polarized_beams(bradley_paper_file)
pbxx = paper_rich['xx']
pbxx = hpt.rotate_healpix_map(pbxx, [0, -120])
#hp.mollview(pbxx)
#hp.mollview(hpt.rotate_healpix_map(pbxx,[0,-120]))
#hp.graticule()
#plt.show()
npix = pbxx.size
nside = hp.npix2nside(npix)
lmax = 3*nside - 1
alm_pbxx = hp.map2alm(pbxx, lmax=lmax)
cl_pbxx = hp.alm2cl(alm_pbxx)
l, m = hp.Alm.getlm(lmax)
rot = np.exp(-1j*np.radians(45.)*m)
rotmap = hp.alm2map(alm_pbxx*rot, nside)
#hp.mollview(pbxx)
#hp.mollview(rotmap)
#plt.show()
xydip = pb.xy_ideal_dipole()
# F = f_theta(theta,phi) theta_hat + f_phi phi_hat
# theta_hat = ()xhat + ()yhat + ()zhat
Baltaz = np.array([[np.zeros(npix)], [xydip['xt']], [xydip['xp']]])
hprot = [0, 90]
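# The phase factor exp(-i m psi) used above rotates the map in azimuth without
# changing |a_lm|, so the angular power spectrum from alm2cl is unaffected.
# A small check with a toy spectrum (assumed, not the PAPER beam itself):
import numpy as np
import healpy as hp

nside = 32
lmax = 3 * nside - 1
alm = hp.synalm(np.ones(lmax + 1) * 1e-6, lmax=lmax)
l, m = hp.Alm.getlm(lmax)
rot = np.exp(-1j * np.radians(45.0) * m)
print(np.allclose(hp.alm2cl(alm), hp.alm2cl(alm * rot)))  # expected True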
def measure_dlcl(mode='dl', frequency=353, mask=None, lmax=600, lmin=40,
                 put_mask=True, psky=70, mask_sources=True, apodization=2,
                 beam=None, beamP=None,
                 Imap=None, Qmap=None, Umap=None,
                 Imap2=None, Qmap2=None, Umap2=None,
                 fsky_correction=True):
    """
    If mode=='dl', returns the D quantity = Cl * ls*(ls+1)/2./np.pi * 1e12
    [units: uK_CMB^2]; if mode=='cl', returns Cl itself (fsky-corrected).
    """
    if put_mask:
        if mask is None:
            print 'reading masks...'
            mask = pf.get_planck_mask(psky=psky, mask_sources=mask_sources,
                                      apodization=apodization)
    else:
        # no masking: unity mask on the same pixelization as the input map
        # (assumes Imap is provided when put_mask is False)
        mask = Imap.copy()*0. + 1.

    if (beam is None) or (beamP is None):
        print 'reading beams...'
        beam_file = pf.PLANCK_DATA_PATH + 'HFI_RIMO_Beams-100pc_R2.00.fits'
        hdulist = pf.fits.open(beam_file)
        beam = hdulist[pf.BEAM_INDEX['{}'.format(frequency)]].data.NOMINAL[0][:lmax+1]
        beamP = hdulist[pf.BEAM_INDEX['{}P'.format(frequency)]].data.NOMINAL[0][:lmax+1]
    beam = beam[lmin:lmax+1]
    beamP = beamP[lmin:lmax+1]

    fsky = mask.sum() / len(mask)
    ls = np.arange(lmin, lmax+1)
    if mode == 'dl':
        factor = ls * (ls+1) / (2.*np.pi) * 1e12 / fsky
    elif mode == 'cl':
        factor = 1. / fsky

    if fsky_correction:
        fcfilename = pf.FGS_RESULTS_PATH + 'fskycorr_fg_psky{}_apo{}_lmax1000_TT_EE_BB.npy'.format(psky, apodization)
        if os.path.exists(fcfilename):
            fcorr = np.load(fcfilename)
            fcorr_TT = fcorr[0][lmin:lmax+1]
            fcorr_EE = fcorr[1][lmin:lmax+1]
            fcorr_BB = fcorr[2][lmin:lmax+1]
        else:
            fcorr = None
    else:
        fcorr = None

    if (Imap is None) or (Qmap is None) or (Umap is None):
        print 'reading maps...'
        mapname1 = pf.PLANCK_DATA_PATH + 'HFI_SkyMap_{}_2048_R2.02_halfmission-1.fits'.format(frequency)
        mapname2 = pf.PLANCK_DATA_PATH + 'HFI_SkyMap_{}_2048_R2.02_halfmission-2.fits'.format(frequency)
        Imap = hp.read_map(mapname1, field=0)
        Imap2 = hp.read_map(mapname2, field=0)
        Qmap, Umap = hp.read_map(mapname1, field=(1, 2))
        Qmap2, Umap2 = hp.read_map(mapname2, field=(1, 2))

    Tlm1 = hp.map2alm(Imap*mask, lmax=lmax)
    Elm1, Blm1 = hp.map2alm_spin((Qmap*mask, Umap*mask), 2, lmax=lmax)
    if (Imap2 is None) or (Qmap2 is None) or (Umap2 is None):
        Tlm2 = Tlm1
        Elm2 = Elm1
        Blm2 = Blm1
    else:
        Tlm2 = hp.map2alm(Imap2*mask, lmax=lmax)
        Elm2, Blm2 = hp.map2alm_spin((Qmap2*mask, Umap2*mask), 2, lmax=lmax)

    TT = hp.alm2cl(Tlm1, Tlm2)
    EE = hp.alm2cl(Elm1, Elm2)
    BB = hp.alm2cl(Blm1, Blm2)
    EE = EE[lmin:] * factor / beamP**2
    TT = TT[lmin:] * factor / beam**2
    BB = BB[lmin:] * factor / beamP**2

    TE = hp.alm2cl(Tlm1, Elm2)
    EB = hp.alm2cl(Blm1, Elm2)
    TB = hp.alm2cl(Blm1, Tlm2)
    TE = TE[lmin:] * factor / beam / beamP
    TB = TB[lmin:] * factor / beam / beamP
    EB = EB[lmin:] * factor / beamP**2

    if fcorr is not None:
        TT *= fcorr_TT
        EE *= fcorr_EE
        BB *= fcorr_BB

    return ls, TT, EE, BB, TE, TB, EB
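# measure_dlcl() above forms masked pseudo-spectra, deconvolves the beams, and
# applies a simple 1/fsky correction (optionally rescaling to
# D_l = l(l+1) C_l / 2pi).  A minimal temperature-only sketch of that
# correction with a toy spectrum and a crude latitude cut (both assumptions):
import numpy as np
import healpy as hp

nside, lmax = 64, 128
ells = np.arange(lmax + 1)
cl_in = np.zeros(lmax + 1)
cl_in[2:] = 1e-10 / ells[2:] ** 2
sky = hp.synfast(cl_in, nside)

theta = hp.pix2ang(nside, np.arange(hp.nside2npix(nside)))[0]
mask = np.ones(hp.nside2npix(nside))
mask[np.abs(np.cos(theta)) < 0.2] = 0.0         # crude "galactic" cut
fsky = mask.mean()

tlm = hp.map2alm(sky * mask, lmax=lmax)
cl_pseudo = hp.alm2cl(tlm)
cl_hat = cl_pseudo / fsky                       # fsky-corrected estimate
dl_hat = cl_hat * ells * (ells + 1) / (2.0 * np.pi)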
# for i in xrange(CAB2l.shape[1]):
#     for j in xrange(CAB2l.shape[0]):
#         clab2[i] += dR*R[j][i]*R[j][i]*CAB2l[j][i]
#         clabb[i] += dR*R[j][i]*R[j][i]*CABBl[j][i]
cloc = (clab2[r] + 2 * clabb[r]) * (R[r]) ** 2.0 * dR[r]
############## DEBUG ##############
clAB2_jon = clAB2_jon[1:]
clABB_jon = clABB_jon[1:]
result_jon = zeros(nl, dtype="d")
result_jon += (clAB2_jon + 2 * clABB_jon) * (R[r]) ** 2.0 * dR[r]
###################################
cltt_Alm_jon = hp.alm2cl(Alm_jon)
cltt_Blm_jon = hp.alm2cl(Blm_jon)
cltt_An = hp.anafast(An)
cltt_Bn = hp.anafast(Bn)
cltt_B2lm_jon = hp.alm2cl(B2lm_jon)
cltt_ABlm_jon = hp.alm2cl(ABlm_jon)
savetxt("../debug2/cltt_joe_Alm.dat", cltt_Alm_jon)
savetxt("../debug2/cltt_joe_Blm.dat", cltt_Blm_jon)
savetxt("../debug2/cltt_joe_An.dat", cltt_An)
savetxt("../debug2/cltt_joe_Bn.dat", cltt_Bn)
savetxt("../debug2/cltt_joe_B2lm.dat", cltt_B2lm_jon)
savetxt("../debug2/cltt_joe_ABlm.dat", cltt_ABlm_jon)
savetxt("../debug2/clAB2_joe.dat", clAB2_jon)
savetxt("../debug2/clABB_joe.dat", clABB_jon)
print("r: %i" % R[r])
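# The commented-out double loop above is the radial quadrature
# C_l^(2,1) = sum_r [C_l^{A,B^2}(r) + 2 C_l^{AB,B}(r)] r^2 dr written out
# index by index.  With the per-shell spectra stacked into arrays it becomes a
# single vectorized sum (shapes and values here are illustrative assumptions):
import numpy as np

nr, nl = 50, 100
clAB2 = np.random.rand(nr, nl) * 1e-12   # C_l^{A,B^2}(r), one row per shell
clABB = np.random.rand(nr, nl) * 1e-12   # C_l^{AB,B}(r)
R = np.linspace(1.3e4, 1.0e2, nr)        # comoving radii (toy values)
dR = np.full(nr, 50.0)                   # shell widths (toy values)

cl21 = np.sum((clAB2 + 2.0 * clABB) * (R**2 * dR)[:, None], axis=0)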
prt("Writing gradient") healpy.write_map("%s/grad%03d_%d.fits" % (args.odir, sim, args.order), [phi_dtheta, phi_dphi]) del aphi prt("Computing lensed positions") opos, rot = offset_pos(ipos, phi_dtheta, phi_dphi, pol=args.pol, geodesic=args.geodesic) del phi, phi_dtheta, phi_dphi prt("Starting taylor expansion") # Interpolate maps one at a time maps = [] for comp in cmb: for m in taylor_interpol_iter(comp, opos, args.order, verbose=args.verbose, lmax=args.lmax): pass maps.append(m) del opos, cmb prt("Rotating") rm = apply_rotation(maps, rot) if "l" in args.output: prt("Writing lensed map") healpy.write_map("%s/lcmb%03d_%d.fits" % (args.odir, sim, args.order), rm) if "s" in args.output: prt("Computing power spectrum") alm = healpy.map2alm(rm, use_weights=True, iter=1) spec = np.array(healpy.alm2cl(alm)) if spec.ndim == 1: spec = np.reshape(spec, [1,spec.size]) n = spec.shape[1] l = np.arange(spec.shape[1]) spec[:,1:] *= l[1:n]*(l[1:n]+1)/(2*np.pi) prt("Writing spectrum") np.savetxt("%s/spec%03d_%d.txt" % (args.odir, sim, args.order), spec.T, fmt=" %15.7e")
def main(run_type="data", nsim=0, fnl=0): """ MPI Setup """ o_comm = MPI.COMM_WORLD i_rank = o_comm.Get_rank() # current core number -- e.g., i in arange(i_size) i_size = o_comm.Get_size() # number of cores assigned to run this program o_status = MPI.Status() i_work_tag = 0 i_die_tag = 1 """ Loading and calculating power spectrum components """ if run_type == "fnl": nl = 1024 else: nl = 1499 if run_type == "data": fn_map = h._fn_map elif run_type == "sim": fn_map = "output/map_sim_%i.fits" % nsim elif run_type == "fnl": # print "fnl value: %i" % fnl fn_map = "data/fnl_sims/map_fnl_%i_sim_%i.fits" % (int(fnl), nsim) # (1) read map (either map_data or map_sim), mask, mll, and create mll_inv; map_in = hp.read_map(fn_map) mask = hp.read_map(h._fn_mask) if run_type == "fnl": mask = 1.0 fn_mll = "output/na_mll_%i_lmax.npy" % nl mll = np.load(fn_mll) if run_type == "fnl": mll = np.identity(nl) mll_inv = np.linalg.inv(mll) nside = hp.get_nside(map_in) if i_rank == 0: if run_type == "data": fn_cl21 = "output/cl21_data.dat" fn_cl21_no_mll = "output/cl21_data_no_mll.dat" elif run_type == "sim": fn_cl21 = "output/cl21_sim_%i.dat" % nsim fn_cl21_no_mll = "output/cl21_no_mll_%i.dat" % nsim elif run_type == "fnl": fn_cl21 = "output/cl21_fnl_%i_sim_%i.dat" % (int(fnl), nsim) fn_cl21_no_mll = "output/cl21_fnl_%i_sim_%i_no_mll.dat" % (int(fnl), nsim) f_t1 = time.time() print "" print "Run parameters:" print "(Using %i cores)" % i_size print "nl: %i, nside: %i, map: %s" % (nl, nside, fn_map) print "beam: %s, alpha_beta: %s, cltt: %s" % (h._fn_beam, h._fn_alphabeta, h._fn_cltt) print "" print "Loading ell, r, dr, alpha, beta, cltt, and beam..." # (2) normalize, remove mono-/dipole, and mask map to create map_masked; map_in /= 1e6 * 2.7 map_in = hp.remove_dipole(map_in) map_masked = map_in * mask # (3) create alm_masked (map2alm on map_masked), cltt_masked (anafast on # map_masked), and cltt_corrected (dot cltt_masked with mll_inv) alm_masked = hp.map2alm(map_masked) alm_masked = alm_masked[: hp.Alm.getsize(nl)] cltt_masked = hp.anafast(map_masked) cltt_masked = cltt_masked[:nl] cltt_corrected = np.dot(mll_inv, cltt_masked) # stuff with alpha, beta, r, dr, and beam l, r, dr, alpha, beta = np.loadtxt(h._fn_alphabeta, usecols=(0, 1, 2, 3, 4), unpack=True, skiprows=3) l = np.unique(l) r = np.unique(r)[::-1] nr = len(r) cltt_denom = np.load("output/cltt_theory.npy") # replace with 'output/na_cltt.npy' cltt_denom = cltt_denom[:nl] alpha = alpha.reshape(len(l), nr) beta = beta.reshape(len(l), nr) dr = dr.reshape(len(l), nr) dr = dr[0] if run_type != "fnl": beam = np.load(h._fn_beam) else: beam = np.ones(len(cltt_denom)) l = l[:nl] beam = beam[:nl] alpha = alpha[:nl, :] beta = beta[:nl, :] if i_rank == 0: print "nr: %i, nl: %i" % (nr, nl) f_t2 = time.time() if i_rank == 0: print "" print "Time to create alms from maps: %.2f s" % (f_t2 - f_t1) print "Calculating full skewness power spectrum..." cl21 = np.zeros(nl) work = np.zeros(1, dtype="i") result = np.zeros(nl, dtype="d") # master loop if i_rank == 0: # send initial jobs for i_rank_out in range(1, i_size): work = np.array([i_rank_out - 1], dtype="i") o_comm.Send([work, MPI.INT], dest=i_rank_out, tag=i_work_tag) for i_r in range(i_size - 1, nr): if i_r % (nr / 10) == 0: print "Finished %i%% of jobs... 
(%.2f s)" % (i_r * 100 / nr, time.time() - f_t2) work = np.array([i_r], dtype="i") o_comm.Recv([result, MPI.DOUBLE], source=MPI.ANY_SOURCE, status=o_status, tag=MPI.ANY_TAG) # print "received results from core %i" % o_status.Get_source() o_comm.Send([work, MPI.INT], dest=o_status.Get_source(), tag=i_work_tag) cl21 += result for i_rank_out in range(1, i_size): o_comm.Recv([result, MPI.DOUBLE], source=MPI.ANY_SOURCE, status=o_status, tag=MPI.ANY_TAG) cl21 += result print "cl21 = %.6f, result = %.6f" % (np.average(cl21), np.average(result)) o_comm.Send([np.array([9999], dtype="i"), MPI.INT], dest=o_status.Get_source(), tag=i_die_tag) # slave loop: else: while 1: o_comm.Recv([work, MPI.INT], source=0, status=o_status, tag=MPI.ANY_TAG) if o_status.Get_tag() == i_die_tag: break i_r = work[0] # create Alm, Blm (almxfl with alm_masked and beta / cltt_denom # * beam, etc.) Alm = hp.almxfl(alm_masked, alpha[:, i_r] / cltt_denom * beam) Blm = hp.almxfl(alm_masked, beta[:, i_r] / cltt_denom * beam) ############################# DEBUG ################################ # cltt_Alm = hp.alm2cl(Alm) # cltt_Blm = hp.alm2cl(Blm) # np.savetxt('debug2/cltt_%s_Alm.dat' % run_type, cltt_Alm) # np.savetxt('debug2/cltt_%s_Blm.dat' % run_type, cltt_Blm) #################################################################### An = hp.alm2map(Alm, nside=nside, fwhm=0.00145444104333, verbose=False) Bn = hp.alm2map(Blm, nside=nside, fwhm=0.00145444104333, verbose=False) ############################# DEBUG ################################ # cltt_An = hp.anafast(An) # cltt_Bn = hp.anafast(Bn) # np.savetxt('debug2/cltt_%s_An.dat' % run_type, cltt_An) # np.savetxt('debug2/cltt_%s_Bn.dat' % run_type, cltt_Bn) #################################################################### An = An * mask Bn = Bn * mask ############################# DEBUG ################################ if i_r == 100: print "saving alpha, beta for %i" % i_r np.savetxt("debug2/alpha_ir_%i" % i_r, alpha[:, i_r]) np.savetxt("debug2/beta_ir_%i" % i_r, beta[:, i_r]) print "(An * Bn)[:10] == An[:10] * Bn[:10]:", (An * Bn)[:10] == An[:10] * Bn[:10] #################################################################### B2lm = hp.map2alm(Bn * Bn, lmax=nl) ABlm = hp.map2alm(An * Bn, lmax=nl) ############################# DEBUG ################################ # cltt_B2lm = hp.alm2cl(B2lm) # cltt_ABlm = hp.alm2cl(ABlm) # np.savetxt('debug2/cltt_%s_B2lm.dat' % run_type, cltt_B2lm) # np.savetxt('debug2/cltt_%s_ABlm.dat' % run_type, cltt_ABlm) #################################################################### clAB2 = hp.alm2cl(Alm, B2lm, lmax=nl) clABB = hp.alm2cl(ABlm, Blm, lmax=nl) ############################# DEBUG ################################ # np.savetxt('debug2/clAB2_%s.dat' % run_type, clAB2) # np.savetxt('debug2/clABB_%s.dat' % run_type, clABB) #################################################################### clAB2 = clAB2[1:] clABB = clABB[1:] result = np.zeros(nl, dtype="d") result += (clAB2 + 2 * clABB) * r[i_r] ** 2.0 * dr[i_r] ############################# DEBUG ################################ # np.savetxt('debug2/cl21_%s.dat' % run_type, result) #################################################################### print ( "finished work for r=%i, dr=%.2f, avg(alpha)=%.2f, avg(beta)=%.2f, avg(result)=%.4g" % (int(r[i_r]), dr[i_r], np.average(alpha[:, i_r]), np.average(beta[:, i_r]), np.average(result)) ) ############################# DEBUG ################################ # print "finished debug -- goodbye!" 
# exit() #################################################################### o_comm.Send([result, MPI.DOUBLE], dest=0, tag=1) f_t8 = time.time() if i_rank == 0: print "" print ("Saving power spectrum to %s (not mll corrected)" % fn_cl21_no_mll) np.savetxt(fn_cl21_no_mll, cl21) print "" print "Saving power spectrum to %s (mll corrected)" % fn_cl21 cl21 = np.dot(mll_inv, cl21) np.savetxt(fn_cl21, cl21) return
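# Both versions of main() above unbias the accumulated spectrum by applying
# the inverse of a mask mode-coupling matrix M_ll' loaded from disk.  The
# correction itself is a linear solve; a sketch with a made-up, diagonally
# dominant M_ll' standing in for the real matrix (which comes from the mask):
import numpy as np

nl = 100
cl_pseudo = np.random.rand(nl) * 1e-10                     # stand-in masked spectrum
mll = 0.8 * np.eye(nl) + 0.1 * np.eye(nl, k=1) + 0.1 * np.eye(nl, k=-1)
cl_corrected = np.linalg.solve(mll, cl_pseudo)             # same result as dot(inv(mll), cl_pseudo)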
def main(i_sim=1): ''' MPI Setup ''' o_comm = MPI.COMM_WORLD i_rank = o_comm.Get_rank() # current core number -- e.g., i in arange(i_size) i_size = o_comm.Get_size() # number of cores assigned to run this program o_status = MPI.Status() i_work_tag = 0 i_die_tag = 1 ''' Loading and calculating power spectrum components ''' # Get run parameters s_fn_params = 'data/params.pkl' (i_lmax, i_nside, s_fn_map, s_map_name, s_fn_mask, s_fn_mll, s_fn_beam, s_fn_alphabeta, s_fn_cltt) = get_params(s_fn_params) #s_fn_cltt = ('data/fnl_sims/cl_fnl_0_sim_%04d.fits' % (i_sim,)) f_fnl = 1.0 if (i_rank == 0): s_fn_cl21_data = ('data/cl21_fnl_sims/cl21_fnl_%i_sim_%04d.dat' % (int(f_fnl), i_sim)) s_fn_cl21_data_no_mll = ('data/cl21_fnl_sims/cl21_fnl_%i_sim_%04d_no_mll.dat' % (int(f_fnl), i_sim)) f_t1 = time.time() print "" print "Run parameters:" print "(Using %i cores)" % i_size print "lmax: %i, nside: %i, map name: %s" % (i_lmax, i_nside, s_map_name) print "beam: %s, alpha_beta: %s, cltt: %s" % (s_fn_beam, s_fn_alphabeta, s_fn_cltt) print "" print "Loading ell, r, dr, alpha, beta, cltt, and beam..." i_lmax = 1024 na_mask = hp.read_map(s_fn_mask) s_fn_mll = 'output/na_mll_%i_lmax.npy' % i_lmax na_mll = np.load(s_fn_mll) na_mll_inv = np.linalg.inv(na_mll) na_l, na_r, na_dr, na_alpha, na_beta = np.loadtxt(s_fn_alphabeta, usecols=(0,1,2,3,4), unpack=True, skiprows=3) na_l = np.unique(na_l) na_r = np.unique(na_r)[::-1] na_l = na_l[:i_lmax] i_num_ell = len(na_l) i_num_r = len(na_r) if (i_rank == 0): print "i_num_r: %i, i_num_ell: %i" % (i_num_r, i_num_ell) # assuming number of r's is set correctly and number of ells isn't necessarily # related to lmax, i_num_ell, etc. i_num_ell_alpha_beta_r = len(na_alpha)/i_num_r na_alpha = na_alpha.reshape(i_num_ell_alpha_beta_r, i_num_r) na_beta = na_beta.reshape(i_num_ell_alpha_beta_r, i_num_r) na_dr = na_dr.reshape(i_num_ell_alpha_beta_r, i_num_r) na_dr = na_dr[0] na_alpha = na_alpha[:i_num_ell,:] na_beta = na_beta[:i_num_ell,:] na_dr = na_dr[:i_num_ell] try: na_cltt = np.load(s_fn_cltt) except: na_cltt = np.loadtxt(s_fn_cltt) na_cltt = na_cltt[:i_num_ell] na_bl = np.load(s_fn_beam) na_bl = na_bl[:i_num_ell] # f_t2 = time.time() if (i_rank == 0): print "" print "Calculating full skewness power spectrum..." 
#na_alm = hp.synalm(na_cltt, lmax=i_num_ell, verbose=False) s_fn_almg = ('data/fnl_sims/alm_l_%04d_v3.fits' % (i_sim,)) na_almg = hp.read_alm(s_fn_almg) na_almg = na_almg[:hp.Alm.getsize(i_num_ell)] s_fn_almng = ('data/fnl_sims/alm_nl_%04d_v3.fits' % (i_sim,)) na_almng = hp.read_alm(s_fn_almng) na_almng = na_almng[:hp.Alm.getsize(i_num_ell)] na_alm = na_almg + f_fnl * na_almng # f_t3 = time.time() na_cl21_data = np.zeros(i_num_ell) na_work = np.zeros(1, dtype='i') na_result = np.zeros(i_num_ell, dtype='d') # if (i_rank == 0): # print ("na_alm: %s, na_alpha: %s, na_beta: %s, na_cltt: %s, na_bl: %s" % # (str(np.shape(na_alm)), str(np.shape(na_alpha)), # str(np.shape(na_beta)), str(np.shape(na_cltt)), # str(np.shape(na_bl)) )) # else: # print "core: %i" % i_rank # print ("na_alm: %s, na_alpha: %s, na_beta: %s, na_cltt: %s, na_bl: %s" % # (str(np.shape(na_alm)), str(np.shape(na_alpha)), # str(np.shape(na_beta)), str(np.shape(na_cltt)), # str(np.shape(na_bl)) )) # master loop if (i_rank == 0): # send initial jobs # print ("na_alm: %s, na_alpha: %s, na_beta: %s, na_cltt: %s, na_bl: %s" % # (str(np.shape(na_alm)), str(np.shape(na_alpha)), # str(np.shape(na_beta)), str(np.shape(na_cltt)), # str(np.shape(na_bl)) )) for i_rank_out in range(1,i_size): na_work = np.array([i_rank_out-1], dtype='i') o_comm.Send([na_work, MPI.INT], dest=i_rank_out, tag=i_work_tag) for i_r in range(i_size-1,i_num_r): if (i_r % (i_num_r / 10) == 0): print "Finished %i%% of jobs... (%.2f s)" % (i_r * 100 / i_num_r, time.time() - f_t1) na_work = np.array([i_r], dtype='i') o_comm.Recv([na_result, MPI.DOUBLE], source=MPI.ANY_SOURCE, status=o_status, tag=MPI.ANY_TAG) #print "received results from core %i" % o_status.Get_source() o_comm.Send([na_work,MPI.INT], dest=o_status.Get_source(), tag=i_work_tag) na_cl21_data += na_result for i_rank_out in range(1,i_size): o_comm.Recv([na_result, MPI.DOUBLE], source=MPI.ANY_SOURCE, status=o_status, tag=MPI.ANY_TAG) na_cl21_data += na_result print "cl21 so far..." 
print na_cl21_data[:10], na_cl21_data[10:] o_comm.Send([np.array([9999], dtype='i'), MPI.INT], dest=o_status.Get_source(), tag=i_die_tag) #slave loop: else: while(1): o_comm.Recv([na_work, MPI.INT], source=0, status=o_status, tag=MPI.ANY_TAG) if (o_status.Get_tag() == i_die_tag): break i_r = na_work[0] #print "doing work for r = %i on core %i" % (i_r, i_rank) # print ("na_alm: %s, na_alpha: %s, na_beta: %s, na_cltt: %s, na_bl: %s" % # (str(np.shape(na_alm)), str(np.shape(na_alpha)), # str(np.shape(na_beta)), str(np.shape(na_cltt)), # str(np.shape(na_bl)) )) na_Alm = hp.almxfl(na_alm, np.nan_to_num(na_alpha[:,i_r] / na_cltt) * na_bl) na_Blm = hp.almxfl(na_alm, np.nan_to_num(na_beta[:,i_r] / na_cltt) * na_bl) # f_t4 = time.time() na_An = hp.alm2map(na_Alm, nside=i_nside, fwhm=0.00145444104333, verbose=False) na_Bn = hp.alm2map(na_Blm, nside=i_nside, fwhm=0.00145444104333, verbose=False) # *REMBER TO MULTIPLY BY THE MASK!* na_An = na_An * na_mask na_Bn = na_Bn * na_mask # f_t5 = time.time() #print "starting map2alm for r = %i on core %i" % (i_r, i_rank) na_B2lm = hp.map2alm(na_Bn*na_Bn, lmax=i_num_ell) na_ABlm = hp.map2alm(na_An*na_Bn, lmax=i_num_ell) #print "finished map2alm for r = %i on core %i" % (i_r, i_rank) # f_t6 = time.time() na_clAB2 = hp.alm2cl(na_Alm, na_B2lm, lmax=i_num_ell) na_clABB = hp.alm2cl(na_ABlm, na_Blm, lmax=i_num_ell) na_clAB2 = na_clAB2[1:] na_clABB = na_clABB[1:] #f_t7 = time.time() # print ("na_clAB2: %s, na_clABB: %s, na_r: %s, na_dr: %s" % # (str(np.shape(na_clAB2)), str(np.shape(na_clABB)), # str(np.shape(na_r)), str(np.shape(na_dr)) )) na_result = np.zeros(i_num_ell, dtype='d') na_result += (na_clAB2 + 2 * na_clABB) * na_r[i_r]**2. * na_dr[i_r] #print "finished work for r = %i on core %i" % (i_r, i_rank) o_comm.Send([na_result,MPI.DOUBLE], dest=0, tag=1) # print "Load time: %.2f s" % (f_t2 - f_t1) # print "synalm time: %.2f s" % (f_t3 - f_t2) # print "almxfl time: %.2f s" % ((f_t4 - f_t3) / 2.) # print "alm2map time: %.2f s" % ((f_t5 - f_t4) / 2.) # print "map2alm time: %.2f s" % ((f_t6 - f_t5) / 2.) # print "alm2cl time: %.2f s" % ((f_t7 - f_t6) / 2.) f_t8 = time.time() if (i_rank == 0): print "" print ("Saving power spectrum to %s (not mll corrected)" % s_fn_cl21_data_no_mll) np.savetxt(s_fn_cl21_data_no_mll, na_cl21_data) print "" print "Saving power spectrum to %s (mll corrected)" % s_fn_cl21_data na_cl21_data = np.dot(na_mll_inv, na_cl21_data) np.savetxt(s_fn_cl21_data, na_cl21_data) # print "Finished in %.2f s" % (f_t8 - f_t1) # # print "Load time: %.2f s" % (f_t2 - f_t1) # # print "synalm time: %.2f s" % (f_t3 - f_t2) # # print "almxfl time: %.2f s" % ((f_t4 - f_t3) / 2.) # # print "alm2map time: %.2f s" % ((f_t5 - f_t4) / 2.) # # print "map2alm time: %.2f s" % ((f_t6 - f_t5) / 2.) # # print "alm2cl time: %.2f s" % ((f_t7 - f_t6) / 2.) return
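# The fnl runs above build each non-Gaussian sky as a_lm = a_lm^G + fNL * a_lm^NG
# from precomputed Gaussian and non-Gaussian alm files.  Only that combination
# step is sketched here, with toy Gaussian alms standing in for those inputs:
import numpy as np
import healpy as hp

lmax = 64
cl = np.ones(lmax + 1) * 1e-10
almg = hp.synalm(cl, lmax=lmax)              # stand-in for the alm_l_*.fits input
almng = hp.synalm(cl * 1e-3, lmax=lmax)      # stand-in for the alm_nl_*.fits input
fnl = 30.0
alm = almg + fnl * almng
cl_total = hp.alm2cl(alm)                    # spectrum of the combined sky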
bl = np.load('output/na_bl.npy')
ir = 10
alm_data = alm_data[:hp.Alm.getsize(nl)]
alm_sim = alm_sim[:hp.Alm.getsize(nl)]
cltt_corrected = cltt_corrected[:nl]
bl = bl[:nl]
# data
Alm_data = hp.almxfl(alm_data, alpha[:, ir] / cltt_corrected * bl)
Blm_data = hp.almxfl(alm_data, beta[:, ir] / cltt_corrected * bl)
# make cls and save them away
cltt_data_Alm = hp.alm2cl(Alm_data)
cltt_data_Blm = hp.alm2cl(Blm_data)
np.savetxt('debug/cltt_data_Alm.dat', cltt_data_Alm)
np.savetxt('debug/cltt_data_Blm.dat', cltt_data_Blm)
An_data = hp.alm2map(Alm_data, nside=2048, fwhm=0.00145444104333, verbose=False)
Bn_data = hp.alm2map(Blm_data, nside=2048, fwhm=0.00145444104333, verbose=False)
# make cls and save them away
cltt_data_An = hp.anafast(An_data)
cltt_data_Bn = hp.anafast(Bn_data)
np.savetxt('debug/cltt_data_An.dat', cltt_data_An)
np.savetxt('debug/cltt_data_Bn.dat', cltt_data_Bn)
B2lm_data = hp.map2alm(Bn_data*Bn_data, lmax=nl)
ABlm_data = hp.map2alm(An_data*Bn_data, lmax=nl)
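# The debug block above checks spectra both at the alm level (alm2cl) and at
# the map level (anafast).  For a full-sky map the two routes should agree to
# numerical precision, as in this toy check (flat spectrum assumed):
import numpy as np
import healpy as hp

nside = 32
lmax = 2 * nside
cl = np.ones(lmax + 1) * 1e-8
sky = hp.synfast(cl, nside)
cl_map = hp.anafast(sky, lmax=lmax)
cl_alm = hp.alm2cl(hp.map2alm(sky, lmax=lmax))
print(np.allclose(cl_map, cl_alm))           # expected True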