def setUp(self):
    self.lmax = 64
    self.path = os.path.dirname(os.path.realpath(__file__))
    self.map1 = [hp.ma(m) for m in hp.read_map(
        os.path.join(self.path, 'data',
                     'wmap_band_iqumap_r9_7yr_W_v4_udgraded32.fits'), (0, 1, 2))]
    self.map2 = [hp.ma(m) for m in hp.read_map(
        os.path.join(self.path, 'data',
                     'wmap_band_iqumap_r9_7yr_V_v4_udgraded32.fits'), (0, 1, 2))]
    self.mask = hp.read_map(
        os.path.join(self.path, 'data',
                     'wmap_temperature_analysis_mask_r9_7yr_v4_udgraded32.fits')).astype(bool)
    for m in chain(self.map1, self.map2):
        m.mask = np.logical_not(self.mask)
    self.cla = hp.read_cl(
        os.path.join(self.path, 'data',
                     'cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_II_lmax64_rmmono_3iter.fits'))
    self.cl_fortran_nomask = hp.read_cl(
        os.path.join(self.path, 'data',
                     'cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_II_lmax64_rmmono_3iter_nomask.fits'))
    cls_file = pf.open(
        os.path.join(self.path, 'data',
                     'cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_IQU_lmax64_rmmono_3iter.fits'))
    # fix for pyfits to read the file with duplicate column names
    for i in range(2, 6):
        cls_file[1].header['TTYPE%d' % i] += '-%d' % i
    cls = cls_file[1].data
    # order in HEALPix is TB, EB, while in healpy it is EB, TB
    self.cliqu = [np.array(cls.field(i)) for i in (0, 1, 2, 3, 5, 4)]
    nside = 32
    lmax = 64
    fwhm_deg = 7.
    seed = 12345
    np.random.seed(seed)
    self.mapiqu = hp.synfast(self.cliqu, nside, lmax=lmax, pixwin=False,
                             fwhm=np.radians(fwhm_deg), new=False)

def setUp(self):
    self.lmax = 64
    self.path = os.path.dirname(os.path.realpath(__file__))
    self.map1 = [hp.ma(m) for m in hp.read_map(
        os.path.join(self.path, 'data',
                     'wmap_band_iqumap_r9_7yr_W_v4_udgraded32.fits'), (0, 1, 2))]
    self.map2 = [hp.ma(m) for m in hp.read_map(
        os.path.join(self.path, 'data',
                     'wmap_band_iqumap_r9_7yr_V_v4_udgraded32.fits'), (0, 1, 2))]
    self.mask = hp.read_map(
        os.path.join(self.path, 'data',
                     'wmap_temperature_analysis_mask_r9_7yr_v4_udgraded32.fits')).astype(bool)
    for m in chain(self.map1, self.map2):
        m.mask = np.logical_not(self.mask)
    self.cla = hp.read_cl(
        os.path.join(self.path, 'data',
                     'cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_II_lmax64_rmmono_3iter.fits'))
    self.cl_fortran_nomask = hp.read_cl(
        os.path.join(self.path, 'data',
                     'cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_II_lmax64_rmmono_3iter_nomask.fits'))
    cls_file = pyfits.open(
        os.path.join(self.path, 'data',
                     'cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_IQU_lmax64_rmmono_3iter.fits'))
    # fix for pyfits to read the file with duplicate column names
    for i in range(2, 6):
        cls_file[1].header['TTYPE%d' % i] += '-%d' % i
    cls = cls_file[1].data
    # order in HEALPix is TB, EB, while in healpy it is EB, TB
    self.cliqu = [np.array(cls.field(i)) for i in (0, 1, 2, 3, 5, 4)]
    nside = 32
    lmax = 64
    fwhm_deg = 7.
    seed = 12345
    np.random.seed(seed)
    self.mapiqu = hp.synfast(self.cliqu, nside, lmax=lmax, pixwin=False,
                             fwhm=np.radians(fwhm_deg), new=False)

def _get_Cl_cmb(Alens=1., r=0.):
    power_spectrum = hp.read_cl(CMB_CL_FILE % 'lensed_scalar')[:, :4000]
    if Alens != 1.:
        power_spectrum[2] *= Alens
    if r:
        power_spectrum += r * hp.read_cl(
            CMB_CL_FILE % 'unlensed_scalar_and_tensor_r1')[:, :4000]
    return power_spectrum

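# A hedged usage sketch for _get_Cl_cmb (not from the original source):
# CMB_CL_FILE is assumed to be a %-style template resolving to healpy cl FITS
# files; the path below is hypothetical.
CMB_CL_FILE = 'data/Cls_Planck2018_%s.fits'  # hypothetical template
cl_fid = _get_Cl_cmb()                 # lensed scalar spectrum (Alens=1, r=0)
cl_r = _get_Cl_cmb(Alens=0.5, r=0.01)  # halved lensing BB plus 1% of the r=1 tensor template
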
def plot_mc():
    theory1 = hp.read_cl('cl_theory_FR_QxaU.fits')
    theory2 = hp.read_cl('cl_theory_FR_UxaQ.fits')
    bins = [1, 5, 10, 20, 50]
    with open('cl_array_FR_QxaU.json', 'r') as f:
        cross1_array = json.load(f)
    with open('cl_array_FR_UxaQ.json', 'r') as f:
        cross2_array = json.load(f)
    with open('cl_noise_FR_QxaU.json', 'r') as f:
        noise1_array = json.load(f)
    with open('cl_noise_FR_UxaQ.json', 'r') as f:
        noise2_array = json.load(f)
    # bl_27 = hp.gauss_beam(27.3*(np.pi/(180*60.)), lmax=383)
    # bl_11 = hp.gauss_beam(11.7*(np.pi/(180*60.)), lmax=383)
    # bl_factor = bl_11*bl_27
    L = 15 * (np.pi / 180.)
    k = np.arange(1, 500 * (L / (2 * np.pi)))
    l = 2 * np.pi * k / L

    cross1 = np.mean(cross1_array, axis=0)
    noise1 = np.mean(noise1_array, axis=0)
    dcross1 = np.std(cross1_array, axis=0)
    plt.figure()
    plt.plot(l, l * (l + 1) / (2 * np.pi) * theory1, 'r-')
    plt.plot(l, l * (l + 1) / (2 * np.pi) * (cross1 - noise1), 'k')
    plt.errorbar(l, l * (l + 1) / (2 * np.pi) * (cross1 - noise1),
                 yerr=l * (l + 1) / (2 * np.pi) * dcross1, color='black')
    plt.title('Cross 43x95 FR QxaU')
    plt.ylabel(r'$\frac{\ell(\ell+1)}{2\pi}C_{\ell}\ \frac{\mu K^{2}rad}{m^{4}}$')
    plt.xlabel(r'$\ell$')
    plt.savefig('Cross_43x95_FR_QxaU_flat.eps')
    plt.savefig('Cross_43x95_FR_QxaU_flat.png', format='png')

    cross2 = np.mean(cross2_array, axis=0)
    noise2 = np.mean(noise2_array, axis=0)
    dcross2 = np.std(cross2_array, axis=0)
    plt.figure()
    plt.plot(l, l * (l + 1) / (2 * np.pi) * theory2, 'r-')
    plt.plot(l, l * (l + 1) / (2 * np.pi) * (cross2 - noise2), 'k')
    plt.errorbar(l, l * (l + 1) / (2 * np.pi) * (cross2 - noise2),
                 yerr=l * (l + 1) / (2 * np.pi) * dcross2, color='black')
    plt.title('Cross 43x95 FR UxaQ')
    plt.ylabel(r'$\frac{\ell(\ell+1)}{2\pi}C_{\ell}\ \frac{\mu K^{2}rad}{m^{4}}$')
    plt.xlabel(r'$\ell$')
    plt.savefig('Cross_43x95_FR_UxaQ_flat.eps')
    plt.savefig('Cross_43x95_FR_UxaQ_flat.png', format='png')

def setUp(self):
    self.path = os.path.dirname(os.path.realpath(__file__))
    try:
        self.map1 = [hp.ma(m) for m in hp.read_map(
            os.path.join(self.path, 'data',
                         'wmap_band_iqumap_r9_7yr_W_v4.fits'), (0, 1, 2))]
        self.map2 = [hp.ma(m) for m in hp.read_map(
            os.path.join(self.path, 'data',
                         'wmap_band_iqumap_r9_7yr_V_v4.fits'), (0, 1, 2))]
        self.mask = hp.read_map(
            os.path.join(self.path, 'data',
                         'wmap_temperature_analysis_mask_r9_7yr_v4.fits')).astype(bool)
    except IOError:
        warnings.warn("""Missing WMAP test maps from the data folder, please download them
        from LAMBDA and copy them into the test/data folder:
        http://lambda.gsfc.nasa.gov/data/map/dr4/skymaps/7yr/raw/wmap_band_iqumap_r9_7yr_W_v4.fits
        http://lambda.gsfc.nasa.gov/data/map/dr4/skymaps/7yr/raw/wmap_band_iqumap_r9_7yr_V_v4.fits
        http://lambda.gsfc.nasa.gov/data/map/dr4/ancillary/masks/wmap_temperature_analysis_mask_r9_7yr_v4.fits
        on Mac or Linux you can run the bash script get_wmap_maps.sh from the same folder
        """)
        raise  # without re-raising, the attribute accesses below would fail anyway
    for m in chain(self.map1, self.map2):
        m.mask = np.logical_not(self.mask)
    self.cla = hp.read_cl(os.path.join(self.path, 'data', 'cl_wmap_fortran.fits'))
    cls = pyfits.open(os.path.join(self.path, 'data',
                                   'cl_iqu_wmap_fortran.fits'))[1].data
    # order in HEALPix is TB, EB, while in healpy it is EB, TB
    self.cliqu = [cls.field(i) for i in (0, 1, 2, 3, 5, 4)]
    nside = 32
    lmax = 64
    fwhm_deg = 7.
    seed = 12345
    np.random.seed(seed)
    self.mapiqu = hp.synfast(self.cliqu, nside, lmax=lmax, pixwin=False,
                             fwhm=np.radians(fwhm_deg), new=False)

def test_smoothcombine():
    nside = 256
    freq = 30
    m = hp.ma(np.random.standard_normal(hp.nside2npix(nside)))
    m.mask = np.zeros(len(m), dtype=bool)
    Reader = reader.Readers[os.environ["NULLTESTS_ENV"]]
    mapreader = Reader(os.environ["DDX9_LFI"], nside=nside, baseline_length=None)
    ps_mask, gal_mask = mapreader.read_masks(freq)
    # ps_mask = False
    # gal_mask = False
    smooth_combine(
        [(m, 1)],
        [(np.ones_like(m), 1)],
        spectra=True,
        smooth_mask=ps_mask,
        spectra_mask=gal_mask,
        metadata={"file_type": ""},
    )
    # check chi-square
    metadata = json.load(open("out_map.json", "r"))
    assert np.abs(metadata["map_unsm_chi2"] - 1) < 0.01
    assert np.abs(metadata["map_chi2"] - 1) < 0.1
    # check spectrum
    cl = hp.read_cl("out_cl.fits")
    realization_wn = cl[200:].mean()
    assert np.abs(realization_wn - metadata["whitenoise_cl"]) < 1e-5

def read_cl(path, has_polarization=True, unit=None, map_dist=None):
    r"""Read :math:`C_\ell` from a FITS file

    Parameters
    ----------
    path : str
        absolute or relative path to a local file, or a file available remotely.
    has_polarization : bool
        read only the temperature spectrum from the file, or also polarization
    map_dist : pysm.MapDistribution
        :math:`\ell_{max}` should be the same as the :math:`\ell_{max}` in the
        file, and :math:`m_{max}=\ell_{max}`.
    """
    filename = utils.RemoteData().get(path)
    mpi_comm = None if map_dist is None else map_dist.mpi_comm
    if (mpi_comm is not None and mpi_comm.rank == 0) or (mpi_comm is None):
        cl = hp.read_cl(filename)
        if unit is None:
            unit = u.Unit(extract_hdu_unit(filename))
        return cl * unit

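# A hedged usage sketch (not from the original source): without MPI the
# wrapper returns the spectrum as an astropy Quantity carrying the unit read
# from the FITS header; the path below is hypothetical.
cl = read_cl('pysm_2/camb_lenspotentialCls.fits')  # hypothetical remote path
cl_plain = cl.value  # strip the unit before passing to plain healpy routines
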
def test_anafast_xspectra(self):
    cl = hp.anafast(self.map1[0].filled(), self.map2[0].filled(), lmax=1024)
    self.assertEqual(len(cl), 1025)
    clx = hp.read_cl(os.path.join(self.path, 'data', 'clx.fits'))
    np.testing.assert_array_almost_equal(cl, clx, decimal=8)

def setUp(self):
    self.path = os.path.dirname(os.path.realpath(__file__))
    try:
        self.map1 = [
            hp.ma(m)
            for m in hp.read_map(
                os.path.join(self.path, 'data',
                             'wmap_band_iqumap_r9_7yr_W_v4.fits'), (0, 1, 2))
        ]
        self.map2 = [
            hp.ma(m)
            for m in hp.read_map(
                os.path.join(self.path, 'data',
                             'wmap_band_iqumap_r9_7yr_V_v4.fits'), (0, 1, 2))
        ]
        self.mask = hp.read_map(
            os.path.join(self.path, 'data',
                         'wmap_temperature_analysis_mask_r9_7yr_v4.fits')).astype(bool)
    except IOError:
        warnings.warn(
            """Missing WMAP test maps from the data folder, please download them
            from LAMBDA and copy them into the test/data folder:
            http://lambda.gsfc.nasa.gov/data/map/dr4/skymaps/7yr/raw/wmap_band_iqumap_r9_7yr_W_v4.fits
            http://lambda.gsfc.nasa.gov/data/map/dr4/skymaps/7yr/raw/wmap_band_iqumap_r9_7yr_V_v4.fits
            http://lambda.gsfc.nasa.gov/data/map/dr4/ancillary/masks/wmap_temperature_analysis_mask_r9_7yr_v4.fits
            on Mac or Linux you can run the bash script get_wmap_maps.sh from the same folder
            """)
        raise
    for m in chain(self.map1, self.map2):
        m.mask = np.logical_not(self.mask)
    self.cla = hp.read_cl(
        os.path.join(self.path, 'data', 'cl_wmap_fortran.fits'))

def initialize(self):
    # Check required parameters
    if self.data_vector_file is None or self.cov_file is None:
        raise LoggedError(
            self.log, 'Must specify both `data_vector_file` and `cov_file`.')
    if self.lmin is None or self.lmax is None or self.dl is None:
        raise LoggedError(
            self.log, 'Must specify binning parameters `lmin`, `lmax`, and `dl`.')
    self.log.debug('Using spectrum binning (lmin, lmax, delta \\ell) = '
                   '({}, {}, {})'.format(self.lmin, self.lmax, self.dl))

    # Load data vector and covariance
    self.data_vector = hp.read_cl(self.data_vector_file)[:self.lmax + 1]
    self.data_covariance = np.loadtxt(self.cov_file)[:self.lmax + 1,
                                                     :self.lmax + 1]

    if self.do_rayleigh:
        try:
            freqi = np.arange(9)[self.freq == planck_freqs][0]
        except IndexError:
            msg = '{} is not a known Planck band, try one of: {}'
            raise LoggedError(self.log,
                              msg.format(self.freq, list(planck_freqs)))
        self.log.debug(
            'Correcting for Rayleigh scattering in Planck channel {}'.format(
                self.freq))
        # Compute & subtract the expected Rayleigh contribution for this
        # specific Planck channel
        rayleigh_template = RayleighTemplate()
        nu4_eff = ffp8_nu4_central_freqs[freqi]
        nu6_eff = ffp8_nu6_central_freqs[freqi]
        self.data_vector -= rayleigh_template.TT(nu4_eff=nu4_eff,
                                                 nu6_eff=nu6_eff,
                                                 lmax=self.lmax)
    else:
        self.log.debug('Skipping Rayleigh scattering correction')

    # Load binning info & bin
    self.binmat = make_binmatrix(self.lmin, self.lmax, self.dl)
    self.binned_data_vector = np.dot(self.binmat, self.data_vector)
    self.binned_data_cov = np.dot(
        self.binmat, np.dot(self.data_covariance, self.binmat.T))
    self.inv_binned_cov = np.linalg.inv(self.binned_data_cov)

    # Normalization of the likelihood - just a multivariate Gaussian
    cov_det_sign, log_cov_det = np.linalg.slogdet(self.binned_data_cov)
    if cov_det_sign < 0:
        raise LoggedError(self.log, 'determinant of covariance is negative')
    k = self.binned_data_cov.shape[0]
    self.loglike_norm = -0.5 * (k * np.log(2 * np.pi) + log_cov_det)
    if np.isinf(self.loglike_norm):
        raise LoggedError(self.log, 'normalization of likelihood is infinite')

def make_window_func_file(hpix_data_dir, nside):
    filename = hpix_data_dir + hpix_dict[nside]
    w = hp.read_cl(filename)
    l = np.arange(2 * nside + 1)
    wl = w[1][0:2 * nside + 1]  # index 1 selects the polarization window
    outfile = str(hpix_dict[nside])
    outfile = outfile.replace("fits", "dat")
    # we need two columns
    np.savetxt(outfile, np.asarray([l, wl, wl]).T, delimiter=",",
               fmt="%d,%0.12e,%0.12e")

def get_cl(fname_cl, fname_map, fname_hits):
    if os.path.isfile(fname_cl):
        cl = hp.read_cl(fname_cl)
    else:
        hits = None  # hp.read_map(path_hits)
        m = hp.read_map(fname_map, None)
        cl = map2cl(m, hits)
        hp.write_cl(fname_cl, cl)
    return cl

def get(dens_type=0, ishell=5, ngrid=512, rsd=True):
    dirin = get_path(dens_type, ishell, ngrid)
    if rsd:
        f1 = os.path.join(dirin, "clmean.fits")
    else:
        f1 = os.path.join(dirin, "clmean_norsd.fits")
    assert os.path.exists(f1), "file does not exist: " + f1
    print("reading: " + f1)
    return hp.read_cl(f1)

def get_cl(fname_cl, fname_map, fname_hits):
    if os.path.isfile(fname_cl):
        cl = hp.read_cl(fname_cl)
    else:
        mask = get_mask(fname_hits)
        m = hp.read_map(fname_map, None)
        cl = map2cl(m, mask)
        hp.write_cl(fname_cl, cl)
    return cl

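# map2cl is defined elsewhere in this project; a minimal stand-in consistent
# with how it is called above (maps plus a mask in, spectra out), assuming a
# simple fsky correction, might look like this sketch:
def map2cl(m, mask, lmax=None):
    # Assumed stand-in, not the original implementation: zero out masked
    # pixels, estimate spectra with anafast, and correct for the retained
    # sky fraction.
    if mask is not None:
        m = np.asarray(m) * mask
        fsky = mask.mean()
    else:
        fsky = 1.0
    return hp.anafast(m, lmax=lmax) / fsky
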
def test_anafast_xspectra(self):
    cl = hp.anafast(hp.remove_monopole(self.map1[0]),
                    hp.remove_monopole(self.map2[0]),
                    lmax=self.lmax)
    self.assertEqual(len(cl), self.lmax + 1)
    clx = hp.read_cl(
        os.path.join(
            self.path, 'data',
            'cl_wmap_band_iqumap_r9_7yr_WVxspec_v4_udgraded32_II_lmax64_rmmono_3iter.fits'))
    np.testing.assert_array_almost_equal(cl, clx, decimal=8)

def setUp(self):
    try:
        self.map = hp.ma(hp.read_map(
            os.path.join('data', 'wmap_band_imap_r9_7yr_W_v4.fits')))
        self.mask = hp.read_map(
            os.path.join('data',
                         'wmap_temperature_analysis_mask_r9_7yr_v4.fits')).astype(bool)
    except IOError:
        warnings.warn("""Missing WMAP test maps from the data folder, please download them
        from LAMBDA and copy them into the test/data folder:
        http://lambda.gsfc.nasa.gov/data/map/dr4/skymaps/7yr/raw/wmap_band_imap_r9_7yr_W_v4.fits
        http://lambda.gsfc.nasa.gov/data/map/dr4/ancillary/masks/wmap_temperature_analysis_mask_r9_7yr_v4.fits
        on Mac or Linux you can run the bash script get_wmap_maps.sh from the same folder
        """)
        raise
    self.map.mask = np.logical_not(self.mask)
    self.cla = hp.read_cl(os.path.join('data', 'cl_wmap_fortran.fits'))

def gen_lgmca_like(lgmca_file, cov_file, lmin=70, lmax=2000, delta_ell=30):
    # Load covariance
    cov = np.loadtxt(cov_file)
    # Add the l(l + 1)/(2 pi) factor (i.e. convert Cl -> Dl)
    ells = np.arange(lmax + 1)
    ll1 = ells * (ells + 1) / (2 * np.pi)
    # Load data vector, deconvolving the beam and pixel window
    dls = hp.read_cl(lgmca_file)[:lmax + 1] / (getbeam(5, lmax) *
                                               hp.pixwin(2048, lmax=lmax))**2
    # 1e12 converts K^2 -> muK^2
    return bare_gen_like(1e12 * ll1 * dls, cov, lmin=lmin, lmax=lmax,
                         delta_ell=delta_ell)

def get_tf(fname_tf, fname_cmb_unlensed, fname_cmb_lensing, fname_output,
           fname_hits):
    if os.path.isfile(fname_tf):
        tf = hp.read_cl(fname_tf)
    else:
        inmap = hp.read_map(fname_cmb_unlensed, None) + \
            hp.read_map(fname_cmb_lensing, None)
        inmap *= 1e-6  # into K_CMB
        inmap[0] = hp.remove_dipole(inmap[0])
        outmap = hp.read_map(fname_output, None)
        hits = None  # hp.read_map(fname_hits)
        cl_in = map2cl(inmap, hits)
        cl_out = map2cl(outmap, hits)
        tf = cl_out / cl_in
        hp.write_cl(fname_tf, tf)
    return tf

def gen_ffp8_1_like(planck_channel, ffp8_1_realization, cov_fname,
                    lmin=70, lmax=2000, delta_ell=30):
    '''
    Create a Cobaya likelihood function for a single channel of an FFP8.1
    realization.

    planck_channel: int, one of [30, 44, 70, ..., 857]
    ffp8_1_realization: int, [0..99], realization number
    cov_fname: file with the data covariance matrix
    '''
    freqi = np.arange(9)[planck_channel == planck_freqs][0]
    # Load covariance
    cov = np.loadtxt(cov_fname)
    # Load Rayleigh templates
    rayleigh_base_dir = os.path.join(os.path.dirname(__file__), 'data',
                                     'rayleigh', 'ffp8_1_rayleigh', 'Rayleigh')
    header = np.zeros((2, 5))
    header[1, 0] = 1
    nu_ref = 857
    rayleigh_base = np.vstack((header, np.loadtxt(os.path.join(
        rayleigh_base_dir, 'planck_FFP8_1_total_lensedCls.dat'))))
    rayleigh_nu4 = np.vstack((header, np.loadtxt(os.path.join(
        rayleigh_base_dir, 'planck_FFP8_1_nu4_lensedCls.dat_6_6'))))
    rayleigh_nu6 = np.vstack((header, np.loadtxt(os.path.join(
        rayleigh_base_dir, 'planck_FFP8_1_nu6_lensedCls.dat_6_6'))))
    rayleigh_nu8 = np.vstack((header, np.loadtxt(os.path.join(
        rayleigh_base_dir, 'planck_FFP8_1_nu8_lensedCls.dat_6_6'))))
    # Compute the effective Rayleigh contribution to TT here
    nu4_eff = ffp8_nu4_central_freqs[freqi]
    nu6_eff = ffp8_nu6_central_freqs[freqi]
    rayleigh_contrib = 0
    # note: nu6_eff is also used for the nu8 template, as in the original
    for template, nu_eff, power in [(rayleigh_nu4, nu4_eff, 4),
                                    (rayleigh_nu6, nu6_eff, 6),
                                    (rayleigh_nu8, nu6_eff, 8)]:
        rayleigh_contrib += (template[:, 1] - rayleigh_base[:, 1]) * \
            (nu_eff / nu_ref)**power
    # Data vector
    Dl_input = hp.read_cl(_ffp8_1_chan_fmt.format(freq=planck_channel,
                                                  real=ffp8_1_realization))
    Dl_input = Dl_input[:lmax + 1] - rayleigh_contrib[:lmax + 1]
    return bare_gen_like(Dl_input, cov, lmin=lmin, lmax=lmax,
                         delta_ell=delta_ell)

def generate_covariance(fname_fmt, output_fname, nvecs=100,
                        beam_fname='FFP8_v1_aggregated_beam.txt'):
    first_vec = hp.read_cl(fname_fmt.format(0))
    maxl = first_vec.size - 1
    data_vecs = np.zeros((nvecs, maxl + 1))
    ells = np.arange(maxl + 1)
    ll1 = ells * (ells + 1) / (2 * np.pi)
    for i in range(0, nvecs):
        # Convert K^2 -> muK^2 (factor 1e12) and deconvolve the beam
        fname = fname_fmt.format(i)
        # note: this overrides the beam_fname argument with a copy of the
        # beam file living next to the data files
        beam_fname = os.path.join(os.path.dirname(fname),
                                  'FFP8_v1_aggregated_beam.txt')
        data_vecs[i] = ll1 * 1e12 * hp.read_cl(fname_fmt.format(i)) / \
            (np.loadtxt(beam_fname)**2)
    covariance = np.cov(data_vecs.T)
    np.savetxt(output_fname, covariance)
    return covariance

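# A hedged usage sketch for the covariance builder (not from the original
# source); the per-realization spectrum pattern is hypothetical and '{}'
# receives the realization index.
cov = generate_covariance('cl_ffp8_mc_{}.fits',      # hypothetical input pattern
                          'ffp8_dl_covariance.txt',  # output text file
                          nvecs=100)
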
def test_anafast_xspectra(self):
    cl = hp.anafast(
        hp.remove_monopole(self.map1[0]),
        hp.remove_monopole(self.map2[0]),
        lmax=self.lmax,
    )
    self.assertEqual(len(cl), self.lmax + 1)
    clx = hp.read_cl(
        os.path.join(
            self.path,
            "data",
            "cl_wmap_band_iqumap_r9_7yr_WVxspec_v4_udgraded32_II_lmax64_rmmono_3iter.fits",
        )
    )
    np.testing.assert_array_almost_equal(cl, clx, decimal=8)

def get_tf(fname_tf, fname_cmb_unlensed, fname_cmb_lensing, fname_output,
           fname_hits):
    if os.path.isfile(fname_tf):
        tf = hp.read_cl(fname_tf)
    else:
        inmap = hp.read_map(fname_cmb_unlensed, None) + hp.read_map(
            fname_cmb_lensing, None)
        inmap *= 1e-6  # into K_CMB
        inmap[0] = hp.remove_dipole(inmap[0])
        outmap = hp.read_map(fname_output, None)
        mask = get_mask(fname_hits)
        cl_in = map2cl(inmap, mask)
        cl_out = map2cl(outmap, mask)
        tf = cl_out / cl_in
        hp.write_cl(fname_tf, tf)
    tf[:, lmax_tf:] = 1
    tf[tf > 1] = 1
    return tf

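# Once cached, the transfer function is typically divided out of an observed
# spectrum; a hedged sketch (all filenames hypothetical):
tf = get_tf('tf.fits', 'cmb_unlensed.fits', 'cmb_lensing.fits',
            'output_map.fits', 'hits.fits')
cl_obs = hp.read_cl('cl_observed.fits')
cl_debiased = cl_obs / tf  # tf is clipped to <= 1 and set to 1 above lmax_tf
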
def total_beam(self, fwhm_in, lmax, pol=False):
    total_beam = None
    if fwhm_in < .99 * self.fwhm:
        if self.quickpolbeam is None:
            total_beam = hp.gauss_beam(self.fwhm * arcmin, lmax=lmax, pol=True)
            total_beam = total_beam[:, 0:3].copy()
        else:
            total_beam = np.array(hp.read_cl(self.quickpolbeam))
            if total_beam.ndim == 1:
                total_beam = np.vstack([total_beam, total_beam, total_beam])
            total_beam = total_beam[:, :lmax + 1].T.copy()
        beam_in = hp.gauss_beam(fwhm_in * arcmin, lmax=lmax, pol=True)
        beam_in = beam_in[:, 0:3].copy()
        good = beam_in != 0
        total_beam[good] /= beam_in[good]
        if pol:
            total_beam = np.ascontiguousarray(total_beam[:, (1, 2)])
        else:
            total_beam = np.ascontiguousarray(total_beam[:, 0:1])
    return total_beam

def test(useCLASS=1, useLensing=1, classCamb=1, nSims=1000, lmax=100, lmin=2,
         newSMICA=False, newDeg=False, suppressC2=False, suppFactor=0.23,
         filterC2=False, filtFacLow=0.1, filtFacHigh=0.2, R1=False):
    """
    code for testing the other functions in this module
    Inputs:
      useCLASS: set to 1 to use CLASS, 0 to use CAMB
        CLASS Cl has early/late split at z=50
        CAMB Cl has ISWin/out split: ISWin: 0.4<z<0.75, ISWout: the rest
        Note: CAMB results include primary in ISWin and ISWout (not as intended)
        default: 1
      useLensing: set to 1 to use lensed Cl, 0 for non-lensed
        default: 1
      classCamb: if 1: use the CAMB format of CLASS output, if 0: use CLASS format
        Note: parameter not used if useCLASS = 0
        default: 1
      nSims: the number of simulations to do for the ensemble
        default: 1000
      lmax: the highest l to include in Legendre transforms
        default: 100
      lmin: the lowest l to include in S_{1/2} = CIC calculations
        default: 2
      newSMICA: set to True to recalculate SMICA results
        default: False
      newDeg: set to True to recalculate map and mask degradations
        (only if newSMICA is also True)
        default: False
      suppressC2: set to True to suppress theoretical C_2 (quadrupole) by
        suppFactor before creating a_lm's
        default: False
      suppFactor: multiplies C_2 if suppressC2 is True
        default: 0.23  # from Tegmark et al. 2003, figure 13 (WMAP)
      filterC2: set to True to filter simulated CMBs after spice calculates
        the cut-sky C_l.  Sims pass the filter if
        C_2 * filtFacLow < C_2^sim < C_2 * filtFacHigh.
        default: False
      filtFacLow, filtFacHigh: define the C_2 range for passing simulated CMBs
        default: 0.1, 0.2
      R1: set to True to use SMICA and Mask R1; otherwise R2 is used.
        Only affects calculation of the newly degraded map.
        default: False
    """
    # load data
    ell, fullCl, primCl, lateCl, crossCl = gcp.loadCls(useCLASS=useCLASS,
                                                       useLensing=useLensing,
                                                       classCamb=classCamb)

    # fill the beginning with zeros
    startEll = int(ell[0])
    ell = np.append(np.arange(startEll), ell)
    fullCl = np.append(np.zeros(startEll), fullCl)
    primCl = np.append(np.zeros(startEll), primCl)
    lateCl = np.append(np.zeros(startEll), lateCl)
    crossCl = np.append(np.zeros(startEll), crossCl)

    # suppress C_2 to see what happens in the ensemble
    if suppressC2:
        fullCl[2] *= suppFactor
        primCl[2] *= suppFactor
        lateCl[2] *= suppFactor
        crossCl[2] *= suppFactor

    conv = ell * (ell + 1) / (2 * np.pi)
    #print(ell, conv)
    #ell[0] = 2.0

    """
    # verify statistical properties of alm realizations
    nSims = 1000
    lmax = 100
    Clprim_sum = np.zeros(lmax+1)
    Cllate_sum = np.zeros(lmax+1)
    Clcros_sum = np.zeros(lmax+1)  # prim x late
    for nSim in range(nSims):
        print('starting sim ', nSim+1, ' of ', nSims)
        #alm_prim, alm_late = getAlms(A_lij, lmax=lmax)  # AKW method, defunct
        # see if synalm can do it
        alm_prim, alm_late = hp.synalm((primCl, lateCl, crossCl), lmax=lmax, new=True)
        Clprim_sum = Clprim_sum + hp.alm2cl(alm_prim)
        Cllate_sum = Cllate_sum + hp.alm2cl(alm_late)
        Clcros_sum = Clcros_sum + hp.alm2cl(alm_prim, alm_late)
    Cl_prim_avg = Clprim_sum/nSims
    Cl_late_avg = Cllate_sum/nSims
    Cl_cros_avg = Clcros_sum/nSims

    doPlot = True
    if doPlot:
        plt.plot(ell[:lmax+1], Cl_prim_avg*conv[:lmax+1])
        plt.plot(ell[:lmax+1], primCl[:lmax+1]*conv[:lmax+1])
        plt.title('primary')
        plt.ylabel('D_l')
        plt.show()
        plt.plot(ell[:lmax+1], Cl_late_avg*conv[:lmax+1])
        plt.plot(ell[:lmax+1], lateCl[:lmax+1]*conv[:lmax+1])
        plt.title('late')
        plt.ylabel('D_l')
        plt.show()
        plt.plot(ell[:lmax+1], Cl_cros_avg*conv[:lmax+1])
        plt.plot(ell[:lmax+1], crossCl[:lmax+1]*conv[:lmax+1])
        plt.title('cross')
        plt.ylabel('D_l')
        plt.show()
    """

    # get covariances from SMICA map and mask
    theta_i = 0.0    # degrees
    theta_f = 180.0  # degrees
    nSteps = 1800
    #lmax = 100

    """
    # don't want anafast after all
    # get unmasked and masked SMICA covariances
    #   note: getSMICA uses linspace in theta for thetaArray
    #newSMICA = False#True
    thetaArray2, C_SMICA, C_SMICAmasked, S_SMICAnomask, S_SMICAmasked = \
        getSMICA(theta_i=theta_i, theta_f=theta_f, nSteps=nSteps, lmax=lmax,
                 lmin=lmin, newSMICA=newSMICA, newDeg=newDeg, useSPICE=False,
                 R1=R1)
    print('')
    print('S_{1/2}(anafast): SMICA, no mask: ', S_SMICAnomask,
          ', masked: ', S_SMICAmasked)
    print('')
    """

    # get C_l from SPICE to compare to the above method
    #   note: getSMICA uses linspace in theta for thetaArray
    #newSMICA = False#True
    thetaArray2sp, C_SMICAsp, C_SMICAmaskedsp, S_SMICAnomasksp, S_SMICAmaskedsp = \
        getSMICA(theta_i=theta_i, theta_f=theta_f, nSteps=nSteps, lmax=lmax,
                 lmin=lmin, newSMICA=newSMICA, newDeg=newDeg, useSPICE=True,
                 R1=R1)
    print('')
    print('S_{1/2}(spice): SMICA, no mask: ', S_SMICAnomasksp,
          ', masked: ', S_SMICAmaskedsp)
    print('')

    # Find S_{1/2} in real space to compare methods
    nTerms = 10000
    #SSnm2 = SOneHalf(thetaArray2, C_SMICA, nTerms=nTerms)
    #SSmd2 = SOneHalf(thetaArray2, C_SMICAmasked, nTerms=nTerms)
    SSnm2sp = SOneHalf(thetaArray2sp, C_SMICAsp, nTerms=nTerms)
    SSmd2sp = SOneHalf(thetaArray2sp, C_SMICAmaskedsp, nTerms=nTerms)

    # create an ensemble of realizations and gather statistics
    covEnsembleFull = np.zeros([nSims, nSteps + 1])  # for maskless
    covEnsembleCut = np.zeros([nSims, nSteps + 1])   # for masked
    sEnsembleFull = np.zeros(nSims)
    sEnsembleCut = np.zeros(nSims)
    covTheta = np.array([])
    #nSims = 1000

    # apply beam and pixel window functions to power spectra
    #   note: to ignore the non-constant pixel shape, W(l) must be > B(l)
    #   however, this is not true for NSIDE=128 and gauss_beam(5')
    #   Here I ignore this anyway and proceed
    myNSIDE = 128  # must be the same NSIDE as in the getSMICA function
    Wpix = hp.pixwin(myNSIDE)
    Bsmica = hp.gauss_beam(5. / 60 * np.pi / 180)  # 5 arcmin
    WlMax = Wpix.size
    if WlMax < lmax:
        print('die screaming!!!')
        return 0
    primCl = primCl[:WlMax] * (Wpix * Bsmica)**2
    lateCl = lateCl[:WlMax] * (Wpix * Bsmica)**2
    crossCl = crossCl[:WlMax] * (Wpix * Bsmica)**2
    # note: I tried sims without this scaling, and results seemed the same at a glance

    # collect simulated Cl for comparison to the model
    Clsim_full_sum = np.zeros(lmax + 1)

    # get the Jmn matrix for the harmonic-space S_{1/2} calculation
    myJmn = getJmn(lmax=lmax)

    # set up a ramdisk for SpICE
    #   super lame that spice needs to read/write from disk, but here goes...
    RAMdisk = '/Volumes/ramdisk/'
    ClTempFile = RAMdisk + 'tempCl.fits'
    mapTempFile = RAMdisk + 'tempMap.fits'
    mapDegFile = RAMdisk + 'smicaMapDeg.fits'  # should have been created by sims.getSMICA
    maskDegFile = RAMdisk + 'maskMapDeg.fits'  # should have been created by sims.getSMICA

    # create a RAM disk for SpICE and copy these files there using bash
    RAMsize = 4  # Mb
    ramDiskOutput = subprocess.check_output('./ramdisk.sh create ' + str(RAMsize),
                                            shell=True)
    print(ramDiskOutput)
    diskID = ramDiskOutput[31:41]  # this might not grab the right part; works for '/dev/disk1'
    subprocess.call('cp smicaMapDeg.fits ' + RAMdisk, shell=True)
    subprocess.call('cp maskMapDeg.fits ' + RAMdisk, shell=True)

    doTime = True  # to time the run and print output
    startTime = time.time()
    #for nSim in range(nSims):
    nSim = 0
    while nSim < nSims:
        print('starting sim ', nSim + 1, ' of ', nSims)
        alm_prim, alm_late = hp.synalm((primCl, lateCl, crossCl), lmax=lmax,
                                       new=True)

        # calculate C(theta) of the simulation
        Clsim_prim = hp.alm2cl(alm_prim)
        Clsim_late = hp.alm2cl(alm_late)
        Clsim_cros = hp.alm2cl(alm_prim, alm_late)
        Clsim_full = Clsim_prim + 2 * Clsim_cros + Clsim_late
        # use Cl_sim_full to omit the prim/late distinction for now

        # start with a mask
        #   -> for optional C2 filtering based on the cut-sky map
        #   alm2map should create a map with default RING ordering
        #   pixel window and beam already accounted for in true Cls
        #mapSim = hp.alm2map(alm_prim+alm_late, myNSIDE, lmax=lmax, pixwin=True,
        #                    sigma=5./60*np.pi/180)
        mapSim = hp.alm2map(alm_prim + alm_late, myNSIDE, lmax=lmax)
        hp.write_map(mapTempFile, mapSim)

        ispice(mapTempFile, ClTempFile, maskfile1=maskDegFile, subav="YES",
               subdipole="YES")
        Cl_masked = hp.read_cl(ClTempFile)
        ell2 = np.arange(Cl_masked.shape[0])

        # Check for low power of the cut-sky C_2
        if (filterC2 == True and fullCl[2] * filtFacHigh > Cl_masked[2]
                and Cl_masked[2] > fullCl[2] * filtFacLow) or filterC2 == False:

            # note: getCovar uses linspace in x for thetaArray
            thetaArray, cArray2 = getCovar(ell2[:lmax + 1], Cl_masked[:lmax + 1],
                                           theta_i=theta_i, theta_f=theta_f,
                                           nSteps=nSteps, lmin=lmin)
            covEnsembleCut[nSim] = cArray2

            # S_{1/2}
            sEnsembleCut[nSim] = np.dot(
                Cl_masked[lmin:lmax + 1],
                np.dot(myJmn[lmin:, lmin:], Cl_masked[lmin:lmax + 1]))

            doPlot = False  #True
            if doPlot:
                plt.plot(thetaArray, cArray)
                plt.xlabel('theta (degrees)')
                plt.ylabel('C(theta)')
                plt.title('covariance of CMB simulation ' + str(nSim + 1))
                plt.show()

            # now without the mask
            #   uses the same sims that passed the C2 filter
            Clsim_full_sum += Clsim_full

            # note: getCovar uses linspace in x for thetaArray
            thetaArray, cArray = getCovar(ell[:lmax + 1], Clsim_full[:lmax + 1],
                                          theta_i=theta_i, theta_f=theta_f,
                                          nSteps=nSteps, lmin=lmin)
            covEnsembleFull[nSim] = cArray
            covTheta = thetaArray

            # S_{1/2}
            sEnsembleFull[nSim] = np.dot(
                Clsim_full[lmin:],
                np.dot(myJmn[lmin:, lmin:], Clsim_full[lmin:]))

            nSim += 1

    if doTime:
        print('time elapsed: ', int((time.time() - startTime) / 60.), ' minutes')

    # free the RAM used by SpICE's RAM disk
    ramDiskOutput = subprocess.check_output('./ramdisk.sh delete ' + diskID,
                                            shell=True)
    print(ramDiskOutput)

    avgEnsembleFull = np.average(covEnsembleFull, axis=0)
    stdEnsembleFull = np.std(covEnsembleFull, axis=0)
    # do I need a better way to describe the confidence interval?
    avgEnsembleCut = np.average(covEnsembleCut, axis=0)
    stdEnsembleCut = np.std(covEnsembleCut, axis=0)
    Clsim_full_avg = Clsim_full_sum / nSims

    # save results
    saveFile1 = "simStatResultC.npy"
    np.save(saveFile1,
            np.vstack((thetaArray, avgEnsembleFull, stdEnsembleFull,
                       avgEnsembleCut, stdEnsembleCut)))
    saveFile2 = "simStatC_SMICA.npy"
    np.save(saveFile2, np.vstack((thetaArray2sp, C_SMICAsp, C_SMICAmaskedsp)))

    saveFile3 = "simStatResultS.npy"
    np.save(saveFile3,
            np.vstack((np.hstack((np.array(S_SMICAnomasksp), sEnsembleFull)),
                       np.hstack((np.array(S_SMICAmaskedsp), sEnsembleCut)))))

    doPlot = True
    if doPlot:
        print('plotting C_l... ')
        #print(ell.size, conv.size, primCl.size, crossCl.size, lateCl.size)
        plt.plot(ell[:lmax + 1],
                 conv[:lmax + 1] * (primCl + 2 * crossCl + lateCl)[:lmax + 1],
                 label='model D_l')
        plt.plot(ell[:lmax + 1], conv[:lmax + 1] * Clsim_full_avg,
                 label='ensemble average D_l')
        plt.legend()
        plt.show()

        makePlots(saveFile1=saveFile1, saveFile2=saveFile2, saveFile3=saveFile3)

    # S_{1/2} output
    print('')
    print('using CIC method: ')
    #print('S_{1/2}(anafast): SMICA, no mask: ', S_SMICAnomask, ', masked: ', S_SMICAmasked)
    print('S_{1/2}(spice): SMICA, no mask: ', S_SMICAnomasksp,
          ', masked: ', S_SMICAmaskedsp)
    print('')
    print('using CCdx method: ')
    #print('S_{1/2}(anafast): SMICA, no mask: ', SSnm2, ', masked: ', SSmd2)
    print('S_{1/2}(spice): SMICA, no mask: ', SSnm2sp, ', masked: ', SSmd2sp)
    print('')

def test(useCLASS=1, useLensing=1, classCamb=1, nSims=1000, lmax=100, lmin=2,
         newSMICA=False, newDeg=False, suppressC2=False, suppFactor=0.23):
    """
    code for testing the other functions in this module
    Inputs:
      useCLASS: set to 1 to use CLASS, 0 to use CAMB
        CLASS Cl has early/late split at z=50
        CAMB Cl has ISWin/out split: ISWin: 0.4<z<0.75, ISWout: the rest
        Note: CAMB results include primary in ISWin and ISWout (not as intended)
        default: 1
      useLensing: set to 1 to use lensed Cl, 0 for non-lensed
        default: 1
      classCamb: if 1: use the CAMB format of CLASS output, if 0: use CLASS format
        Note: parameter not used if useCLASS = 0
        default: 1
      nSims: the number of simulations to do for the ensemble
        default: 1000
      lmax: the highest l to include in Legendre transforms
        default: 100
      lmin: the lowest l to include in S_{1/2} = CIC calculations
        default: 2
      newSMICA: set to True to recalculate SMICA results
        default: False
      newDeg: set to True to recalculate map and mask degradations
        (only if newSMICA is also True)
        default: False
      suppressC2: set to True to suppress theoretical C_2 by suppFactor
        before creating a_lm's
        default: False
      suppFactor: multiplies C_2 if suppressC2 is True
        default: 0.23  # from Tegmark et al. 2003, figure 13 (WMAP)
    """
    ##########################################################################
    # load theoretical power spectra

    # load data
    ell, fullCl, primCl, lateCl, crossCl = gcp.loadCls(useCLASS=useCLASS,
                                                       useLensing=useLensing,
                                                       classCamb=classCamb)

    # fill the beginning with zeros
    startEll = int(ell[0])
    ell = np.append(np.arange(startEll), ell)
    fullCl = np.append(np.zeros(startEll), fullCl)
    primCl = np.append(np.zeros(startEll), primCl)
    lateCl = np.append(np.zeros(startEll), lateCl)
    crossCl = np.append(np.zeros(startEll), crossCl)

    # suppress C_2 to see what happens in the ensemble
    #suppressC2 = False
    #suppFactor = 0.23  # from Tegmark et al. 2003, figure 13 (WMAP)
    if suppressC2:
        fullCl[2] *= suppFactor
        primCl[2] *= suppFactor
        lateCl[2] *= suppFactor
        crossCl[2] *= suppFactor
    conv = ell * (ell + 1) / (2 * np.pi)
    #print(ell, conv)
    #ell[0] = 2.0

    # apply beam and pixel window functions to power spectra
    #   note: to ignore the non-constant pixel shape, W(l) must be > B(l)
    #   however, this is not true for NSIDE=128 and gauss_beam(5')
    #   Here I ignore this anyway and proceed
    myNSIDE = 128  # must be the same NSIDE as in the sims.getSMICA function
    Wpix = hp.pixwin(myNSIDE)
    Bsmica = hp.gauss_beam(5. / 60 * np.pi / 180)  # 5 arcmin
    WlMax = Wpix.size
    if WlMax < lmax:
        print('die screaming!!!')
        return 0
    fullCl = fullCl[:WlMax] * (Wpix * Bsmica)**2
    primCl = primCl[:WlMax] * (Wpix * Bsmica)**2
    lateCl = lateCl[:WlMax] * (Wpix * Bsmica)**2
    crossCl = crossCl[:WlMax] * (Wpix * Bsmica)**2
    # note: I tried sims without this scaling, and results seemed the same at a glance

    ##########################################################################
    # load SMICA data, converted to C(theta), via SpICE

    if newSMICA:
        theta_i = 0.0    # degrees
        theta_f = 180.0  # degrees
        nSteps = 1800
        thetaArray2sp, C_SMICAsp, C_SMICAmaskedsp, S_SMICAnomasksp, S_SMICAmaskedsp = \
            sims.getSMICA(theta_i=theta_i, theta_f=theta_f, nSteps=nSteps,
                          lmax=lmax, lmin=lmin, newSMICA=newSMICA,
                          newDeg=newDeg, useSPICE=True)

    # filenames for SpICE to use
    #   super lame that spice needs to read/write from disk, but here goes...
    RAMdisk = '/Volumes/ramdisk/'
    ClTempFile = RAMdisk + 'tempCl.fits'
    mapTempFile = RAMdisk + 'tempMap.fits'
    mapDegFile = RAMdisk + 'smicaMapDeg.fits'  # should have been created by sims.getSMICA
    maskDegFile = RAMdisk + 'maskMapDeg.fits'  # should have been created by sims.getSMICA

    # create a RAM disk for SpICE and copy these files there using bash
    RAMsize = 4  # Mb
    ramDiskOutput = subprocess.check_output('./ramdisk.sh create ' + str(RAMsize),
                                            shell=True)
    print(ramDiskOutput)
    diskID = ramDiskOutput[31:41]  # this might not grab the right part; works for '/dev/disk1'
    subprocess.call('cp smicaMapDeg.fits ' + RAMdisk, shell=True)
    subprocess.call('cp maskMapDeg.fits ' + RAMdisk, shell=True)

    ispice(mapDegFile, ClTempFile, maskfile1=maskDegFile, subav="YES",
           subdipole="YES")
    ClsmicaCut = hp.read_cl(ClTempFile)

    # find S_{1/2} for SMICA.  Should actually optimize, but see what happens here first.
    #myJmn = legprodint.getJmn(endX=0.5, lmax=lmax, doSave=False)
    #Ssmica = np.dot(ClsmicaCut[lmin:lmax+1], np.dot(myJmn[lmin:, lmin:],
    #                ClsmicaCut[lmin:lmax+1]))*1e24  # K^4 to microK^4

    ##########################################################################
    # create an ensemble of realizations and gather statistics

    spiceMax = myNSIDE * 3  # should be lmax+1 for SpICE
    ClEnsembleCut = np.zeros([nSims, spiceMax])
    simEll = np.arange(spiceMax)

    doTime = True  # to time the run and print output
    startTime = time.time()
    for nSim in range(nSims):
        print('starting masked Cl sim ', nSim + 1, ' of ', nSims)
        alm_prim, alm_late = hp.synalm((primCl, lateCl, crossCl), lmax=lmax,
                                       new=True)
        mapSim = hp.alm2map(alm_prim + alm_late, myNSIDE, lmax=lmax)
        hp.write_map(mapTempFile, mapSim)

        ispice(mapTempFile, ClTempFile, maskfile1=maskDegFile, subav="YES",
               subdipole="YES")
        ClEnsembleCut[nSim] = hp.read_cl(ClTempFile)

        doPlot = False  #True
        if doPlot:
            gcp.showCl(simEll[:lmax + 1], ClEnsembleCut[nSim, :lmax + 1],
                       title='power spectrum of simulation ' + str(nSim + 1))

    timeInterval1 = time.time() - startTime
    if doTime:
        print('time elapsed: ', int(timeInterval1 / 60.), ' minutes')

    # free the RAM used by SpICE's RAM disk
    ramDiskOutput = subprocess.check_output('./ramdisk.sh delete ' + diskID,
                                            shell=True)
    print(ramDiskOutput)

    # put SMICA in as the 0th member of the ensemble; 1e12 to convert K^2 to microK^2
    ClEnsembleCut = np.vstack((ClsmicaCut * 1e12, ClEnsembleCut))
    nSims += 1

    ##########################################################################
    # create S(x) for each C_l, using interpolation

    nXvals = 181
    thetaVals = np.linspace(0, 180, nXvals)  # one-degree intervals
    xVals = np.cos(thetaVals * np.pi / 180)
    Jmnx = np.empty([nXvals, lmax + 1, lmax + 1])
    for index, xVal in enumerate(xVals):
        Jmnx[index] = legprodint.getJmn(endX=xVal, lmax=lmax, doSave=False)
    SxToInterpolate = np.empty(nXvals)

    # create a list of functions
    dummy = lambda x: x**2
    SofXList = [dummy for i in range(nSims)]

    for nSim in range(nSims):
        print('starting S(x) sim ', nSim + 1, ' of ', nSims)
        for index, xVal in enumerate(xVals):
            SxToInterpolate[index] = np.dot(
                ClEnsembleCut[nSim, lmin:lmax + 1],
                np.dot(Jmnx[index, lmin:, lmin:],
                       ClEnsembleCut[nSim, lmin:lmax + 1]))
        SofX = interp1d(xVals, SxToInterpolate)
        #SofXList = SofXList.append(SofX)
        # Apparently appending a function to an empty list is not allowed.  Instead:
        SofXList[nSim] = SofX
        #print(SofXList)  #[nSim]

        doPlot = False  #True
        if doPlot:
            nplotx = (nXvals - 1) * 10 + 1
            plotTheta = np.linspace(0, 180, nplotx)
            plotx = np.cos(plotTheta * np.pi / 180)
            plotS = SofXList[nSim](plotx)
            plt.plot(plotx, plotS)
            plt.title('S(x) for simulation ' + str(nSim + 1))
            plt.show()

    doPlot = True
    if doPlot:
        for nSim in range(nSims):
            nplotx = (nXvals - 1) * 10 + 1
            plotTheta = np.linspace(0, 180, nplotx)
            plotx = np.cos(plotTheta * np.pi / 180)
            plotS = SofXList[nSim](plotx)
            plt.plot(plotx, plotS, label='sim ' + str(nSim + 1))
        #plt.legend()
        plt.title('S(x) for ' + str(nSims) + ' simulations')
        plt.xlabel('x')
        plt.ylabel('S_x')
        plt.show()

    ##########################################################################
    # create Pval(x) for each S(x), using the ensemble
    #   Pval: probability of a result equal to or more extreme

    # create a list of functions
    PvalOfXList = [dummy for i in range(nSims)]
    for nSim in range(nSims):
        print('starting Pval(x) sim ', nSim + 1, ' of ', nSims)

        def PvalOfX(x):
            nUnder = 0  # will also include nEqual
            nOver = 0
            threshold = SofXList[nSim](x)
            for nSim2 in range(nSims):
                Sx = SofXList[nSim2](x)
                if Sx > threshold:
                    nOver += 1
                    #print("Over!  mySx: ", Sx, ", threshold: ", threshold)
                else:
                    nUnder += 1
                    #print("Under!  mySx: ", Sx, ", threshold: ", threshold)
            #print("nUnder: ", nUnder, ", nOver: ", nOver)
            return nUnder / float(nUnder + nOver)
        PvalOfXList[nSim] = PvalOfX

    ##########################################################################
    # find the global minimum for each Pval(x)
    #   simply use the same xVals as above, at one-degree intervals
    #   if there are equal p-values along the range, the one with the highest
    #   xVal will be reported

    PvalMinima = np.empty(nSims)
    xValMinima = np.empty(nSims)

    doTime = True  # to time the run and print output
    startTime = time.time()
    for nSim in range(nSims):
        print('starting minimum Pval(x) search for sim ', nSim + 1, ' of ', nSims)
        PvalOfX = PvalOfXList[nSim]
        #print('function: ', PvalOfX)
        PvalMinima[nSim] = PvalOfX(1.0)
        xValMinima[nSim] = 1.0

        Pvals = np.empty(nXvals)
        for index, xVal in enumerate(xVals):  # will start from 1 and go down to -1
            myPval = PvalOfX(xVal)
            Pvals[index] = myPval
            #print("nSim: ", nSim, ", n: ", index, ", myPval: ", myPval,
            #      ", PvalMinima[nSim]: ", PvalMinima[nSim])
            if myPval < PvalMinima[nSim] and xVal > -0.999:  # avoid the instability
                PvalMinima[nSim] = myPval
                xValMinima[nSim] = xVal
                #print('nSim: ', nSim+1, ', new x for minimum Pval: ', xVal)
        #raw_input("Finished sim "+str(nSim+1)+" of "+str(nSims)+".  Press enter to continue")

        doPlot = True  #False#True
        if doPlot:  # and np.random.uniform() < 0.1:  # randomly choose about 1/10 of them
            plt.plot(xVals, Pvals)
            plt.vlines(xValMinima[nSim], 0, 1)
            plt.xlabel('x = cos(theta), min at ' + str(xValMinima[nSim]))
            plt.ylabel('P-value')
            plt.title('P-values for simulation ' + str(nSim + 1) + ' of ' +
                      str(nSims) + ', p_min = ' + str(PvalMinima[nSim]))
            plt.xlim(-1.05, 1.05)
            plt.ylim(-0.05, 1.05)
            plt.show()

    timeInterval2 = time.time() - startTime
    if doTime:
        print('time elapsed: ', int(timeInterval2 / 60.), ' minutes')

    """
    # A MYSTERY!  Something about the following code causes Pvals to always take
    #   the values of PvalOfXList[nSims](xVals)  WTF?  Omit for now.
    # Testing seems to indicate that PvalOfXList functions still have different
    #   locations in memory, but they all seem to be evaluating the same.
    # However, when the previous block of code is copied to come again after
    #   this one, it behaves properly again.

    # see how well it did
    doPlot = False#True
    if doPlot:
        nPlots = 10
        for nPlot in range(nPlots):
            print('plot ', nPlot+1, ' of ', nPlots)
            toPlot = nPlot  #np.random.randint(0, high=nSims)
            #for nSim in range(nSims):
            Pvals = np.empty(nXvals)
            PvalOfX = PvalOfXList[nPlot]
            print('function: ', PvalOfX)
            for index, xVal in enumerate(xVals):
                Pvals[index] = PvalOfX(xVal)
                #print(index, Pvals[index])
            #print(Pvals)
            plt.plot(xVals, Pvals)
            plt.vlines(xValMinima[toPlot], 0, 1)
            plt.xlabel('x = cos(theta), min at ' + str(xValMinima[toPlot]))
            plt.ylabel('P-value')
            plt.title('P-values for simulation ' + str(toPlot+1) + ' of ' + str(nSims))
            plt.show()
    """

    ##########################################################################
    # create the distribution of S(xValMinima)

    SxEnsembleMin = np.empty(nSims)
    for nSim in range(nSims):
        SxEnsembleMin[nSim] = SofXList[nSim](xValMinima[nSim])

    # extract the SMICA result
    Ssmica = SxEnsembleMin[0]

    ##########################################################################
    # plot/print results

    print('plotting S_x distribution... ')
    myBins = np.logspace(1, 7, 100)
    plt.axvline(x=Ssmica, color='g', linewidth=3, label='SMICA masked')
    plt.hist(SxEnsembleMin[1:], bins=myBins, histtype='step', label='cut sky')
    # [1:] to omit the SMICA value
    plt.gca().set_xscale("log")
    plt.legend()
    plt.xlabel('S_x (microK^4)')
    plt.ylabel('Counts')
    plt.title('S_x of ' + str(nSims - 1) + ' simulated CMBs')
    # -1 due to SMICA in the zero position
    plt.show()

    print(' ')
    print('nSims = ', nSims - 1)
    print('time interval 1: ', timeInterval1, 's, time interval 2: ',
          timeInterval2, 's')
    print('  => ', timeInterval1 / (nSims - 1), ' s/sim, ',
          timeInterval2 / (nSims - 1), ' s/sim')
    print('SMICA optimized S_x: S = ', Ssmica, ', for x = ', xValMinima[0],
          ', with p-value ', PvalMinima[0])
    print(' ')

    print('step 3: profit')
    print('')

import numpy as np
import healpy as hp
from astropy.io import fits
from rotate_tqu import rotate_tqu
import matplotlib.pyplot as plt

cl_file = '/home/matt/wmap/simul_scalCls.fits'
radio_file = '/data/wmap/faraday_MW_realdata.fits'

fwhm = [27.3, 11.7]
bands = [43.1, 94.5]
wl = np.array([299792458. / (b * 1e9) for b in bands])
fwhm_gen = [0, 5, 11.7, 27.3, 45, 60]
nside = 512
npix = hp.nside2npix(512)
cl_gen = hp.read_cl(cl_file)
alpha = hp.read_map(radio_file, hdu='maps/phi')
alpha = hp.ud_grade(alpha, nside)
const = 2 * (wl[0]**2 - wl[1]**2)

plt.figure()
for f in fwhm_gen:
    simul_cmb = hp.synfast(cl_gen, nside, pol=1, new=1,
                           fwhm=f * np.pi / (180. * 60))
    rot_1 = rotate_tqu(simul_cmb, wl[0], alpha)
    rot_2 = rotate_tqu(simul_cmb, wl[1], alpha)
    Delta_Q = (rot_1[1] - rot_2[1]) / const
    alpha_U = alpha * rot_1[2]
    dQ = hp.ma(Delta_Q)
    aU = hp.ma(alpha_U)
    dQ = hp.smoothing(dQ, fwhm=np.pi / 180.)
    aU = hp.smoothing(aU, fwhm=np.pi / 180.)  # the original smoothed dQ twice, a likely copy-paste bug

def main():
    ## Parameters for binning, number of runs
    ## Beam correction
    use_beam = 0
    N_runs = 10
    bins = [1, 5, 10, 20, 25, 50]
    gal_cut = [-20, -10, -5, 0, 5, 10, 20]
    bls = hp.gauss_beam(smoothing_scale * np.pi / (180. * 60.),
                        3 * nside_out - 1)**2
    l = np.arange(3 * nside_out)
    ll = l * (l + 1) / (2 * np.pi)

    #map_prefix = '/home/matt/Planck/data/faraday/simul_maps/'
    #file_prefix = map_prefix + 'planck_simulated_'
    alpha_file = '/data/wmap/faraday_MW_realdata.fits'
    #wl = np.array([299792458./(band*1e9) for band in bands])
    #theory1_array_in = []
    #theory2_array_in = []

    dsets = glob.glob('/data/Planck/LFI*1024*R2.00*.fits')
    dsets.sort()
    #simulate_fields.main()
    for cut in gal_cut:
        cross1_array_in = []
        cross2_array_in = []
        dcross1_array_in = []
        dcross2_array_in = []
        Ndq_array_in = []
        Ndu_array_in = []
        Nau_array_in = []
        Naq_array_in = []
        noise1_array_in = []
        noise2_array_in = []
        print('Galactic cut: {:2d}'.format(cut))
        for i in range(len(bands) - 1):
            for j in range(i + 1, len(bands)):
                print(' Bands: {0:0>3.0f} x {1:0>3.0f}'.format(bands[i], bands[j]))
                tmp_cross1_array = []
                tmp_cross2_array = []
                tmp_noise1_array = []
                tmp_noise2_array = []
                ## arrays used to average over noise realizations
                tmp_Ndq_array = []
                tmp_Ndu_array = []
                tmp_Nau_array = []
                tmp_Naq_array = []
                tmp_c1, tmp_c2 = correlate_signal(
                    dsets[i], dsets[j], wl[i], wl[j], alpha_file,
                    '{0:0>3.0f}x{1:0>3.0f}'.format(bands[i], bands[j]),
                    beam=use_beam, gal_cut=cut)
                cross1_array_in.append(tmp_c1)
                cross2_array_in.append(tmp_c2)
                for n in range(N_runs):
                    print('\tNoise Correlation #{:0>3d}'.format(n + 1))
                    tmp_n1, tmp_n2, tmp_dq, tmp_du, tmp_au, tmp_aq = \
                        correlate_noise(
                            dsets[i], dsets[j], wl[i], wl[j], alpha_file,
                            '{0:0>3.0f}x{1:0>3.0f}'.format(bands[i], bands[j]),
                            beam=use_beam, gal_cut=cut)
                    tmp_noise1_array.append(tmp_n1)
                    tmp_noise2_array.append(tmp_n2)
                    tmp_Ndq_array.append(tmp_dq)
                    tmp_Ndu_array.append(tmp_du)
                    tmp_Naq_array.append(tmp_aq)
                    tmp_Nau_array.append(tmp_au)
                noise1_array_in.append(np.mean(tmp_noise1_array, axis=0))
                noise2_array_in.append(np.mean(tmp_noise2_array, axis=0))
                Ndq_array_in.append(np.mean(tmp_Ndq_array))
                Ndu_array_in.append(np.mean(tmp_Ndu_array))
                Nau_array_in.append(np.mean(tmp_Nau_array))
                Naq_array_in.append(np.mean(tmp_Naq_array))

        #print(str(np.shape(cross1_array_in)) + ' line 587')
        # read in theory Cls
        theory_in = hp.read_cl(
            '/home/matt/Planck/data/faraday/correlation/fr_theory_cl.fits')
        #print(str(np.shape(cross1_array_in)) + ' line 592')
        #out_dic = {'theory': theory_in, 'c1': cross1_array_in, 'c2': cross2_array_in,
        #           'n1': noise1_array_in, 'n2': noise2_array_in, 'ndq': Ndq_array_in,
        #           'nau': Nau_array_in, 'ndu': Ndu_array_in, 'naq': Naq_array_in}
        np.savez('FR_planck_cut_{0:2>02d}.npz'.format(cut), theory=theory_in,
                 c1=cross1_array_in, c2=cross2_array_in, n1=noise1_array_in,
                 n2=noise2_array_in, ndq=Ndq_array_in, nau=Nau_array_in,
                 ndu=Ndu_array_in, naq=Naq_array_in)
        #print(str(np.shape(cross1_array_in)) + ' line 596')

        if cut >= 0:
            fsky = 1. - np.sin(cut * np.pi / 180.)
        else:
            fsky = np.abs(np.sin(cut * np.pi / 180.))
        L = np.sqrt(fsky * 4 * np.pi)
        dl_eff = 2 * np.pi / L

        #print(str(np.shape(cross1_array_in)) + ' line 604')
        #ipdb.set_trace()
        cross1_array_in = np.array(cross1_array_in) / fsky
        cross2_array_in = np.array(cross2_array_in) / fsky
        noise1_array_in = np.array(noise1_array_in) / fsky
        noise2_array_in = np.array(noise2_array_in) / fsky
        Nau_array_in = np.array(Nau_array_in) / fsky
        Naq_array_in = np.array(Naq_array_in) / fsky
        Ndu_array_in = np.array(Ndu_array_in) / fsky
        Ndq_array_in = np.array(Ndq_array_in) / fsky

        #ipdb.set_trace()
        for b in bins:
            #N_dq = np.mean(Ndq_array_in)
            #N_au = np.mean(Nau_array_in)
            # Transpose arrays to match dimensions for operations
            #cross1_array_in = cross1_array_in.T
            #cross2_array_in = cross2_array_in.T
            #noise1_array_in = noise1_array_in.T
            #noise2_array_in = noise2_array_in.T
            #ipdb.set_trace()
            delta1 = np.sqrt(np.divide(
                2. * abs((cross1_array_in - noise1_array_in).T**2 +
                         (cross1_array_in - noise1_array_in).T *
                         (Ndq_array_in + Nau_array_in) / 2. +
                         Ndq_array_in * Nau_array_in / 2.).T,
                ((2. * l + 1.) * np.sqrt(b**2 + dl_eff**2) * fsky)))
            #N_du = np.mean(Ndu_array_in)
            #N_aq = np.mean(Naq_array_in)
            delta2 = np.sqrt(np.divide(
                2. * abs((cross2_array_in - noise2_array_in).T**2 +
                         (cross2_array_in - noise2_array_in).T *
                         (Ndu_array_in + Naq_array_in) / 2. +
                         Ndu_array_in * Naq_array_in / 2.).T,
                ((2. * l + 1.) * np.sqrt(b**2 + dl_eff**2) * fsky)))
            # Transpose arrays back to match for plotting
            #cross1_array = cross1_array_in.T
            #cross2_array = cross2_array_in.T
            #noise1_array = noise1_array_in.T
            #noise2_array = noise2_array_in.T
            #ipdb.set_trace()
            cosmic = np.sqrt(2. / ((2. * l + 1) * np.sqrt(b**2 + dl_eff**2) *
                                   fsky) * theory_in**2)
            delta_array = np.sqrt(delta1**2 + delta2**2)
            cross_array = np.add(np.subtract(cross1_array_in, noise1_array_in),
                                 np.subtract(cross2_array_in, noise2_array_in))
            cross = np.average(cross_array, weights=1. / delta_array**2, axis=0)
            dcross = np.average(delta_array, weights=1. / delta_array**2, axis=0)
            plot_l = []
            if b != 1:
                tmp_c1 = bin_llcl.bin_llcl(ll * cross / bls, b)
                cross = tmp_c1['llcl']
                tmp_dc1 = bin_llcl.bin_llcl(ll * dcross / bls, b)
                dcross = tmp_dc1['llcl']
                plot_l = tmp_c1['l_out']
                tmp_t1 = bin_llcl.bin_llcl(ll * theory_in / bls, b)
                theory = tmp_t1['llcl']
                tmp_c1 = bin_llcl.bin_llcl(ll * cosmic / bls, b)
                cosmic = tmp_c1['llcl']
            else:
                plot_l = l
                theory = np.multiply(ll / bls, theory_in)
                #cross1_array = np.multiply(ll/bls, cross1_array_in)
                #noise1_array = np.multiply(ll/bls, noise1_array_in)
                #cross2_array = np.multiply(ll/bls, cross2_array_in)
                #noise2_array = np.multiply(ll/bls, noise2_array_in)
                cross *= ll / bls
                cosmic *= ll / bls
                dcross *= ll / bls
                #cosmic2 *= ll/bls
                #delta1 *= ll/bls
                #delta2 *= ll/bls
            #ipdb.set_trace()
            bad = np.where(plot_l < dl_eff)
            #noise1 = np.mean(noise1_array, axis=0)
            #noise2 = np.mean(noise2_array, axis=0)
            #theory_array = np.add(theory1_array, theory2_array)
            #theory = np.mean(theory_array, axis=0)
            #dtheory = np.std(theory_array, axis=0, ddof=1)
            #cross_array = np.add(np.subtract(cross1_array, noise1_array),
            #                     np.subtract(cross2_array, noise2_array))
            #delta_array = np.sqrt(delta1**2 + delta2**2)
            ##cross_array = np.add(cross1_array, cross2_array)
            #ipdb.set_trace()
            #cross = np.average(cross_array, weights=1./delta_array**2, axis=0)
            #cosmic = np.sqrt(cosmic1**2 + cosmic2**2)
            #theory1 = np.mean(theory1_array, axis=0)
            #dtheory1 = np.std(theory1_array, axis=0, ddof=1)
            #cross1 = np.mean(cross1_array, axis=0)
            #dcross1 = np.std(np.subtract(cross1_array, noise1), axis=0, ddof=1)
            #dcross = np.average(delta_array, weights=1./delta_array**2, axis=0)
            #ipdb.set_trace()
            plot_binned.plotBinned(
                (cross) * 1e12, dcross * 1e12, plot_l, b,
                'Cross_FR_cut_{0:0>2d}'.format(cut),
                title='Faraday Rotation Correlator',
                theory=theory * 1e12, dtheory=cosmic * 1e12)
            #theory2 = np.mean(theory2_array, axis=0)
            #dtheory2 = np.std(theory2_array, axis=0, ddof=1)
            #cross2 = np.mean(cross2_array, axis=0)
            ##delta2 = np.mean(delta2_array, axis=0)
            #dcross2 = np.std(np.subtract(cross2_array, noise2), axis=0, ddof=1)
            ##ipdb.set_trace()
            #plot_binned.plotBinned((cross2-noise2)*1e12, dcross2*1e12, plot_l, b,
            #                       'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',
            #                       theory=theory2*1e12, dtheory=dtheory2*1e12,
            #                       delta=delta2*1e12, cosmic=cosmic2*1e12)
            #ipdb.set_trace()

            if b == 25:
                a_scales = np.linspace(-10, 10, 1001)
                chi_array = []
                for a in a_scales:
                    chi_array.append(np.sum((cross - a * theory)**2 / (dcross)**2))
                ind = np.argmin(chi_array)
                #likelihood = np.exp(np.multiply(-1./2., chi_array))/np.sqrt(2*np.pi)
                likelihood = np.exp(np.multiply(-1. / 2., chi_array)) / \
                    np.sum(np.exp(np.multiply(-1. / 2., chi_array)) * .05)

                Sig = np.sum(cross / (dcross**2)) / np.sum(1. / dcross**2)
                #Noise = np.std(np.sum(cross_array/dcross**2, axis=1)/np.sum(1./dcross**2))
                Noise = np.sqrt(1. / np.sum(1. / dcross**2))
                Sig1 = np.sum(cross * (theory / dcross)**2) / np.sum((theory / dcross)**2)
                Noise1 = np.sum(dcross * (theory / dcross)**2) / np.sum((theory / dcross)**2)
                SNR = Sig / Noise
                SNR1 = Sig1 / Noise1
                #Sig2 = np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
                #Noise2 = np.sqrt(1./np.sum(1./dcross**2))
                #Sig3 = np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
                #Noise3 = np.sqrt(np.sum(theory**2)/np.sum(theory**2/dcross**2))
                #SNR2 = Sig2/Noise2
                #SNR3 = Sig3/Noise3
                #ipdb.set_trace()

                fig, ax1 = plt.subplots(1, 1)
                ax1.plot(a_scales, likelihood, 'k.')
                ax1.set_title('Faraday Rotation Correlator')
                ax1.set_xlabel('Likelihood scalar')
                ax1.set_ylabel('Likelihood of Correlation')
                fig.savefig('FR_Correlation_Likelihood.png', format='png')
                fig.savefig('FR_Correlation_Likelihood.eps', format='eps')
                #ipdb.set_trace()

                f = open('Maximum_likelihood_{0:0>2d}.txt'.format(cut), 'w')
                f.write('Maximum Likelihood: {0:2.5f}% for scale factor {1:.2f} \n'.format(
                    float(likelihood[ind] * 100), float(a_scales[ind])))
                f.write('Probability of scale factor =1: {0:2.5f}% \n \n'.format(
                    float(likelihood[np.where(a_scales == 1)]) * 100))
                f.write('Detection Levels using Standard Deviation \n')
                f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(
                    SNR, Sig, Noise))
                f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n \n'.format(
                    SNR1, Sig1, Noise))
                #f.write('Detection using Theoretical Noise \n')
                #f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR2, Sig2, Noise2))
                #f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR3, Sig3, Noise3))
                f.close()

            #if b == 1:
            #    xbar = np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
            #    vector = np.matrix(ll[1:]*cross[1:]).T
            #    mu = np.matrix(ll[1:]*theory[1:]).T
            #    fact = len(xbar)-1
            #    cov = (np.dot(xbar, xbar.T)/fact).squeeze()
            #    ipdb.set_trace()
            #    likelihood = np.exp(-np.dot(np.dot((vector-mu).T, lin.inv(cov)), (vector-mu))/2.)/(np.sqrt(2*np.pi*lin.det(cov)))
            #    print('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
            #    f = open('FR_likelihood.txt', 'w')
            #    f.write('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
            #    f.close()

        # note: the 'mv' call below appears to be missing its destination
        subprocess.call('mv Maximum_likelihood_cut_{:2>02d}.txt'.format(cut),
                        shell=True)
        subprocess.call('mv *01.png bin_01/', shell=True)
        subprocess.call('mv *05.png bin_05/', shell=True)
        subprocess.call('mv *10.png bin_10/', shell=True)
        subprocess.call('mv *20.png bin_20/', shell=True)
        subprocess.call('mv *25.png bin_25/', shell=True)
        subprocess.call('mv *50.png bin_50/', shell=True)
        subprocess.call('mv *.eps eps/', shell=True)

if freq1 == 0 and freq2 == 0:
    set_estimator(cross, 143, 143)
else:
    set_estimator(cross, freq1, freq2)
fn_cmb1 = "{}/{:04}/smoothed_cmb_{:04}_{:03}.fits".format(
    mapcache, mc, mc, freq1)
fn_cmb2 = "{}/{:04}/smoothed_cmb_{:04}_{:03}.fits".format(
    mapcache, mc, mc, freq2)
m1 = npipeqml.smooth_and_degrade(fn_cmb1, bl, nsideqml)
m2 = npipeqml.smooth_and_degrade(fn_cmb2, bl, nsideqml)
ee, bb = cross.get_spectra(m1, m2)
cl_cmb = np.zeros([4, lmaxqml + 1])
cl_cmb[1, 2:] = ee
cl_cmb[2, 2:] = bb
hp.write_cl(fn_cl_cmb, cl_cmb)  # EE and BB

print("Loading", fn_cl_cmb, flush=True)
cl_cmb = hp.read_cl(fn_cl_cmb)  # EE and BB
print("Loading", fn_cl_clean, flush=True)
cl_clean = hp.read_cl(fn_cl_clean)  # EE and BB
cl_in.append(cl_cmb[1:3])
cl_out.append(cl_clean[1:3])

cl_in = np.array(cl_in)
cl_out = np.array(cl_out)

nrow = 4
ncol = 4
for imode, mode in enumerate(["EE", "BB"]):
    if do_plot:
        plt.figure(figsize=[4 * ncol, 3 * nrow])

import os
import sys

import healpy as hp
import matplotlib.pyplot as plt
import numpy as np

from planck_util import log_bin

npipegain = 1.002
npipefwhm = np.radians(.5 / 60)
nbin = 300
fsky = 52  # 90, 52, 25

fig = plt.figure(figsize=[18, 12])
plt.suptitle('fsky = {}%'.format(fsky))
axes = [fig.add_subplot(2, 2, 1 + i) for i in range(4)]

for freq1, freq2 in [(70, 100), (100, 143), (100, 217), (143, 217)]:
    name0 = '{:03}x{:03} Legacy'.format(freq1, freq2)
    cl0 = hp.read_cl('cl_{}dx12x{}dx12_{:02}fsky.fits'.format(
        freq1, freq2, fsky))
    name1 = '{:03}x{:03} NPIPE'.format(freq1, freq2)
    cl1 = hp.read_cl('cl_{}x{}_{:02}fsky.fits'.format(freq1, freq2, fsky))
    for freq in [freq1, freq2]:
        if freq > 70:
            cl1 *= npipegain
    lmax = cl0[0].size - 1
    ell = np.arange(lmax + 1)
    ellbin, hits = log_bin(ell, nbin=nbin)
    norm = ell * (ell + 1) / 2 / np.pi * 1e12
    npipebeam = hp.gauss_beam(npipefwhm, lmax=lmax)
    for i in range(2):
        cl0bin, hits = log_bin(norm * cl0[i], nbin=nbin)
        cl1bin, hits = log_bin(norm * cl1[i], nbin=nbin)

ax1.set_title('Faraday Rotation Posterior')
ax1.set_xlabel('Likelihood scalar')
ax1.set_ylabel('Likelihood of Correlation')
fig.savefig('FR_simulation_likelihood_' + name + '_' + title + '.png',
            format='png')
fig.savefig('FR_simulation_likelihood_' + name + '_' + title + '.eps',
            format='eps')

alpha_file = '/data/wmap/faraday_MW_realdata.fits'
#theory_cls = hp.read_cl('/home/matt/Planck/data/faraday/correlation/fr_theory_cl.fits')
cmb_cls = hp.read_cl('/home/matt/wmap/simul_scalCls.fits.lens')

synchrotron_file = '/data/Planck/COM_CompMap_SynchrotronPol-commander_0256_R2.00.fits'
dust_file = '/data/Planck/COM_CompMap_DustPol-commander_1024_R2.00.fits'
dust_t_file = '/data/Planck/COM_CompMap_dust-commander_0256_R2.00.fits'
dust_b_file = '/data/Planck/COM_CompMap_ThermalDust-commander_2048_R2.00.fits'

## Dust intensity scaling factor
hdu_dust_t = fits.open(dust_t_file)
dust_t = hdu_dust_t[1].data.field('TEMP_ML')
hdu_dust_t.close()

dust_t = hp.reorder(dust_t, n2r=1)
dust_t = hp.ud_grade(dust_t, nside_in)

hdu_dust_b = fits.open(dust_b_file)

def test_anafast_xspectra(self):
    cl = hp.anafast(self.map1[0], self.map2[0], lmax=self.lmax,
                    regression=True)
    self.assertEqual(len(cl), self.lmax + 1)
    clx = hp.read_cl(
        os.path.join(
            self.path, 'data',
            'cl_wmap_band_iqumap_r9_7yr_WVxspec_v4_udgraded32_II_lmax64_rmmono_3iter.fits'))
    np.testing.assert_array_almost_equal(cl, clx, decimal=8)

def plot_mc():
    with open('cl_theory_FR_QxaU.json') as f: theory1_array_in=json.load(f)
    with open('cl_theory_FR_UxaQ.json') as f: theory2_array_in=json.load(f)
    with open('cl_array_FR_QxaU.json') as f: cross1_array_in=json.load(f)
    with open('cl_array_FR_UxaQ.json') as f: cross2_array_in=json.load(f)
    #with open('cl_noise_FR_QxaU.json') as f: noise1_array_in=json.load(f)
    #with open('cl_noise_FR_UxaQ.json') as f: noise2_array_in=json.load(f)
    with open('cl_Nau_FR_QxaU.json') as f: Nau_array_in=json.load(f)
    with open('cl_Ndq_FR_QxaU.json') as f: Ndq_array_in=json.load(f)
    with open('cl_Naq_FR_UxaQ.json') as f: Naq_array_in=json.load(f)
    with open('cl_Ndu_FR_UxaQ.json') as f: Ndu_array_in=json.load(f)
    bins=[1,5,10,20,25,50]
    N_runs=500
    #bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),383)**2
    #bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
    bls=(hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)*hp.pixwin(nside_out)[:3*nside_out])**2
    #bls=np.repeat(1,3*nside_out)
    fsky=225.*(np.pi/180.)**2/(4*np.pi)
    l=np.arange(len(cross1_array_in[0]))
    ll=l*(l+1)/(2*np.pi)
    L=np.sqrt(fsky*4*np.pi)
    dl_eff=2*np.pi/L
    theory1_array_in=np.array(theory1_array_in)/(fsky*bls)
    theory2_array_in=np.array(theory2_array_in)/(fsky*bls)
    cross1_array_in=np.array(cross1_array_in)/(fsky*bls)
    cross2_array_in=np.array(cross2_array_in)/(fsky*bls)
    Ndq_array_in=np.array(Ndq_array_in)/fsky
    Ndu_array_in=np.array(Ndu_array_in)/fsky
    Nau_array_in=np.array(Nau_array_in)/fsky
    Naq_array_in=np.array(Naq_array_in)/fsky
    #noise1_array_in=np.array(noise1_array_in)/(fsky*bls)
    #noise2_array_in=np.array(noise2_array_in)/(fsky*bls)
    Ndq_array_in.shape += (1,)
    Ndu_array_in.shape += (1,)
    Nau_array_in.shape += (1,)
    Naq_array_in.shape += (1,)
    for b in bins:
        theory_cls=hp.read_cl('/home/matt/Planck/data/faraday/correlation/fr_theory_cl.fits')
        #N_dq=np.mean(Ndq_array_in)
        #N_au=np.mean(Nau_array_in)
        #delta1=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))**2+(np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
        #delta1=np.sqrt(2.*((np.mean(theory1_array_in,axis=0))**2+(np.mean(theory1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
        # cosmic-variance errors of the two estimators; both are consumed by the
        # binning step below, so cosmic1 must stay live alongside cosmic2
        cosmic1=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=0)**2)
        #N_du=np.mean(Ndu_array_in)
        #N_aq=np.mean(Naq_array_in)
        #delta2=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))**2+(np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
        #delta2=np.sqrt(2.*((np.mean(theory2_array_in,axis=0))**2+(np.mean(theory2_array_in,axis=0))/2.*(N_du+N_aq)+N_du*N_aq/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
        cosmic2=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=0)**2)
        theory1_array=[]
        theory2_array=[]
        cross1_array=[]
        cross2_array=[]
        #noise1_array=[]
        #noise2_array=[]
        Ndq_array=[]
        Ndu_array=[]
        Nau_array=[]
        Naq_array=[]
        plot_l=[]
        if b != 1:
            tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in,b)
            tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in,b)
            tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in,b)
            tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in,b)
            #tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in,b)
            #tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in,b)
            theory1_array=tmp_t1['llcl']
            theory2_array=tmp_t2['llcl']
            theory1_array.shape += (1,)
            theory2_array.shape += (1,)
            theory1_array=theory1_array.T
            theory2_array=theory2_array.T
            plot_l=tmp_t1['l_out']
            cross1_array=tmp_c1['llcl']
            cross2_array=tmp_c2['llcl']
            #noise1_array=tmp_n1['llcl']
            #noise2_array=tmp_n2['llcl']
            Ndq_array=bin_llcl.bin_llcl(ll*Ndq_array_in,b)['llcl']
            Ndu_array=bin_llcl.bin_llcl(ll*Ndu_array_in,b)['llcl']
            Naq_array=bin_llcl.bin_llcl(ll*Naq_array_in,b)['llcl']
            Nau_array=bin_llcl.bin_llcl(ll*Nau_array_in,b)['llcl']
            tmp_c1=bin_llcl.bin_llcl((ll*cosmic1)**2,b)
            #tmp_d1=bin_llcl.bin_llcl((ll*delta1)**2,b)
            cosmic1=np.sqrt(tmp_c1['llcl'])
            #delta1=np.sqrt(tmp_d1['llcl'])
            tmp_c2=bin_llcl.bin_llcl((ll*cosmic2)**2,b)
            #tmp_d2=bin_llcl.bin_llcl((ll*delta2)**2,b)
            cosmic2=np.sqrt(tmp_c2['llcl'])
            #delta2=np.sqrt(tmp_d2['llcl'])
            t_tmp=bin_llcl.bin_llcl(ll*theory_cls,b)
            theory_cls=t_tmp['llcl']
        else:
            plot_l=l
            theory1_array=np.multiply(ll,theory1_array_in)
            cross1_array=np.multiply(ll,cross1_array_in)
            #noise1_array=np.multiply(ll,noise1_array_in)
            theory2_array=np.multiply(ll,theory2_array_in)
            cross2_array=np.multiply(ll,cross2_array_in)
            #noise2_array=np.multiply(ll,noise2_array_in)
            cosmic1*=ll
            cosmic2*=ll
            #delta1*=ll
            #delta2*=ll
            Ndq_array=np.multiply(ll,Ndq_array_in)
            Ndu_array=np.multiply(ll,Ndu_array_in)
            Naq_array=np.multiply(ll,Naq_array_in)
            Nau_array=np.multiply(ll,Nau_array_in)
            theory_cls*=ll
        #ipdb.set_trace()
        bad=np.where(plot_l < 24)
        N_dq=np.mean(Ndq_array,axis=0)
        N_du=np.mean(Ndu_array,axis=0)
        N_aq=np.mean(Naq_array,axis=0)
        N_au=np.mean(Nau_array,axis=0)
        #noise1=np.mean(noise1_array,axis=0)
        #noise2=np.mean(noise2_array,axis=0)
        theory1=np.mean(theory1_array,axis=0)
        theory2=np.mean(theory2_array,axis=0)
        theory_array=np.add(theory1_array,theory2_array)
        theory=np.mean(theory_array,axis=0)
        #dtheory=np.sqrt(np.var(theory1_array,ddof=1) + np.var(theory2_array,ddof=1))
        #cross_array = np.add(np.subtract(cross1_array,noise1),np.subtract(cross2_array,noise2))
        cross_array=np.add(cross1_array,cross2_array)
        cross=np.mean(cross_array,axis=0)
        #dcross=np.std(cross_array,axis=0,ddof=1)
        dcross=np.sqrt(np.var(cross1_array,axis=0,ddof=1) + np.var(cross2_array,axis=0,ddof=1))
        cosmic=np.sqrt(cosmic1**2+cosmic2**2)
        delta1=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory1**2 + theory1*(N_dq+N_au)/2. + N_dq*N_au/2.))
        delta2=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory2**2 + theory2*(N_du+N_aq)/2. + N_du*N_aq/2.))
        delta=np.sqrt(delta1**2+delta2**2)
        #cosmic=np.abs(theory_cls)*np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(dl_eff**2+b**2)))
        #theory1=np.mean(theory1_array,axis=0)
        #dtheory1=np.std(theory1_array,axis=0,ddof=1)
        #cross1=np.mean(cross1_array,axis=0)
        #dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
        #ipdb.set_trace()
        good_l=np.logical_and(plot_l <= 250, plot_l > 25)
        plot_binned.plotBinned(cross*1e12,dcross*1e12,plot_l,b,'Cross_43x95_FR',title='QUIET FR Correlator',theory=theory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)
        #theory2=np.mean(theory2_array,axis=0)
        #dtheory2=np.std(theory2_array,axis=0,ddof=1)
        #cross2=np.mean(cross2_array,axis=0)
        ##delta2=np.mean(delta2_array,axis=0)
        #dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
        ##ipdb.set_trace()
        #plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ',title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
        #ipdb.set_trace()
        if b == 25:
            good_l=np.logical_and(plot_l <= 250, plot_l > 25)
            likelihood(cross[good_l],delta[good_l],theory[good_l],'field1','c2bfr')
        #if b == 1:
        #    xbar=np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
        #    vector=np.matrix(ll[1:]*cross[1:]).T
        #    mu=np.matrix(ll[1:]*theory[1:]).T
        #    fact=len(xbar)-1
        #    cov=(np.dot(xbar,xbar.T)/fact).squeeze()
        #    U,S,V=np.linalg.svd(cov)
        #    _cov=np.einsum('ij,j,jk', V.T,1./S,U.T)
        #    likelhd=np.exp(-np.dot(np.dot((vector-mu).T,_cov),(vector-mu))/2.)/(np.sqrt(2*np.pi*np.prod(S)))
        #    #print('Likelihood of fit is #{0:.5f}'.format(likelhd[0,0]))
        #    f=open('FR_likelihood.txt','w')
        #    f.write('Likelihood of fit is #{0:.5f}'.format(likelhd[0,0]))
        #    f.close()
    subprocess.call('mv *01*.png bin_01/', shell=True)
    subprocess.call('mv *05*.png bin_05/', shell=True)
    subprocess.call('mv *10*.png bin_10/', shell=True)
    subprocess.call('mv *20*.png bin_20/', shell=True)
    subprocess.call('mv *25*.png bin_25/', shell=True)
    subprocess.call('mv *50*.png bin_50/', shell=True)
    subprocess.call('mv *.eps eps/', shell=True)
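# The delta1/delta2 expressions above are a Knox-style error estimate:
# var(C_l) ~ 2*(C_l^2 + C_l*(N1+N2)/2 + N1*N2/2) / ((2l+1)*fsky*Delta_l).
# A minimal self-contained sketch of that formula (the function name and the
# example numbers below are illustrative, not from the original code):
import numpy as np

def knox_delta_cl(ell, cl_signal, n1, n2, fsky, delta_ell):
    # number of independent modes per bandpower
    nmodes = (2.0*ell + 1.0)*fsky*delta_ell
    return np.sqrt(2.0*(cl_signal**2 + cl_signal*(n1 + n2)/2.0 + n1*n2/2.0)/nmodes)

# e.g. a 225 deg^2 patch (fsky ~ 0.0055) with flat signal and noise spectra
dcl = knox_delta_cl(np.arange(2, 384), 1e-12, 1e-13, 1e-13, 0.0055, 25)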
def main(): ##Parameters for Binning, Number of Runs ## Beam correction use_beam=0 # bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),383)**2 #bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2 bls=(hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)*hp.pixwin(nside_out)[:3*nside_out])**2 N_runs=500 bins=[1,5,10,20,50] map_prefix='/home/matt/quiet/quiet_maps/' i_file=map_prefix+'quiet_simulated_43.1' j_file=map_prefix+'quiet_simulated_94.5' alpha_file='/data/wmap/faraday_MW_realdata.fits' bands=[43.1,94.5] names=['43','95'] wl=np.array([299792458./(band*1e9) for band in bands]) cross1_array_in=[] cross2_array_in=[] Ndq_array_in=[] Ndu_array_in=[] Nau_array_in=[] Naq_array_in=[] noise1_array_in=[] noise2_array_in=[] theory1_array_in=[] theory2_array_in=[] #simulate_fields.main() ttmp1,ttmp2=faraday_theory_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam) theory1_array_in.append(ttmp1) theory2_array_in.append(ttmp2) #for n in xrange(N_runs): for i in xrange(N_runs): print(Fore.WHITE+Back.GREEN+Style.BRIGHT+'Correlation #{:03d}'.format(i+1)+Back.RESET+Fore.RESET+Style.RESET_ALL) tmp1,tmp2,n1,n2,n3,n4=faraday_correlate_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam) # ntmp1,ntmp2=faraday_noise_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam) cross1_array_in.append(tmp1) cross2_array_in.append(tmp2) Ndq_array_in.append(n1) Ndu_array_in.append(n2) Nau_array_in.append(n3) Naq_array_in.append(n4) # noise1_array_in.append(ntmp1) # noise2_array_in.append(ntmp2) f=open('cl_theory_FR_QxaU.json','w') json.dump(np.array(theory1_array_in).tolist(),f) f.close() f=open('cl_theory_FR_UxaQ.json','w') json.dump(np.array(theory2_array_in).tolist(),f) f.close() theory1=np.mean(theory1_array_in,axis=0) theory2=np.mean(theory2_array_in,axis=0) hp.write_cl('cl_theory_FR_QxaU.fits',theory1) hp.write_cl('cl_theory_FR_UxaQ.fits',theory2) #f=open('cl_theory_FR_QxaU.json','r') #theory1_array=json.load(f) #f.close() #f=open('cl_theory_FR_UxaQ.json','r') #theory2_array=json.load(f) #f.close() f=open('cl_array_FR_QxaU.json','w') json.dump(np.array(cross1_array_in).tolist(),f) f.close() f=open('cl_array_FR_UxaQ.json','w') json.dump(np.array(cross2_array_in).tolist(),f) f.close() f=open('cl_Ndq_FR_QxaU.json','w') json.dump(np.array(Ndq_array_in).tolist(),f) f.close() f=open('cl_Ndu_FR_UxaQ.json','w') json.dump(np.array(Ndu_array_in).tolist(),f) f.close() f=open('cl_Nau_FR_QxaU.json','w') json.dump(np.array(Nau_array_in).tolist(),f) f.close() f=open('cl_Naq_FR_UxaQ.json','w') json.dump(np.array(Naq_array_in).tolist(),f) f.close() #f=open('cl_noise_FR_QxaU.json','w') #json.dump(np.array(noise1_array_in).tolist(),f) #f.close() #f=open('cl_noise_FR_UxaQ.json','w') #json.dump(np.array(noise2_array_in).tolist(),f) #f.close() bins=[1,5,10,20,25,50] fsky=225.*(np.pi/180.)**2/(4*np.pi) l=np.arange(len(cross1_array_in[0])) ll=l*(l+1)/(2*np.pi) L=np.sqrt(fsky*4*np.pi) dl_eff=2*np.pi/L theory1_array_in=np.array(theory1_array_in)/(fsky*bls) theory2_array_in=np.array(theory2_array_in)/(fsky*bls) cross1_array_in=np.array(cross1_array_in)/(fsky*bls) cross2_array_in=np.array(cross2_array_in)/(fsky*bls) Ndq_array_in=np.array(Ndq_array_in)/(fsky) Ndu_array_in=np.array(Ndu_array_in)/(fsky) Nau_array_in=np.array(Nau_array_in)/(fsky) Naq_array_in=np.array(Naq_array_in)/(fsky) #noise1_array_in=np.array(noise1_array_in)/(fsky*bls) 
    #noise2_array_in=np.array(noise2_array_in)/(fsky*bls)
    Ndq_array_in.shape += (1,)
    Ndu_array_in.shape += (1,)
    Nau_array_in.shape += (1,)
    Naq_array_in.shape += (1,)
    for b in bins:
        theory_cls=hp.read_cl('/home/matt/Planck/data/faraday/correlation/fr_theory_cl.fits')
        #N_dq=np.mean(Ndq_array_in)
        #N_au=np.mean(Nau_array_in)
        #delta1=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))**2+(np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
        #delta1=np.sqrt(2.*((np.mean(theory1_array_in,axis=0))**2+(np.mean(theory1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
        # cosmic-variance errors of the two estimators; both are consumed by the
        # binning step below, so cosmic1 must stay live alongside cosmic2
        cosmic1=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=0)**2)
        #N_du=np.mean(Ndu_array_in)
        #N_aq=np.mean(Naq_array_in)
        #delta2=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))**2+(np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
        #delta2=np.sqrt(2.*((np.mean(theory2_array_in,axis=0))**2+(np.mean(theory2_array_in,axis=0))/2.*(N_du+N_aq)+N_du*N_aq/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
        cosmic2=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=0)**2)
        theory1_array=[]
        theory2_array=[]
        cross1_array=[]
        cross2_array=[]
        #noise1_array=[]
        #noise2_array=[]
        Ndq_array=[]
        Ndu_array=[]
        Nau_array=[]
        Naq_array=[]
        plot_l=[]
        if b != 1:
            tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in,b)
            tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in,b)
            tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in,b)
            tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in,b)
            #tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in,b)
            #tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in,b)
            theory1_array=tmp_t1['llcl']
            theory2_array=tmp_t2['llcl']
            theory1_array.shape += (1,)
            theory2_array.shape += (1,)
            theory1_array=theory1_array.T
            theory2_array=theory2_array.T
            plot_l=tmp_t1['l_out']
            cross1_array=tmp_c1['llcl']
            cross2_array=tmp_c2['llcl']
            #noise1_array=tmp_n1['llcl']
            #noise2_array=tmp_n2['llcl']
            Ndq_array=bin_llcl.bin_llcl(ll*Ndq_array_in,b)['llcl']
            Ndu_array=bin_llcl.bin_llcl(ll*Ndu_array_in,b)['llcl']
            Naq_array=bin_llcl.bin_llcl(ll*Naq_array_in,b)['llcl']
            Nau_array=bin_llcl.bin_llcl(ll*Nau_array_in,b)['llcl']
            tmp_c1=bin_llcl.bin_llcl((ll*cosmic1)**2,b)
            #tmp_d1=bin_llcl.bin_llcl((ll*delta1)**2,b)
            cosmic1=np.sqrt(tmp_c1['llcl'])
            #delta1=np.sqrt(tmp_d1['llcl'])
            tmp_c2=bin_llcl.bin_llcl((ll*cosmic2)**2,b)
            #tmp_d2=bin_llcl.bin_llcl((ll*delta2)**2,b)
            cosmic2=np.sqrt(tmp_c2['llcl'])
            #delta2=np.sqrt(tmp_d2['llcl'])
            t_tmp=bin_llcl.bin_llcl(ll*theory_cls,b)
            theory_cls=t_tmp['llcl']
        else:
            plot_l=l
            theory1_array=np.multiply(ll,theory1_array_in)
            cross1_array=np.multiply(ll,cross1_array_in)
            #noise1_array=np.multiply(ll,noise1_array_in)
            theory2_array=np.multiply(ll,theory2_array_in)
            cross2_array=np.multiply(ll,cross2_array_in)
            #noise2_array=np.multiply(ll,noise2_array_in)
            cosmic1*=ll
            cosmic2*=ll
            #delta1*=ll
            #delta2*=ll
            Ndq_array=np.multiply(ll,Ndq_array_in)
            Ndu_array=np.multiply(ll,Ndu_array_in)
            Naq_array=np.multiply(ll,Naq_array_in)
            Nau_array=np.multiply(ll,Nau_array_in)
            theory_cls*=ll
        #ipdb.set_trace()
        bad=np.where(plot_l < 24)
        N_dq=np.mean(Ndq_array,axis=0)
        N_du=np.mean(Ndu_array,axis=0)
        N_aq=np.mean(Naq_array,axis=0)
        N_au=np.mean(Nau_array,axis=0)
        #noise1=np.mean(noise1_array,axis=0)
        #noise2=np.mean(noise2_array,axis=0)
        theory1=np.mean(theory1_array,axis=0)
        theory2=np.mean(theory2_array,axis=0)
        theory_array=np.add(theory1_array,theory2_array)
        theory=np.mean(theory_array,axis=0)
        #dtheory=np.sqrt(np.var(theory1_array,ddof=1) + np.var(theory2_array,ddof=1))
        #cross_array = np.add(np.subtract(cross1_array,noise1),np.subtract(cross2_array,noise2))
        cross_array=np.add(cross1_array,cross2_array)
        cross=np.mean(cross_array,axis=0)
        #dcross=np.std(cross_array,axis=0,ddof=1)
        dcross=np.sqrt(np.var(cross1_array,axis=0,ddof=1) + np.var(cross2_array,axis=0,ddof=1))
        cosmic=np.sqrt(cosmic1**2+cosmic2**2)
        delta1=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory1**2 + theory1*(N_dq+N_au)/2. + N_dq*N_au/2.))
        delta2=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory2**2 + theory2*(N_du+N_aq)/2. + N_du*N_aq/2.))
        delta=np.sqrt(delta1**2+delta2**2)
        #cosmic=np.abs(theory_cls)*np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(dl_eff**2+b**2)))
        #theory1=np.mean(theory1_array,axis=0)
        #dtheory1=np.std(theory1_array,axis=0,ddof=1)
        #cross1=np.mean(cross1_array,axis=0)
        #dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
        #ipdb.set_trace()
        plot_binned.plotBinned(cross*1e12,dcross*1e12,plot_l,b,'Cross_43x95_FR',title='QUIET FR Correlator',theory=theory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)
        #theory2=np.mean(theory2_array,axis=0)
        #dtheory2=np.std(theory2_array,axis=0,ddof=1)
        #cross2=np.mean(cross2_array,axis=0)
        ##delta2=np.mean(delta2_array,axis=0)
        #dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
        ##ipdb.set_trace()
        #plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ',title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
        #ipdb.set_trace()
        if b == 25:
            good_l=np.logical_and(plot_l <= 200, plot_l > 25)
            likelihood(cross[good_l],delta[good_l],theory[good_l],'field1','c2bfr')
        #if b == 1:
        #    xbar=np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
        #    vector=np.matrix(ll[1:]*cross[1:]).T
        #    mu=np.matrix(ll[1:]*theory[1:]).T
        #    fact=len(xbar)-1
        #    cov=(np.dot(xbar,xbar.T)/fact).squeeze()
        #    U,S,V=np.linalg.svd(cov)
        #    _cov=np.einsum('ij,j,jk', V.T,1./S,U.T)
        #    likelhd=np.exp(-np.dot(np.dot((vector-mu).T,_cov),(vector-mu))/2.)/(np.sqrt(2*np.pi*np.prod(S)))
        #    #print('Likelihood of fit is #{0:.5f}'.format(likelhd[0,0]))
        #    f=open('FR_likelihood.txt','w')
        #    f.write('Likelihood of fit is #{0:.5f}'.format(likelhd[0,0]))
        #    f.close()
    subprocess.call('mv *01*.png bin_01/', shell=True)
    subprocess.call('mv *05*.png bin_05/', shell=True)
    subprocess.call('mv *10*.png bin_10/', shell=True)
    subprocess.call('mv *20*.png bin_20/', shell=True)
    subprocess.call('mv *25*.png bin_25/', shell=True)
    subprocess.call('mv *50*.png bin_50/', shell=True)
    subprocess.call('mv *.eps eps/', shell=True)
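# bin_llcl is an external helper not shown in this file. From its call sites
# (input: an l(l+1)C_l/2pi array or a stack of them, plus a bin width;
# output: a dict with the binned 'llcl' and bin centres 'l_out'), a minimal
# flat-binning sketch consistent with that interface would be:
import numpy as np

def bin_llcl(llcl, bin_width):
    llcl = np.atleast_2d(np.asarray(llcl, dtype=float))
    n_bins = llcl.shape[1] // bin_width
    trimmed = llcl[:, :n_bins*bin_width]
    binned = trimmed.reshape(llcl.shape[0], n_bins, bin_width).mean(axis=2)
    l_out = (np.arange(n_bins) + 0.5)*bin_width
    return {'llcl': np.squeeze(binned), 'l_out': l_out}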
def correlate_theory(i_file,j_file,wl_i,wl_j,alpha_file,bands_name,beam=False,gal_cut=0.,mask_file=None):
    print("Computing Cross Correlations for Bands "+str(bands_name))
    radio_file='/data/wmap/faraday_MW_realdata.fits'
    cl_file='/home/matt/wmap/simul_scalCls.fits.lens'
    hdu_i=fits.open(i_file)
    hdu_j=fits.open(j_file)
    #iqu_band_i=hdu_i['stokes iqu'].data
    #iqu_band_j=hdu_j['stokes iqu'].data
    nside_i=hdu_i['stokes iqu'].header['nside']
    nside_j=hdu_j['stokes iqu'].header['nside']
    hdu_i.close()
    hdu_j.close()
    ind_i=np.where(wl == wl_i)[0][0]
    ind_j=np.where(wl == wl_j)[0][0]
    cls=hp.read_cl(cl_file)
    simul_cmb=hp.sphtfunc.synfast(cls,max(nside_i,nside_j),fwhm=0.,new=1,pol=1)
    alpha_radio=hp.read_map(radio_file,hdu='maps/phi')
    ## Generate CMB for file J
    alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside_j,order_in='ring',order_out='ring')
    simul_cmb=hp.ud_grade(simul_cmb,nside_out=nside_j)
    tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl_j,alpha_radio)
    iqu_band_j=hp.smoothing(tmp_cmb,fwhm=np.sqrt((beam_fwhm[ind_j]*np.pi/(180.*60.))**2-hp.nside2pixarea(nside_j)),verbose=False)
    ## Generate CMB for file I
    alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside_i,order_in='ring',order_out='ring')
    simul_cmb=hp.ud_grade(simul_cmb,nside_out=nside_i)
    tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl_i,alpha_radio)
    #ipdb.set_trace()
    iqu_band_i=hp.smoothing(tmp_cmb,fwhm=np.sqrt((beam_fwhm[ind_i]*np.pi/(180.*60.))**2-hp.nside2pixarea(nside_i)),verbose=False)
    alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
    iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt(smoothing_scale**2-beam_fwhm[ind_i]**2)*np.pi/(180.*60.),verbose=False)
    iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt(smoothing_scale**2-beam_fwhm[ind_j]**2)*np.pi/(180.*60.),verbose=False)
    #alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)
    iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=nside_out,order_in='ring')
    iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=nside_out,order_in='ring')
    const=2.*(wl_i**2-wl_j**2)
    Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
    Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
    alpha_u=alpha_radio*iqu_band_j[2]
    alpha_q=-alpha_radio*iqu_band_j[1]
    DQm=hp.ma(Delta_Q)
    DUm=hp.ma(Delta_U)
    aQm=hp.ma(alpha_q)
    aUm=hp.ma(alpha_u)
    Bl_factor=np.repeat(1.,3*nside_out)
    #ipdb.set_trace()
    if beam:
        Bl_factor=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)
    pix_area=hp.nside2pixarea(nside_out)
    #ipdb.set_trace()
    mask_bool=np.repeat(False,npix_out)
    if gal_cut > 0:
        pix=np.arange(hp.nside2npix(nside_out))
        x,y,z=hp.pix2vec(nside_out,pix,nest=0)
        mask_bool=np.abs(z) <= np.sin(gal_cut*np.pi/180.)
    #mask_bool1[np.where(np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)<.2e-6)]=True
    if mask_file is not None:
        mask_hdu=fits.open(mask_file)
        mask=mask_hdu[1].data.field(0)
        mask_hdu.close()
        mask=hp.reorder(mask,n2r=1)
        mask=hp.ud_grade(mask,nside_out=nside_out)
        mask_bool=~mask.astype(bool)
        fsky=1. - np.sum(mask)/float(len(mask))
        L=np.sqrt(fsky*4*np.pi)
        dl_eff=2*np.pi/L
    DQm.mask=mask_bool
    DUm.mask=mask_bool
    aQm.mask=mask_bool
    aUm.mask=mask_bool
    cross1=hp.anafast(DQm,map2=aUm)/Bl_factor**2
    cross2=hp.anafast(DUm,map2=aQm)/Bl_factor**2
    #cross1=np.mean(cross1_array,axis=0)  ## Average over all Cross Spectra
    #cross2=np.mean(cross2_array,axis=0)  ## Average over all Cross Spectra
    return (cross1,cross2)
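# The estimator above cross-correlates Delta_Q = (Q_i - Q_j)/(2(lambda_i^2 -
# lambda_j^2)) with alpha*U (and Delta_U with -alpha*Q). A self-contained toy
# version at low resolution, with purely synthetic inputs (all spectra and the
# rotation-measure map here are placeholders, not the QUIET data):
import numpy as np
import healpy as hp

nside_toy, lmax_toy = 64, 128
wl_a, wl_b = 299792458./43.1e9, 299792458./94.5e9
cl_flat = np.full(lmax_toy + 1, 1e-12)
q_a, q_b = hp.synfast(cl_flat, nside_toy), hp.synfast(cl_flat, nside_toy)
u_a, u_b = hp.synfast(cl_flat, nside_toy), hp.synfast(cl_flat, nside_toy)
alpha_toy = hp.synfast(np.ones(lmax_toy + 1), nside_toy)
const_toy = 2.*(wl_a**2 - wl_b**2)
cross_qu = hp.anafast((q_a - q_b)/const_toy, map2=alpha_toy*u_b, lmax=lmax_toy)
cross_uq = hp.anafast((u_a - u_b)/const_toy, map2=-alpha_toy*q_b, lmax=lmax_toy)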
Cl, Dl = cl_dl(ell, r=r)
delta_Dls = delta_dl(q, ell, delta_ell, f_sky, apodization_factor, N_h, NET,
                     omega, N_eq, time, optical_efficiency, r=r,
                     only_noise=True, kind='Interferometer')
# Comparison with montecarlos
# Xpol
xpols = np.load('/home/fincardona/Qubic/map_making/Xpol/delta_Cls_' +
                'n{}_N{}_bin{}_{}MAPS_s{}_mi{}.npy'.format(
                    NFREQS, NPOINTS, delta_ell, 100, len(sampling), maxiter))
ell_binned_xpol = xpols[0]
delta_Cls_xpol = xpols[1]
delta_Dls_xpol = cl2dl(delta_Cls_xpol, ell_binned_xpol)
# Xpure -- only delta_ell = 20
ell_binned_xpure = hp.read_cl('./bin_xpure_uniform_federico.fits')
delta_Dls_xpure = hp.read_cl('./deltaDl_BB_xpure_uniform_federico.fits')
# Display
mp.figure()
mp.loglog(ell, Dl, label='$D_\ell^{BB}\ $')
mp.plot(ell, delta_Dls, '--', label='$ \Delta D_\ell^{BB}\ Battistelli$')
mp.plot(ell_binned_xpol, delta_Dls_xpol[2], '-v', label='$ \Delta D_\ell^{BB}\ Xpol$')
mp.plot(ell_binned_xpure, delta_Dls_xpure, '-o', label='$ \Delta D_\ell^{BB}\ Xpure$')
mp.xlim(40, 300)
mp.ylim([1e-4, 1e-1])
mp.xlabel('$\ell$', fontsize=16)
mp.ylabel('$[\mu K^2]$')
mp.title('r = {}; n{} N{}'.format(r, q.filter.NFREQS, q.detector.NPOINTS))
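# cl2dl is used above but not defined in this excerpt; consistent with its
# call signature, a minimal implementation is the usual conversion
# D_l = l(l+1) C_l / (2 pi):
import numpy as np

def cl2dl(cl, ell):
    ell = np.asarray(ell, dtype=float)
    return cl*ell*(ell + 1)/(2*np.pi)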
def main(): t1=time() radio_file='/data/wmap/faraday_MW_realdata.fits' cl_file='/home/matt/wmap/simul_scalCls.fits' output_prefix='/home/matt/quiet/quiet_maps/' nside=1024 nside_in=1024 npix=hp.nside2npix(nside) bands=[43.1,94.5] q_fwhm=[27.3,11.7] pix_area= np.sqrt(hp.nside2pixarea(1024))*60*180./np.pi noise_const_q=np.array([36./pix_area for f in q_fwhm])*1e-6 # noise_const_q=np.array([36./fwhm for fwhm in q_fwhm])*1e-6 centers=np.array([convertcenter([12,4],-39),convertcenter([5,12],-39),convertcenter([0,48],-48),convertcenter([22,44],-36)]) wl=np.array([299792458./(band*1e9) for band in bands]) synchrotron_file='/data/Planck/COM_CompMap_SynchrotronPol-commander_0256_R2.00.fits' dust_file='/data/Planck/COM_CompMap_DustPol-commander_1024_R2.00.fits' dust_t_file='/data/Planck/COM_CompMap_dust-commander_0256_R2.00.fits' dust_b_file='/data/Planck/COM_CompMap_ThermalDust-commander_2048_R2.00.fits' ##Dust intensity scaling factor hdu_dust_t=fits.open(dust_t_file) dust_t=hdu_dust_t[1].data.field('TEMP_ML') hdu_dust_t.close() dust_t=hp.reorder(dust_t,n2r=1) dust_t=hp.ud_grade(dust_t,nside_in) hdu_dust_b=fits.open(dust_b_file) dust_beta=hdu_dust_b[1].data.field('BETA_ML_FULL') hdu_dust_b.close dust_beta=hp.reorder(dust_beta,n2r=1) dust_beta=hp.ud_grade(dust_beta,nside_in) gamma_dust=6.626e-34/(1.38e-23*dust_t) krj_to_kcmb=np.array([1.,1.]) sync_factor=krj_to_kcmb*np.array([1e-6*(30./x)**2 for x in bands]) dust_factor=np.array([krj_to_kcmb[i]*1e-6*(np.exp(gamma_dust*353e9)-1)/(np.exp(gamma_dust*x*1e9)-1)* (x/353.)**(1+dust_beta) for i,x in enumerate(bands)]) print('Preparing Foregrounds') bl_40=hp.gauss_beam(40.*np.pi/(180.*60.),3*1024-1) bl_10=hp.gauss_beam(10.*np.pi/(180.*60.),3*1024-1) hdu_sync=fits.open(synchrotron_file) sync_q=hdu_sync[1].data.field(0) sync_u=hdu_sync[1].data.field(1) sync_q=hp.reorder(sync_q,n2r=1) tmp_alm=hp.map2alm(sync_q) tmp_alm=hp.almxfl(tmp_alm,1./bl_40) #simul_sync_q=hp.smoothing(sync_q,fwhm=40.*np.pi/(180.*60.),verbose=False,invert=True) sync_q=hp.alm2map(tmp_alm,nside_in,verbose=False) sync_u=hp.reorder(sync_u,n2r=1) tmp_alm=hp.map2alm(sync_u) tmp_alm=hp.almxfl(tmp_alm,1./bl_40) #simul_sync_q=hp.smoothing(sync_q,fwhm=40.*np.pi/(180.*60.),verbose=False,invert=True) sync_u=hp.alm2map(tmp_alm,nside_in,verbose=False) hdu_sync.close() hdu_dust=fits.open(dust_file) dust_q=hdu_dust[1].data.field(0) dust_u=hdu_dust[1].data.field(1) hdu_dust.close() dust_q=hp.reorder(dust_q,n2r=1) tmp_alm=hp.map2alm(dust_q) tmp_alm=hp.almxfl(tmp_alm,1./bl_10) #simul_dust_q=hp.smoothing(dust_q,fwhm=10.*np.pi/(180.*60.),verbose=False,invert=True) dust_q=hp.alm2map(tmp_alm,nside_in,verbose=False) dust_q_back=np.copy(dust_q) dust_u=hp.reorder(dust_u,n2r=1) tmp_alm=hp.map2alm(dust_u) tmp_alm=hp.almxfl(tmp_alm,1./bl_10) #simul_dust_q=hp.smoothing(dust_q,fwhm=10.*np.pi/(180.*60.),verbose=False,invert=True) dust_u=hp.alm2map(tmp_alm,nside_in,verbose=False) dust_u_back=np.copy(dust_u) print 'Generating Map' cls=hp.read_cl(cl_file) simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1); alpha_radio=hp.read_map(radio_file,hdu='maps/phi'); alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring') num_wl=len(wl) no_noise=[] t_array=np.zeros((num_wl,npix)) q_array=np.zeros((num_wl,npix)) sigma_q=np.zeros((num_wl,npix)) u_array=np.zeros((num_wl,npix)) sigma_u=np.zeros((num_wl,npix)) for i in range(num_wl): print('\tFrequency: {0:2.1f}'.format(bands[i])) tmp_cmb=rotate_tqu(simul_cmb,wl[i],alpha_radio); no_noise.append(hp.smoothing(np.copy(tmp_cmb), 
fwhm=q_fwhm[i]*np.pi/(180*60.),pol=1,verbose=False)) sigma_q[i]=np.random.normal(0,1,npix)*noise_const_q[i] sigma_u[i]=np.random.normal(0,1,npix)*noise_const_q[i] tmp_cmb[1]+= np.copy( dust_factor[i]*dust_q+sync_factor[i]*sync_q ) tmp_cmb[2]+= np.copy( dust_factor[i]*dust_u+sync_factor[i]*sync_u ) tmp_out=hp.sphtfunc.smoothing(tmp_cmb,fwhm=q_fwhm[i]*np.pi/(180.*60.),pol=1,verbose=False) t_array[i],q_array[i],u_array[i]=tmp_out #sigma_q[i]=hp.sphtfunc.smoothing(tmp_q,fwhm=np.pi/180.) #sigma_u[i]=hp.sphtfunc.smoothing(tmp_u,fwhm=np.pi/180.) print "Time to Write Fields" dx=1./(60.)*3 nx=np.int(15/dx) ny=nx all_pix=[] field_pix=[] square_pix=[] quiet_mask=np.zeros(npix) prim=fits.PrimaryHDU() prim.header['COMMENT']="Simulated Quiet Data" prim.header['COMMENT']="Created using CAMB" for p in xrange(len(centers)): coords=regioncoords(centers[p,0],centers[p,1],dx,nx,ny) coords_sky=SkyCoord(ra=coords[:,0],dec=coords[:,1],unit=u.degree,frame='fk5') phi=coords_sky.galactic.l.deg*np.pi/180. theta=(90-coords_sky.galactic.b.deg)*np.pi/180. pixels=hp.ang2pix(nside,theta,phi) quiet_mask[pixels]=1 unique_pix=(np.unique(pixels).tolist()) field_pix.append(unique_pix) square_pix.append(pixels) all_pix.extend(unique_pix) pix_col=fits.Column(name='PIXEL',format='1J',array=unique_pix) for f in xrange(num_wl): region_mask=np.zeros(npix) region_mask[pixels]=1 region_map_t=np.array(t_array[f][pixels]).reshape((nx,ny)) region_map_q=np.array(q_array[f][pixels]).reshape((nx,ny)) region_map_u=np.array(u_array[f][pixels]).reshape((nx,ny)) region_delta_q=np.array(sigma_q[f][pixels]).reshape((nx,ny)) region_delta_u=np.array(sigma_u[f][pixels]).reshape((nx,ny)) prim=fits.PrimaryHDU() q_head=fits.ImageHDU([region_map_t,region_map_q,region_map_u],name="STOKES IQU") q_head.header['TFIELDS']=(3,'number of fields in each row') q_head.header['TTYPE1']=('SIGNAL', "STOKES I, Temperature") q_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map') q_head.header['TTYPE2']='STOKES Q' q_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map') q_head.header['TTYPE3']='STOKES U' q_head.header['TUNIT3']=('K_{CMB} Thermodynamic', 'Physical Units of Map') q_head.header['TFORM1']='E' q_head.header['TFORM1']='E' q_head.header['TFORM2']='E' q_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation") q_head.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED") q_head.header["COORDSYS"]=('G','Pixelization coordinate system') q_head.header['NSIDE']=(1024,'Healpix Resolution paramter') q_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL') q_head.header['INDXSCHM']=('EXPLICIT','indexing : IMPLICIT of EXPLICIT') err_head=fits.ImageHDU([region_delta_q,region_delta_u],name="Q/U UNCERTAINTIES") err_head.header['TFIELDS']=(2,'number of fields in each row') err_head.header['NSIDE']=1024 err_head.header['ORDERING']='RING' err_head.header['TTYPE1']='SIGMA Q' err_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map') err_head.header['TTYPE2']='SIGMA U' err_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map') err_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation") err_head.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL') err_head.header['INDXSCHM']=('EXPLICIT','indexing : IMPLICIT of EXPLICIT') m_head=fits.ImageHDU(region_mask,name='MASK') sqr_pix_col=fits.Column(name='PIXELS',format='1J',array=pixels) sqr_pix_cols=fits.ColDefs([sqr_pix_col]) sqr_pix_head=fits.BinTableHDU.from_columns(sqr_pix_cols) 
hdulist=fits.HDUList([prim,q_head,err_head,m_head,sqr_pix_head]) hdulist.writeto(output_prefix+"quiet_simulated_{:.1f}_cmb{:1d}.fits".format(bands[f],p+1),clobber=True) print '{:.1f}_cmb{:1d}.fits'.format(bands[f],p+1) mask_head=fits.ImageHDU(quiet_mask,name='MASK') pix_col=fits.Column(name='PIXEL',format='1J',array=all_pix) field_pix_col1=fits.Column(name='PIXELS FIELD 1',format='1J',array=field_pix[0]) field_pix_col2=fits.Column(name='PIXELS FIELD 2',format='1J',array=field_pix[1]) field_pix_col3=fits.Column(name='PIXELS FIELD 3',format='1J',array=field_pix[2]) field_pix_col4=fits.Column(name='PIXELS FIELD 4',format='1J',array=field_pix[3]) sqr_pix_col1=fits.Column(name='PIXELS FIELD 1',format='1J',array=square_pix[0]) sqr_pix_col2=fits.Column(name='PIXELS FIELD 2',format='1J',array=square_pix[1]) sqr_pix_col3=fits.Column(name='PIXELS FIELD 3',format='1J',array=square_pix[2]) sqr_pix_col4=fits.Column(name='PIXELS FIELD 4',format='1J',array=square_pix[3]) cols1=fits.ColDefs([sqr_pix_col1,sqr_pix_col2,sqr_pix_col3,sqr_pix_col4]) tbhdu1=fits.BinTableHDU.from_columns(cols1) tbhdu1.header['TFIELDS']=(4,'number of fields in each row') tbhdu1.header["TTYPE1"]=("PIXELS CMB FIELD 1","SQUARE PIXEL NUMBER BY FIELD") tbhdu1.header["TTYPE2"]=("PIXELS CMB FIELD 2","SQUARE PIXEL NUMBER BY FIELD") tbhdu1.header["TTYPE3"]=("PIXELS CMB FIELD 3","SQUARE PIXEL NUMBER BY FIELD") tbhdu1.header["TTYPE4"]=("PIXELS CMB FIELD 4","SQUARE PIXEL NUMBER BY FIELD") tbhdu1.header["EXTNAME"]="SQUARE PIXELS" tbhdu1.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation") tbhdu1.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED") tbhdu1.header["NSIDE"]=(nside,'Healpix Resolution paramter') tbhdu1.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL') tbhdu1.header['OBS_NPIX']=(len(all_pix),'Number of pixels observed') tbhdu1.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT') tbhdu1.header["COORDSYS"]=('G','Pixelization coordinate system') for i in xrange(num_wl): cut_t,cut_q,cut_u=t_array[i][all_pix],q_array[i][all_pix],u_array[i][all_pix] cut_dq,cut_du=sigma_q[i][all_pix],sigma_u[i][all_pix] col_t=fits.Column(name='SIGNAL',format='1E',unit='K_{CMB}',array=cut_t) col_q=fits.Column(name='STOKES Q',format='1E',unit='K_{CMB}',array=cut_q) col_u=fits.Column(name='STOKES U',format='1E',unit='K_{CMB}',array=cut_u) col_dq=fits.Column(name='Q ERROR',format='1E',unit='K_{CMB}',array=cut_dq) col_du=fits.Column(name='U ERROR',format='1E',unit='K_{CMB}',array=cut_du) cols=fits.ColDefs([pix_col,col_q,col_u,col_dq,col_du]) tbhdu=fits.BinTableHDU.from_columns(cols) tbhdu.header['TFIELDS']=(5,'number of fields in each row') tbhdu.header["TTYPE2"]=("SIGNAL","STOKES T") tbhdu.header["EXTNAME"]="SIGNAL" tbhdu.header['POLAR']= 'T' tbhdu.header['POLCCONV']=('COSMO','Coord. 
Convention for polarisation COSMO/IAU') tbhdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation") tbhdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED") tbhdu.header["NSIDE"]=(1024,'Healpix Resolution paramter') tbhdu.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL') tbhdu.header['OBS_NPIX']=(len(all_pix),'Number of pixels observed') tbhdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT') tbhdu.header["COORDSYS"]=('G','Pixelization coordinate system') tblist=fits.HDUList([prim,tbhdu]) tblist.writeto(output_prefix+'quiet_partial_simulated_{:.1f}.fits'.format(bands[i]),clobber=True) q_head=fits.ImageHDU(np.array([t_array[i],q_array[i],u_array[i]]), name='STOKES IQU') q_head.header['TFIELDS']=(3,'number of fields in each row') q_head.header['TYPE1']=('SIGNAL', "STOKES I, Temperature") q_head.header['TYPE2']='STOKES Q' q_head.header['TYPE3']='STOKES U' q_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map') q_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map') q_head.header['TUNIT3']=('K_{CMB} Thermodynamic', 'Physical Units of Map') q_head.header['TFORM1']='E' q_head.header['TFORM1']='E' q_head.header['TFORM2']='E' q_head.header['EXTNAME']='STOKES IQU' q_head.header['POLAR']= 'T' q_head.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU') q_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation") q_head.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED") q_head.header['NSIDE']=(1024,'Healpix Resolution paramter') q_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL') q_head.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT') q_head.header['BAD_DATA']=(hp.UNSEEN,'Sentinel value given to bad pixels') q_head.header["COORDSYS"]=('G','Pixelization coordinate system') ######################### theo_head=fits.ImageHDU(no_noise[i], name='No Noise IQU') theo_head.header['TFIELDS']=(3,'number of fields in each row') theo_head.header['TYPE1']=('SIGNAL', "STOKES I, Temperature") theo_head.header['TYPE2']='STOKES Q' theo_head.header['TYPE3']='STOKES U' theo_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map') theo_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map') theo_head.header['TUNIT3']=('K_{CMB} Thermodynamic', 'Physical Units of Map') theo_head.header['TFORM1']='E' theo_head.header['TFORM1']='E' theo_head.header['TFORM2']='E' theo_head.header['EXTNAME']='no noise iqu' theo_head.header['POLAR']= 'T' theo_head.header['POLCCONV']=('COSMO','Coord. 
Convention for polarisation COSMO/IAU') theo_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation") theo_head.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED") theo_head.header['NSIDE']=(1024,'Healpix Resolution paramter') theo_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL') theo_head.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT') theo_head.header['BAD_DATA']=(hp.UNSEEN,'Sentinel value given to bad pixels') theo_head.header["COORDSYS"]=('G','Pixelization coordinate system') #################################### #tblist=fits.HDUList([prim,tbhdu]) err_head=fits.ImageHDU(np.array([sigma_q[i],sigma_u[i]]),name='Q/U UNCERTAINTIES') err_head.header['TFIELDS']=(2,'number of fields in each row') err_head.header['NSIDE']=1024 err_head.header['ORDERING']='RING' err_head.header['TTYPE1']='SIGMA Q' err_head.header['TTYPE2']='SIGMA U' err_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map') err_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map') err_head.header['TFORM1']='E' err_head.header['TFORM2']='E' err_head.header['EXTNAME']='Q/U UNCERTAINTIES' err_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation") err_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL') err_head.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT') err_head.header['BAD_DATA']=(hp.UNSEEN,'Sentinel value given to bad pixels') cols=fits.ColDefs([field_pix_col1,field_pix_col2,field_pix_col3,field_pix_col4]) tbhdu=fits.BinTableHDU.from_columns(cols) tbhdu.header['TFIELDS']=(4,'number of fields in each row') tbhdu.header["TTYPE1"]=("PIXELS CMB FIELD 1","PIXEL NUMBER BY FIELD") tbhdu.header["TTYPE2"]=("PIXELS CMB FIELD 2","PIXEL NUMBER BY FIELD") tbhdu.header["TTYPE3"]=("PIXELS CMB FIELD 3","PIXEL NUMBER BY FIELD") tbhdu.header["TTYPE4"]=("PIXELS CMB FIELD 4","PIXEL NUMBER BY FIELD") tbhdu.header["EXTNAME"]="FIELD PIXELS" tbhdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation") tbhdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED") tbhdu.header["NSIDE"]=(nside,'Healpix Resolution paramter') tbhdu.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL') tbhdu.header['OBS_NPIX']=(len(all_pix),'Number of pixels observed') tbhdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT') tbhdu.header["COORDSYS"]=('G','Pixelization coordinate system') hdulist=fits.HDUList([prim,q_head,err_head,mask_head,tbhdu,tbhdu1,theo_head]) hdulist.writeto(output_prefix+"quiet_simulated_{:.1f}.fits".format(bands[i]),clobber=True) print "quiet_simulated_{:.1f}.fits".format(bands[i])
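# A quick sanity check on one of the files written above: read the Stokes cube
# and mask back with astropy and view the masked Q map (the file name follows
# the writeto() pattern in this function; treat the exact path as an example):
import healpy as hp
from astropy.io import fits

with fits.open('/home/matt/quiet/quiet_maps/quiet_simulated_43.1.fits') as hdus:
    iqu = hdus['STOKES IQU'].data   # (3, npix) array: I, Q, U
    mask = hdus['MASK'].data
hp.mollview(iqu[1]*mask, title='Masked Stokes Q')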
def make_cmb_sims(params): """ Write cmb maps on disk Parameters ---------- params: module contating all the simulation parameters """ nmc_cmb = params.nmc_cmb nside = params.nside smooth = params.gaussian_smooth ch_name = [ 'SO_SAT_27', 'SO_SAT_39', 'SO_SAT_93', 'SO_SAT_145', 'SO_SAT_225', 'SO_SAT_280' ] freqs = sonc.Simons_Observatory_V3_SA_bands() beams = sonc.Simons_Observatory_V3_SA_beams() band_int = params.band_int parallel = params.parallel root_dir = params.out_dir out_dir = f'{root_dir}/cmb/' file_str = params.file_string seed_cmb = params.seed_cmb cmb_ps_file = params.cmb_ps_file rank = 0 size = 1 if params.parallel: from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() if not os.path.exists(out_dir) and rank == 0: os.makedirs(out_dir) if cmb_ps_file: print(cmb_ps_file) cl_cmb = hp.read_cl(cmb_ps_file) else: cmb_ps_scalar_file = os.path.join(os.path.dirname(__file__), 'datautils/Cls_Planck2018_r0.fits') cl_cmb_scalar = hp.read_cl(cmb_ps_scalar_file) cmb_ps_tensor_r1_file = os.path.join( os.path.dirname(__file__), 'datautils/Cls_Planck2018_tensor_r1.fits') cmb_r = params.cmb_r cl_cmb_tensor = hp.read_cl(cmb_ps_tensor_r1_file) * cmb_r cl_cmb = cl_cmb_scalar + cl_cmb_tensor nmc_cmb = math.ceil(nmc_cmb / size) * size if nmc_cmb != params.nmc_cmb: print_rnk0(f'WARNING: setting nmc_cmb = {nmc_cmb}', rank) perrank = nmc_cmb // size for nmc in range(rank * perrank, (rank + 1) * perrank): if seed_cmb: np.random.seed(seed_cmb + nmc) nmc_str = str(nmc).zfill(4) if not os.path.exists(out_dir + nmc_str): os.makedirs(out_dir + nmc_str) cmb_temp = hp.synfast(cl_cmb, nside, new=True, verbose=False) file_name = f'cmb_{nmc_str}_{file_str}.fits' file_tot_path = f'{out_dir}{nmc_str}/{file_name}' hp.write_map(file_tot_path, cmb_temp, overwrite=True, dtype=np.float32) os.environ["PYSM_LOCAL_DATA"] = f'{out_dir}' sky = pysm3.Sky(nside=nside, component_objects=[ pysm3.CMBMap(nside, map_IQU=f'{nmc_str}/{file_name}') ]) for nch, chnl in enumerate(ch_name): freq = freqs[nch] fwhm = beams[nch] cmb_map = sky.get_emission(freq * u.GHz) cmb_map = cmb_map.to(u.uK_CMB, equivalencies=u.cmb_equivalencies(freq * u.GHz)) if smooth: cmb_map_smt = hp.smoothing(cmb_map, fwhm=np.radians(fwhm / 60.), verbose=False) else: cmb_map_smt = cmb_map file_name = f'{chnl}_cmb_{nmc_str}_{file_str}.fits' file_tot_path = f'{out_dir}{nmc_str}/{file_name}' hp.write_map(file_tot_path, cmb_map_smt, overwrite=True, dtype=np.float32)
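# The rank bookkeeping in make_cmb_sims rounds nmc_cmb up to a multiple of the
# MPI communicator size so every rank loops over an equal, disjoint index
# range. The same scheme in isolation:
import math

def split_mc(nmc, rank, size):
    nmc = math.ceil(nmc/size)*size      # pad so the count divides evenly
    perrank = nmc // size
    return rank*perrank, (rank + 1)*perrank

start, stop = split_mc(100, rank=0, size=8)   # -> (0, 13); 104 sims in total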
lmax = len(cross_cls)
beam_lmax = lmax
l = np.arange(beam_lmax)
ll = l*(l+1)/(2*np.pi)
beam_14 = hp.gauss_beam(14.4*np.pi/(180.*60.), beam_lmax-1)
beam_5 = hp.gauss_beam(5.*np.pi/(180.*60.), beam_lmax-1)
b14_180 = hp.gauss_beam(np.sqrt((3*60.)**2 - 14.4**2)*np.pi/(180.*60.), beam_lmax-1)
b5_180 = hp.gauss_beam(np.sqrt((3*60.)**2 - 5.**2)*np.pi/(180.*60.), beam_lmax-1)
beam_14 *= (1. - b14_180)
beam_5 *= (1. - b5_180)
pix = hp.pixwin(256)[:beam_lmax]
theory_cls = hp.read_cl(theory_cl_file)
theory_cls = theory_cls[0][:beam_lmax]
#theory_cls[:2]=1e-10
cross_cls = cross_cls[:beam_lmax]
radio_cls = radio_cls[:beam_lmax]
cmb_cls = cmb_cls[:beam_lmax]
wls = hp.anafast((~radio_fr.mask).astype(float))[:beam_lmax]
# effective fsky*w2 from the mask spectrum, excluding the monopole
fskyw2 = np.sum([(2*m+1)*wls[m] if m > 0 else 0 for m in xrange(len(wls))])/(4*np.pi)
fsky = 1. - np.sum(mask_bool).astype(float)/len(mask_bool)
L = np.sqrt(4*np.pi*fsky)
dl_eff = 2*np.pi/L
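# fskyw2 above is estimated from the mask's power spectrum; the common
# pixel-domain equivalent uses moments of the mask weights directly
# (f_sky,i = <w^i>; both moments reduce to the usual f_sky for a binary mask):
import numpy as np

def mask_fsky_moments(weight_map):
    w = np.asarray(weight_map, dtype=float)
    return w.mean(), (w**2).mean()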
pool.close() pool.join()''' '''pool2 = mg.Pool(nprocess) ffp6_output = pool2.map(ffp6,np.arange(9)) pool2.close() pool2.join()''' fig, ax = plt.subplots(3, 3) ax = ax.ravel() stepno = 10 planckcls = [None] * 9 ffp6cls = [None] * 9 oldffp6cls = [None] * 9 for i in xrange(len(planckcls)): planckclsfits = planckdir + planckroot + planckcode[i] + planckclsend planckcls[i] = hp.read_cl(planckclsfits) ffp6clsfits = ffp6dir + ffp6dircode[i] + planckprefix[ i] + ffp6root + ffp6code[i] + ffp6clsend ffp6cls[i] = hp.read_cl(ffp6clsfits) oldffp6clsfits = '/Users/keir/Documents/s2let_ilc_planck/ffp6_data/' + 'ffp6_fiducial_withPS_' + planckcode[ i] + '_cls.fits' oldffp6cls[i] = hp.read_cl(oldffp6clsfits) ax[i].set_xlabel(r"Multipole $\ell$") ax[i].set_ylabel( r"Power spectrum $\frac{\ell (\ell + 1) C_{\ell}}{2 \pi}$ (uK^2)") if i < 9: #ax[i].set_ylim([0,1e5]) ax[i].set_yscale('log') ax[i].plot(ell[::stepno], planckcls[i][::stepno] * ell[::stepno] * (ell[::stepno] + 1) * (10**12) * invtwopi,
output=np.zeros(3) deltas=np.zeros(3) for i in xrange(3): tmp1,out1,tmp2=np.percentile(data[:,i],[16,50,84]) delta=np.max([out1-tmp1,tmp2-out1]) output[i]=out1 deltas[i]=delta return np.reshape([output,deltas],6) if __name__=="__main__": t1=time() radio_file='/data/wmap/faraday_MW_realdata.fits' cl_file='/home/matt/wmap/simul_scalCls.fits' nside=128 npix=hp.nside2npix(nside) cls=hp.read_cl(cl_file) simul_cmb=hp.sphtfunc.synfast(cls,512,fwhm=13.*np.pi/(180.*60.),new=1,pol=1); alpha_radio=hp.read_map(radio_file,hdu='maps/phi'); alpha_radio=hp.ud_grade(alpha_radio,nside_out=512,order_in='ring',order_out='ring') names=['K','Ka','Q','V','W'] bands=[23,33,41,61,94] wl=np.array([299792458./(band*1e9) for band in bands]) num_wl=len(wl) npix1=hp.nside2npix(512) map_prefix='/data/wmap/wmap_band_iqumap_r9_9yr_' simul_prefix='/data/mwap/simul_fr_rotated_' wmap_files=[ map_prefix+name+'_v5.fits' for name in names]
def test_anafast_xspectra(self):
    cl = hp.anafast(self.map1[0].filled(), self.map2[0].filled(), lmax=1024)
    self.assertEqual(len(cl), 1025)
    clx = hp.read_cl(os.path.join(self.path, 'data', 'clx.fits'))
    np.testing.assert_array_almost_equal(cl, clx, decimal=8)
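# The same pattern works as a reference-free regression test: synthesize a map
# from a known spectrum and check that anafast recovers it on average (the
# tolerance here is illustrative; individual C_l scatter as chi-squared with
# 2l+1 degrees of freedom, so only a broad average is stable):
import numpy as np
import healpy as hp

def test_anafast_roundtrip():
    nside, lmax = 64, 128
    cl_in = 1e-12/(np.arange(lmax + 1) + 10.)**2
    np.random.seed(0)
    cl_out = hp.anafast(hp.synfast(cl_in, nside, lmax=lmax), lmax=lmax)
    assert np.isclose(cl_out[20:].mean(), cl_in[20:].mean(), rtol=0.2)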
def main(): ##Define Files used to make maps radio_file='/data/wmap/faraday_MW_realdata.fits' cl_file='/home/matt/wmap/simul_scalCls.fits.lens' output_prefix='/home/matt/prism/simul_maps/' ##Define Parameters used to simulate PRISM Fields nside_out=128 npix_out=hp.nside2npix(nside_out) bands=np.array([30.,36.,51.,105.,135.,160.]) wl=np.array([299792458./(b*1e9) for b in bands]) beam_fwhm=np.array([17.,14.,10.,4.8,3.8,3.2]) n_det=np.array([50,100,150,250,300,350]) noise_const_temp=np.array([63.4,59.7,53.7,45.6,44.9,45.5])/np.sqrt(n_det)*1e-6 noise_const_pol=np.array([89.7,84.5,75.9,64.4,63.4,64.3])/np.sqrt(n_det)*1e-6 nside=2048 npix=hp.nside2npix(nside) pix_area=hp.nside2pixarea(nside) num_wl=len(wl) tqu_array=[] sigma_array=[] cls=hp.read_cl(cl_file) print 'Generating Map' simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1); alpha_radio=hp.read_map(radio_file,hdu='maps/phi'); alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring') nside=2048 pix_area=hp.nside2pixarea(nside) prim=fits.PrimaryHDU() prim.header['COMMENT']="Simulated Planck Data with Polarization" prim.header['COMMENT']="Created using CAMB" #ipdb.set_trace() for i in range(num_wl): tmp_cmb=rotate_tqu(simul_cmb,wl[i],alpha_radio); tmp_didqdu=np.array([np.random.normal(0,1,npix)*noise_const_temp[i], np.random.normal(0,1,npix)*noise_const_pol[i] , np.random.normal(0,1,npix)*noise_const_pol[i]]) tmp_tqu=hp.sphtfunc.smoothing(tmp_cmb,fwhm=np.sqrt((beam_fwhm[i]*np.pi/(180.*60.))**2 - pix_area),pol=1) sig_hdu=fits.ImageHDU(tmp_tqu) sig_hdu.header['TFIELDS']=(len(tmp_tqu),'number of fields in each row') sig_hdu.header["TTYPE1"]=("STOKES I") sig_hdu.header["TTYPE2"]=("STOKES Q") sig_hdu.header["TTYPE3"]=("STOKES U") sig_hdu.header["TUNIT1"]=("K_{CMB} Thermodynamic", 'Physical Units of Map') sig_hdu.header["TUNIT2"]=("K_{CMB} Thermodynamic", 'Physical Units of Map') sig_hdu.header["TUNIT3"]=("K_{CMB} Thermodynamic", 'Physical Units of Map') sig_hdu.header["TFORM1"]='E' sig_hdu.header["TFORM2"]='E' sig_hdu.header["TFORM3"]='E' sig_hdu.header["EXTNAME"]="STOKES IQU" sig_hdu.header['POLAR']= 'T' sig_hdu.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU') sig_hdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation") sig_hdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED") sig_hdu.header["NSIDE"]=(nside,'Healpix Resolution paramter') sig_hdu.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL') sig_hdu.header['OBS_NPIX']=(npix,'Number of pixels observed') sig_hdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT') sig_hdu.header["COORDSYS"]=('G','Pixelization coordinate system') err_hdu=fits.ImageHDU(tmp_didqdu) err_hdu.header['TFIELDS']=(len(tmp_didqdu),'number of fields in each row') err_hdu.header["TTYPE1"]=("UNCERTAINTY I") err_hdu.header["TTYPE2"]=("UNCERTAINTY Q") err_hdu.header["TTYPE3"]=("UNCERTAINTY U") err_hdu.header["TUNIT1"]=("K_{CMB} Thermodynamic", 'Physical Units of Map') err_hdu.header["TUNIT2"]=("K_{CMB} Thermodynamic", 'Physical Units of Map') err_hdu.header["TUNIT3"]=("K_{CMB} Thermodynamic", 'Physical Units of Map') err_hdu.header["TFORM1"]='E' err_hdu.header["TFORM2"]='E' err_hdu.header["TFORM3"]='E' err_hdu.header["EXTNAME"]="UNCERTAINTIES" err_hdu.header['POLAR']= 'T' err_hdu.header['POLCCONV']=('COSMO','Coord. 
Convention for polarisation COSMO/IAU') err_hdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation") err_hdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED") err_hdu.header["NSIDE"]=(nside,'Healpix Resolution paramter') err_hdu.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL') err_hdu.header['OBS_NPIX']=(npix,'Number of pixels observed') err_hdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT') err_hdu.header["COORDSYS"]=('G','Pixelization coordinate system') # ipdb.set_trace() tblist=fits.HDUList([prim,sig_hdu,err_hdu]) tblist.writeto(output_prefix+'prism_simulated_{0:0>3.0f}.fits'.format(bands[i]),clobber=True) print "prism_simulated_{:0>3.0f}.fits".format(bands[i]) print "Nside = {:0>4d}".format(nside)
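# The noise levels in this script average n_det uncorrelated detectors, so the
# per-pixel sigma scales as 1/sqrt(n_det). A standalone version of that scaling
# plus a white-noise realization (numbers taken from the arrays above):
import numpy as np

def per_pixel_sigma(noise_uK, n_det):
    return noise_uK/np.sqrt(np.asarray(n_det, dtype=float))*1e-6  # K_CMB

sigma_t = per_pixel_sigma(np.array([63.4, 59.7]), np.array([50, 100]))
noise_i = np.random.normal(0., 1., 12*128**2)*sigma_t[0]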
# binning scheme b = nmt.NmtBin(nside, nlb=20, is_Dell=True) leff = b.get_effective_ells() # input spectra spectra = read_spectra(0.01) # pixwin corrections pw = hp.pixwin(nside, pol=True) pw = [pw[0][:3 * nside], pw[1][:3 * nside]] ell = np.arange(3 * nside) pwb2 = b.bin_cell(np.array(pw)) pwb3 = pwb2 / (leff * (leff + 1)) * 2 * pi # read the files with the reconstructed l*(l+1)*Cl cltt_mean = hp.read_cl('cls_tt_512_beam_v2.fits') cltt_std = hp.read_cl('scls_tt_512_beam_v2.fits') clte_mean = hp.read_cl('cls_te_512_beam_v2.fits') clte_std = hp.read_cl('scls_te_512_beam_v2.fits') # plots kw_Xpol = {'fmt': 'bo', 'markersize': 3} mp.figure() mp.title('TT') plot_spectra(spectra[0], color='g', label='Input Spectra', lmax=512) mp.errorbar(leff, cltt_mean / (pwb3[0] * pwb3[0]), cltt_std / (pwb3[0] * pwb3[0]), fmt='m.', label='Reconstructed Cls') mp.legend(loc='upper right', frameon=False) mp.ylim(0, 7000) mp.savefig('./figuras/TT_512_beam.pdf', format="pdf")
def faraday_theory_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands_name,beam=False): print "Computing Cross Correlations for Bands "+str(bands_name) radio_file='/data/wmap/faraday_MW_realdata.fits' cl_file='/home/matt/wmap/simul_scalCls.fits' nside=1024 npix=hp.nside2npix(nside) cls=hp.read_cl(cl_file) simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1); alpha_radio=hp.read_map(radio_file,hdu='maps/phi'); alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring') bands=[43.1,94.5] q_fwhm=[27.3,11.7] wl=np.array([299792458./(band*1e9) for band in bands]) num_wl=len(wl) t_array=np.zeros((num_wl,npix)) q_array=np.zeros((num_wl,npix)) u_array=np.zeros((num_wl,npix)) for i in range(num_wl): tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl[i],alpha_radio); t_array[i],q_array[i],u_array[i]=tmp_cmb iqu_band_i=[t_array[0],q_array[0],u_array[0]] iqu_band_j=[t_array[1],q_array[1],u_array[1]] alpha_radio=hp.read_map(alpha_file,hdu='maps/phi') temperature_file='/data/Planck/COM_CompMap_CMB-smica_2048.fits' planck_T=hp.read_map(temperature_file) planck_T*=1e-6 hdu_i=fits.open(i_file) field_pixels=hdu_i['FIELD PIXELS'].data hdu_i.close() iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=128,order_in='ring') iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=128,order_in='ring') planck_T=hp.ud_grade(planck_T,nside_out=128,order_in='ring') iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.pi/180.,lmax=383) iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.pi/180.,lmax=383) planck_T=hp.smoothing(planck_T,fwhm=np.pi/180.,lmax=383) #alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383) const=2.*(wl_i**2-wl_j**2) Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const alpha_u=alpha_radio*iqu_band_j[2] alpha_q=-alpha_radio*iqu_band_j[1] DQm=hp.ma(Delta_Q) DUm=hp.ma(Delta_U) aQm=hp.ma(alpha_q) aUm=hp.ma(alpha_u) cross1_array=[] cross2_array=[] cross3_array=[] Bl_factor=np.repeat(1.,3*128) for field1 in xrange(4): mask_bool1=np.repeat(True,len(Delta_Q)) pix_cmb1=field_pixels.field(field1) pix_cmb1=pix_cmb1[np.nonzero(pix_cmb1)] ##Take Pixels From Field 1 tmp=np.zeros(hp.nside2npix(1024)) tmp[pix_cmb1]=1 tmp=hp.ud_grade(tmp,128) mask_bool1[np.nonzero(tmp)]=False # mask_bool1[np.where(P<.7e-6)]=True DQm.mask=mask_bool1 DUm.mask=mask_bool1 aQm.mask=mask_bool1 aUm.mask=mask_bool1 TE_map=np.array([planck_T*alpha_radio,Delta_Q,Delta_U]) TEm=hp.ma(TE_map) TEm[0].mask=mask_bool1 TEm[1].mask=mask_bool1 TEm[2].mask=mask_bool1 cross1_array.append(hp.anafast(DQm,map2=aUm)/Bl_factor) cross2_array.append(hp.anafast(DUm,map2=aQm)/Bl_factor) cross_tmp=hp.anafast(TEm,pol=1,nspec=4) cross3_array.append(cross_tmp[-1]/Bl_factor) cross1=np.mean(cross1_array,axis=0) ##Average over all Cross Spectra cross2=np.mean(cross2_array,axis=0) ##Average over all Cross Spectra cross3=np.mean(cross3_array,axis=0) ##Average over all Cross Spectra hp.write_cl('cl_'+bands_name+'_FR_QxaU.fits',cross1) hp.write_cl('cl_'+bands_name+'_FR_UxaQ.fits',cross2) hp.write_cl('cl_'+bands_name+'_FR_TE_cmb.fits',cross3) return (cross1,cross2,cross3)
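# rotate_tqu is imported from a local module and not shown in this excerpt.
# Physically, Faraday rotation turns the polarization plane by phi =
# alpha*lambda^2, i.e. (Q + iU) -> (Q + iU) exp(2i*phi); a sketch consistent
# with that (not necessarily the module's exact implementation):
import numpy as np

def rotate_tqu_sketch(tqu, lam, alpha):
    t, q, u = tqu
    two_phi = 2.0*alpha*lam**2
    q_rot = q*np.cos(two_phi) - u*np.sin(two_phi)
    u_rot = q*np.sin(two_phi) + u*np.cos(two_phi)
    return np.array([t, q_rot, u_rot])   # temperature is unaffected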
print("A set of deep learning experiments on Cosmic Microwave Background Radiation Data")
print("by Pranath Reddy & Amit Mishra\n")
print("TEMPERATURE SKY MAP GENERATOR (Outputs file in standard fits format.)\n")
import healpy as hp
import numpy as np
import random
import time
import os
import math
import datetime
if not os.path.exists("./map_files"):
    os.makedirs("./map_files")
clsData = hp.read_cl('cl_data.fits')
nside = 1024
print("Enter the number of maps you want to generate:")
mapsToGenerate = int(input())  # number of maps to generate; input() returns a string
iterTime = 0
Data_Labels = np.zeros(shape=(mapsToGenerate*12, 1))
j = 0
while iterTime < mapsToGenerate:
    print("Generating map in fits format: "+str(iterTime+1)+"/"+str(mapsToGenerate))
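    # --- the original loop body is truncated in this excerpt; a minimal
    # --- continuation consistent with the setup above (the output file name
    # --- pattern is a guess) would be:
    sky_map = hp.synfast(clsData, nside, new=True)
    hp.write_map("./map_files/map_" + str(iterTime + 1) + ".fits", sky_map, overwrite=True)
    iterTime += 1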
if bp_corr: table["title"] += " BPCORR" table["rows"] = [] summary_row = dict(tag=table["title"], numslinks=[]) try: for surv in survs[:-1]: for surv2 in survs[1:]: if surv2 <= surv: pass else: comb = swap_surv((surv, surv2)) metadata_filename = os.path.join(root_folder, "surveydiff", "%s_SS%d-SS%d%s_cl.json" % (chtag, comb[0], comb[1], bp_tag[bp_corr])) with open(metadata_filename) as metadata_file: metadata=json.load(metadata_file) with open(root_folder + metadata["file_name"],"rb") as openf: spec = hp.read_cl(openf) if isinstance(spec, list): spec = spec[cl_comp[comp]] spec *= 1e12 binspec, binspecerr = bin(spec, 5) ell, eller = bin(np.arange(len(spec)), 5) ell = np.arange(len(spec)) binspec = spec plt.loglog(ell, binspec, label="SS%d-SS%d" % comb, color=colors[i]) i += 1 f=os.path.join(root_folder, "halfrings", "%s_SS%s_cl.json" % (chtag, "full")) with open(f) as openf: metadata=json.load(openf) with open(root_folder + metadata["file_name"], "rb") as openf: spec = hp.read_cl(openf)
subdir5 = '/gainval_v005/coadd_map/coadd_map1/' subdir6 = '/gainval_v006/coadd_map/coadd_map1/' if sim_option == 'random_r': subdir2 = '/gainval_v012/coadd_map/coadd_map1/' subdir3 = '/gainval_v013/coadd_map/coadd_map1/' subdir4 = '/gainval_v014/coadd_map/coadd_map1/' subdir5 = '/gainval_v015/coadd_map/coadd_map1/' subdir6 = '/gainval_v016/coadd_map/coadd_map1/' if sim_option == '1of_r': subdir2 = '/gainval_v022/coadd_map/coadd_map1/' subdir3 = '/gainval_v023/coadd_map/coadd_map1/' subdir4 = '/gainval_v024/coadd_map/coadd_map1/' subdir5 = '/gainval_v025/coadd_map/coadd_map1/' subdir6 = '/gainval_v026/coadd_map/coadd_map1/' Clin = h.read_cl(dir + subdir1 + 'Clin_anafast.fits') Cl1 = h.read_cl(dir + subdir1 + 'Clout_anafast.fits') Cl2 = h.read_cl(dir + subdir2 + 'Clout_anafast.fits') Cl3 = h.read_cl(dir + subdir3 + 'Clout_anafast.fits') Cl4 = h.read_cl(dir + subdir4 + 'Clout_anafast.fits') Cl5 = h.read_cl(dir + subdir5 + 'Clout_anafast.fits') Cl6 = h.read_cl(dir + subdir6 + 'Clout_anafast.fits') num = len(Clin[0]) ell = np.arange(num) py.figure(0, figsize=(15, 8)) py.subplot(121) py.plot(ell, ell * (ell + 1.) / (2. * pi) * Clin[0], '-g') py.plot(ell, ell * (ell + 1.) / (2. * pi) * Clin[1], '-b') py.plot(ell, ell * (ell + 1.) / (2. * pi) * Clin[2], '-r')
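# A compact summary of the gain-systematic runs plotted above: the mean ratio
# of each recovered TT spectrum to the input (an illustrative check, reusing
# the arrays already read in):
import numpy as np

for k, cl_out in enumerate([Cl1, Cl2, Cl3, Cl4, Cl5, Cl6], start=1):
    ratio = cl_out[0][2:]/Clin[0][2:]        # skip monopole and dipole
    print('coadd map %d: mean Cl_out/Cl_in = %.4f' % (k, np.mean(ratio)))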
filepath = '../mice_output/' + str(sq_degrees) + '_sq_degrees_' + str(patch_count) + '_patches/'
################################################################
## TreeCorr
# for auto-correlation of the mice density fluctuations
# also multiply by mean density in patch to get integrated 3-pt function i_zeta
filepath_map = filepath + 'mice_map/'
createFolder(filepath_map + 'B_treecorr_patches_correlated/')
for i in range(patch_count):
    #print('Patch # '+str(i+1))
    density_fluctuations = hp.read_cl(filepath_map + 'A_healpy_patches_produced/del_mice_patch_' + str(i + 1) + '.fits')
    density_fluctuations_RA = density_fluctuations[0, :]
    density_fluctuations_dec = density_fluctuations[1, :]
    density_fluctuations_del = density_fluctuations[2, :]
    mean_del = np.mean(density_fluctuations_del)
    var_del = np.var(density_fluctuations_del)   # variance of the density contrast itself
    cat = treecorr.Catalog(ra=density_fluctuations_RA, dec=density_fluctuations_dec,
                           ra_units='rad', dec_units='rad', k=density_fluctuations_del)
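# The Catalog built above feeds TreeCorr's scalar ('k') correlations; the
# two-point function of the density contrast then follows with (the binning
# parameters here are illustrative):
import treecorr

kk = treecorr.KKCorrelation(min_sep=1., max_sep=100., nbins=20, sep_units='arcmin')
kk.process(cat)                  # cat from the loop above
xi, r_sep = kk.xi, kk.meanr      # correlation function and mean bin separations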
plt.title("CIB Power Spectra @ l=500 per shell") plt.xlabel(r'$z$', fontsize=40) plt.ylabel(r"$dC_l/dz$", fontsize=40) print z print z.shape[0] cl = [] while i < n: print "i:", i # if i%3 != 0: # i += 1 # continue zMin = i * dz zMax = (i + 1) * dz zMinCib = (i + 1) * dz zMaxCib = (i + 2) * dz lensedClFilename = "/scratch2/r/rbond/phamloui/lenspix_files/cib_v2_lensed_cl/lensed_cib_fullsky_ns2048_zmin%.2f_zmax%.2f_nu217_ns2048_tot.dat" % ( zMinCib, zMaxCib) lensedCl = hp.read_cl(lensedClFilename) pt = lensedCl[500] # plt.plot(z[i], pt, 'r-') cl.append(pt) i += 1 z_interp = np.arange(0, 4.7, 0.1) cl_interp_func = interpolate.splrep(z, cl, s=0) cl_interp = interpolate.splev(z_interp, cl_interp_func, der=0) plt.plot(z, cl, 'b-', linewidth=2) plt.show()
def getSMICA(theta_i=0.0, theta_f=180.0, nSteps=1800, lmax=100, lmin=2, newSMICA=False, useSPICE=True, newDeg=False, R1=False): """ Purpose: load CMB and mask maps from files, return correlation function for unmasked and masked CMB Mostly follows Copi et. al. 2013 for cut sky C(theta) Uses: get_crosspower.py (for plotting) C(theta) save file getSMICAfile.npy Inputs: theta_i,theta_f: starting and ending points for C(theta) in degrees nSteps: number of intervals between i,f points lmax: the maximum l value to include in legendre series for C(theta) lmin: the lowest l to use in C(theta,Cl) and S_{1/2} = CIC calculation newSMICA: set to True to reload data from files and recompute if False, will load C(theta) curves from file useSPICE: if True, will use SPICE to find power spectra if False, will use anafast, following Copi et. al. 2013 Default: True newDeg: set to True to recalculate map and mask degredations Note: the saved files are dependent on the value of lmax that was used Default: False R1: set to True to use R1 versions of SMICA and mask. Otherwise, R2 is used Only affects which Planck files are used; irrelevant if newDeg=False. Default: False Outupts: theta: nSteps+1 angles that C(theta) arrays are for (degrees) unmasked: C(theta) unmasked (microK^2) masked: C(theta) masked (microK^2) """ saveFile = 'getSMICAfile.npy' #for anafast saveFile2 = 'getSMICAfile2.npy' #for spice if newSMICA: # start with map degredations mapDegFile = 'smicaMapDeg.fits' maskDegFile = 'maskMapDeg.fits' if newDeg: # load maps; default files have 2048,NESTED,GALACTIC dataDir = '/Data/' if R1: smicaFile = 'COM_CompMap_CMB-smica-field-I_2048_R1.20.fits' maskFile = 'COM_Mask_CMB-union_2048_R1.10.fits' else: smicaFile = 'COM_CMB_IQU-smica-field-int_2048_R2.01_full.fits' maskFile = 'COM_CMB_IQU-common-field-MaskInt_2048_R2.01.fits' print 'opening file ', smicaFile, '... ' smicaMap, smicaHead = hp.read_map(dataDir + smicaFile, nest=True, h=True) print 'opening file ', maskFile, '... ' maskMap, maskHead = hp.read_map(dataDir + maskFile, nest=True, h=True) if R1: smicaMap *= 1e-6 #microK to K # degrade map and mask resolutions from 2048 to 128; convert NESTED to RING useAlm = True # set to True to do harmonic space scaling, False for ud_grade NSIDE_big = 2048 NSIDE_deg = 128 while 4 * NSIDE_deg < lmax: NSIDE_deg *= 2 print 'resampling maps at NSIDE = ', NSIDE_deg, '... ' order_out = 'RING' if useAlm: # transform to harmonic space smicaMapRing = hp.reorder(smicaMap, n2r=True) maskMapRing = hp.reorder(maskMap, n2r=True) smicaCl, smicaAlm = hp.anafast(smicaMapRing, alm=True, lmax=lmax) maskCl, maskAlm = hp.anafast(maskMapRing, alm=True, lmax=lmax) # this gives 101 Cl values and 5151 Alm values. Why not all 10201 Alm.s? # scale by pixel window functions bigWin = hp.pixwin(NSIDE_big) degWin = hp.pixwin(NSIDE_deg) winRatio = degWin / bigWin[:degWin.size] degSmicaAlm = hp.almxfl(smicaAlm, winRatio) degMaskAlm = hp.almxfl(maskAlm, winRatio) # re-transform back to real space smicaMapDeg = hp.alm2map(degSmicaAlm, NSIDE_deg) maskMapDeg = hp.alm2map(degMaskAlm, NSIDE_deg) else: smicaMapDeg = hp.ud_grade(smicaMap, nside_out=NSIDE_deg, order_in='NESTED', order_out=order_out) maskMapDeg = hp.ud_grade(maskMap, nside_out=NSIDE_deg, order_in='NESTED', order_out=order_out) # note: degraded resolution mask will no longer be only 0s and 1s. # Should it be? Yes. 
# turn smoothed mask back to 0s,1s mask threshold = 0.9 maskMapDeg[np.where(maskMapDeg > threshold)] = 1 maskMapDeg[np.where(maskMapDeg <= threshold)] = 0 #testing #hp.mollview(smicaMapDeg) #plt.show() #hp.mollview(maskMapDeg) #plt.show() #return 0 hp.write_map(mapDegFile, smicaMapDeg, nest=False) # use False if order_out='RING' above hp.write_map(maskDegFile, maskMapDeg, nest=False) else: # just load previous degradations (dependent on previous lmax) print 'loading previously degraded map and mask...' smicaMapDeg = hp.read_map(mapDegFile, nest=False) maskMapDeg = hp.read_map(maskDegFile, nest=False) # find power spectra print 'find power spectra... ' if useSPICE: ClFile1 = 'spiceCl_unmasked.fits' ClFile2 = 'spiceCl_masked.fits' # note: lmax for spice is 3*NSIDE-1 or less ispice(mapDegFile, ClFile1, subav="YES", subdipole="YES") Cl_unmasked = hp.read_cl(ClFile1) ispice(mapDegFile, ClFile2, maskfile1=maskDegFile, subav="YES", subdipole="YES") Cl_masked = hp.read_cl(ClFile2) Cl_mask = np.zeros(Cl_unmasked.shape[0]) # just a placeholder ell = np.arange(Cl_unmasked.shape[0]) else: # use anafast Cl_unmasked = hp.anafast(smicaMapDeg, lmax=lmax) Cl_masked = hp.anafast(smicaMapDeg * maskMapDeg, lmax=lmax) Cl_mask = hp.anafast(maskMapDeg, lmax=lmax) ell = np.arange(lmax + 1) #anafast output seems to start at l=0 # plot them doPlot = False #True if doPlot: gcp.showCl(ell, np.array([Cl_masked, Cl_unmasked]), title='power spectra of unmasked, masked SMICA map') # Legendre transform to real space print 'Legendre transform to real space... ' # note: getCovar uses linspace in x for thetaArray thetaDomain, CofTheta = getCovar(ell[:lmax + 1], Cl_unmasked[:lmax + 1], theta_i=theta_i, theta_f=theta_f, nSteps=nSteps, lmin=lmin) thetaDomain, CCutofThetaTA = getCovar(ell[:lmax + 1], Cl_masked[:lmax + 1], theta_i=theta_i, theta_f=theta_f, nSteps=nSteps, lmin=lmin) CofTheta *= 1e12 # K^2 to microK^2 CCutofThetaTA *= 1e12 # K^2 to microK^2 if useSPICE: CCutofTheta = CCutofThetaTA #/(4*np.pi) else: thetaDomain, AofThetaInverse = getCovar( ell[:lmax + 1], Cl_mask[:lmax + 1], theta_i=theta_i, theta_f=theta_f, nSteps=nSteps, lmin=0) # don't zilch the mask # note: zilching the mask's low power drastically changed C(theta) for masked anafast # Not sure why. CCutofTheta = CCutofThetaTA / AofThetaInverse xArray = np.cos(thetaDomain * np.pi / 180.) # back to frequency space for S_{1/2} = CIC calculation if useSPICE: CCutofL = Cl_masked[:lmax + 1] * 1e12 #K^2 to microK^2 else: legCoefs = legfit(xArray, CCutofTheta, lmax) CCutofL = legCoefs * (4 * np.pi) / (2 * ell[:lmax + 1] + 1) # S_{1/2} myJmn = getJmn(lmax=lmax) SMasked = np.dot(CCutofL[lmin:], np.dot(myJmn[lmin:, lmin:], CCutofL[lmin:])) SNoMask = np.dot( Cl_unmasked[lmin:lmax + 1], np.dot(myJmn[lmin:, lmin:], Cl_unmasked[lmin:lmax + 1])) * 1e24 #two factors of K^2 to muK^2 # save results if useSPICE: np.save( saveFile2, np.array( [thetaDomain, CofTheta, CCutofTheta, SNoMask, SMasked])) else: np.save( saveFile, np.array( [thetaDomain, CofTheta, CCutofTheta, SNoMask, SMasked])) else: # load from file if useSPICE: fileData = np.load(saveFile2) else: fileData = np.load(saveFile) thetaDomain = fileData[0] CofTheta = fileData[1] CCutofTheta = fileData[2] SNoMask = fileData[3] SMasked = fileData[4] return thetaDomain, CofTheta, CCutofTheta, SNoMask, SMasked
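# getCovar is called above but defined elsewhere; the transform it performs is
# the standard Legendre series C(theta) = sum_l (2l+1)/(4pi) C_l P_l(cos theta).
# A minimal numpy version (the function name and lmin handling are a sketch):
import numpy as np
from numpy.polynomial import legendre

def cl_to_ctheta(cl, theta_deg, lmin=2):
    coef = (2*np.arange(len(cl)) + 1)/(4*np.pi)*np.asarray(cl, dtype=float)
    coef[:lmin] = 0.                      # drop monopole/dipole ("zilching")
    return legendre.legval(np.cos(np.radians(theta_deg)), coef)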
def test(useCLASS=1, useLensing=1, classCamb=1, nSims=1000, lmax=100, lmin=2,
         newSMICA=False, newDeg=False, suppressC2=False, suppFactor=0.23,
         filterC2=False, filtFacLow=0.1, filtFacHigh=0.2, doCovar=False):
    """
    code for testing the other functions in this module
    Inputs:
        useCLASS: set to 1 to use CLASS, 0 to use CAMB
            CLASS Cl has early/late split at z=50
            CAMB Cl has ISWin/out split: ISWin: 0.4<z<0.75, ISWout: the rest
            Note: CAMB results include primary in ISWin and ISWout (not as intended)
            default: 1
        useLensing: set to 1 to use lensed Cl, 0 for non-lensed
            default: 1
        classCamb: if 1: use the CAMB format of CLASS output; if 0: use CLASS format
            Note: parameter not used if useCLASS = 0
            default: 1
        nSims: the number of simulations to do for the ensemble
            default: 1000
        lmax: the highest l to include in Legendre transforms
            default: 100
        lmin: the lowest l to include in S_{1/2} = CIC calculations
            default: 2
        newSMICA: set to True to recalculate SMICA results
            default: False
        newDeg: set to True to recalculate map and mask degradations
            (only if newSMICA is also True)
            default: False
        suppressC2: set to True to suppress theoretical C_2 (quadrupole) by
            suppFactor before creating alm's
            default: False
        suppFactor: multiplies C_2 if suppressC2 is True
            default: 0.23  # from Tegmark et al. 2003, figure 13 (WMAP)
        filterC2: set to True to filter simulated CMBs after SpICE calculates
            the cut-sky C_l. Sims pass the filter if
            C_2 * filtFacLow < C_2^sim < C_2 * filtFacHigh.
            default: False
        filtFacLow, filtFacHigh: define the C_2 range for passing simulated CMBs
            default: 0.1, 0.2
        doCovar: set to True to calculate C(theta) and S_{1/2} distributions
            for the ensemble
            Note: meant to capture functionality from sim_stats.py; ZK 2016.11.13
            default: False
    """
    ##############################################################################
    # load theoretical power spectra

    # load data
    ell, fullCl, primCl, lateCl, crossCl = gcp.loadCls(useCLASS=useCLASS,
                                                       useLensing=useLensing,
                                                       classCamb=classCamb)

    # fill the beginning with zeros so array indices match l values
    startEll = int(ell[0])
    ell = np.append(np.arange(startEll), ell)
    fullCl = np.append(np.zeros(startEll), fullCl)
    primCl = np.append(np.zeros(startEll), primCl)
    lateCl = np.append(np.zeros(startEll), lateCl)
    crossCl = np.append(np.zeros(startEll), crossCl)

    # suppress C_2 to see what happens to the ensemble
    if suppressC2:
        fullCl[2] *= suppFactor
        primCl[2] *= suppFactor
        lateCl[2] *= suppFactor
        crossCl[2] *= suppFactor
    conv = ell * (ell + 1) / (2 * np.pi)
    #print ell, conv
    #ell[0] = 2.0

    # apply beam and pixel window functions to power spectra
    # note: to ignore the non-constant pixel shape, W(l) must be > B(l);
    # this is not true for NSIDE=128 and gauss_beam(5'), but I ignore that
    # here and proceed anyway
    myNSIDE = 128  # must be the same NSIDE as in the sims.getSMICA function
    Wpix = hp.pixwin(myNSIDE)
    Bsmica = hp.gauss_beam(5. / 60 * np.pi / 180)  # 5 arcmin
    WlMax = Wpix.size
    if WlMax < lmax:
        print 'die screaming!!!'
        return 0
    fullCl = fullCl[:WlMax] * (Wpix * Bsmica)**2
    primCl = primCl[:WlMax] * (Wpix * Bsmica)**2
    lateCl = lateCl[:WlMax] * (Wpix * Bsmica)**2
    crossCl = crossCl[:WlMax] * (Wpix * Bsmica)**2
    # note: I tried sims without this scaling, and results seemed the same at a glance

    ##############################################################################
    # load SMICA data, converted to C_l, via SpICE

    if newSMICA or doCovar:
        theta_i = 0.0    # degrees
        theta_f = 180.0  # degrees
        nSteps = 1800
        thetaArray2sp, C_SMICAsp, C_SMICAmaskedsp, S_SMICAnomasksp, S_SMICAmaskedsp = \
            sims.getSMICA(theta_i=theta_i, theta_f=theta_f, nSteps=nSteps,
                          lmax=lmax, lmin=lmin, newSMICA=newSMICA, newDeg=newDeg,
                          useSPICE=True)

    # filenames for SpICE to use
    # super lame that SpICE needs to read/write from disk, but here goes...
    RAMdisk = '/Volumes/ramdisk/'
    ClTempFile = RAMdisk + 'tempCl.fits'
    mapTempFile = RAMdisk + 'tempMap.fits'
    mapDegFile = RAMdisk + 'smicaMapDeg.fits'  # should have been created by sims.getSMICA
    maskDegFile = RAMdisk + 'maskMapDeg.fits'  # should have been created by sims.getSMICA

    # create a RAM disk for SpICE and copy these files there using bash
    RAMsize = 4  # Mb
    ramDiskOutput = subprocess.check_output('./ramdisk.sh create ' + str(RAMsize),
                                            shell=True)
    print ramDiskOutput
    diskID = ramDiskOutput[31:41]  # might not grab the right part; works for '/dev/disk1'
    subprocess.call('cp smicaMapDeg.fits ' + RAMdisk, shell=True)
    subprocess.call('cp maskMapDeg.fits ' + RAMdisk, shell=True)

    ispice(mapDegFile, ClTempFile, maskfile1=maskDegFile, subav="YES", subdipole="YES")
    ClsmicaCut = hp.read_cl(ClTempFile)

    # find S_{1/2} for SMICA. Should actually optimize, but see what happens here first.
    if doCovar:
        myJmn = legprodint.getJmn(endX=0.5, lmax=lmax, doSave=False)
        #Ssmica = np.dot(ClsmicaCut[lmin:lmax+1], np.dot(myJmn[lmin:, lmin:],
        #                ClsmicaCut[lmin:lmax+1])) * 1e24  # K^4 to microK^4

    ##############################################################################
    # create ensemble of realizations and gather statistics

    spiceMax = myNSIDE * 3  # should be lmax+1 for SpICE
    ClEnsembleCut = np.zeros([nSims, spiceMax])
    if doCovar:
        ClEnsembleFull = np.zeros([nSims, lmax + 1])
    simEll = np.arange(spiceMax)

    # option for creating C(theta) and S_{1/2} ensembles
    if doCovar:
        cEnsembleCut = np.zeros([nSims, nSteps + 1])
        cEnsembleFull = np.zeros([nSims, nSteps + 1])
        sEnsembleCut = np.zeros(nSims)
        sEnsembleFull = np.zeros(nSims)

    doTime = True  # to time the run and print output
    startTime = time.time()
    #for nSim in range(nSims):
    nSim = 0
    while nSim < nSims:
        print 'starting masked Cl sim ', nSim + 1, ' of ', nSims
        alm_prim, alm_late = hp.synalm((primCl, lateCl, crossCl), lmax=lmax, new=True)
        mapSim = hp.alm2map(alm_prim + alm_late, myNSIDE, lmax=lmax)
        hp.write_map(mapTempFile, mapSim)
        if doCovar:
            ClEnsembleFull[nSim] = hp.alm2cl(alm_prim + alm_late)
        ispice(mapTempFile, ClTempFile, maskfile1=maskDegFile, subav="YES",
               subdipole="YES")
        ClEnsembleCut[nSim] = hp.read_cl(ClTempFile)

        # check for low power of the cut-sky C_2
        if (not filterC2) or (fullCl[2] * filtFacLow < ClEnsembleCut[nSim, 2]
                              < fullCl[2] * filtFacHigh):
            doPlot = False  # True
            if doPlot:
                gcp.showCl(simEll[:lmax + 1], ClEnsembleCut[nSim, :lmax + 1],
                           title='power spectrum of simulation ' + str(nSim + 1))
            if doCovar:
                # note: getCovar uses linspace in x for thetaArray
                thetaArray, cArray = sims.getCovar(simEll[:lmax + 1],
                                                   ClEnsembleCut[nSim, :lmax + 1],
                                                   theta_i=theta_i, theta_f=theta_f,
                                                   nSteps=nSteps, lmin=lmin)
                cEnsembleCut[nSim] = cArray
                thetaArray, cArray = sims.getCovar(simEll[:lmax + 1],
                                                   ClEnsembleFull[nSim, :lmax + 1],
                                                   theta_i=theta_i, theta_f=theta_f,
                                                   nSteps=nSteps, lmin=lmin)
                cEnsembleFull[nSim] = cArray

                # S_{1/2}
                sEnsembleCut[nSim] = np.dot(
                    ClEnsembleCut[nSim, lmin:lmax + 1],
                    np.dot(myJmn[lmin:, lmin:], ClEnsembleCut[nSim, lmin:lmax + 1]))
                sEnsembleFull[nSim] = np.dot(
                    ClEnsembleFull[nSim, lmin:lmax + 1],
                    np.dot(myJmn[lmin:, lmin:], ClEnsembleFull[nSim, lmin:lmax + 1]))
            # only advance on a filter pass, so rejected sims are redone
            nSim += 1

    timeInterval1 = time.time() - startTime
    if doTime:
        print 'time elapsed: ', int(timeInterval1 / 60.), ' minutes'

    # free the RAM used by SpICE's RAM disk
    ramDiskOutput = subprocess.check_output('./ramdisk.sh delete ' + diskID, shell=True)
    print ramDiskOutput

    # put SMICA in as the 0th member of the ensemble; 1e12 converts K^2 to microK^2
    ClEnsembleCut = np.vstack((ClsmicaCut * 1e12, ClEnsembleCut))
    nSims += 1

    ##############################################################################
    # create S(x) for each C_l, using interpolation

    nXvals = 181
    thetaVals = np.linspace(0, 180, nXvals)  # one-degree intervals
    xVals = np.cos(thetaVals * np.pi / 180)
    Jmnx = np.empty([nXvals, lmax + 1, lmax + 1])
    for index, xVal in enumerate(xVals):
        Jmnx[index] = legprodint.getJmn(endX=xVal, lmax=lmax, doSave=False)
    SxToInterpolate = np.empty(nXvals)

    # create list of functions
    #dummy = lambda x: x**2
    #SofXList = [dummy for i in range(nSims)]

    # here is where this program starts to diverge from the purely python version
    # create an array to hold S_x values
    SxValsArray = np.empty([nSims, nXvals])
    for nSim in range(nSims):
        print 'starting S(x) sim ', nSim + 1, ' of ', nSims
        for index in range(nXvals):
            SxToInterpolate[index] = np.dot(
                ClEnsembleCut[nSim, lmin:lmax + 1],
                np.dot(Jmnx[index, lmin:, lmin:], ClEnsembleCut[nSim, lmin:lmax + 1]))
        #SofX = interp1d(xVals, SxToInterpolate)
        #SofXList[nSim] = SofX
        SxValsArray[nSim] = SxToInterpolate

    """
    #print SofXList#[nSim]
    doPlot = False#True
    if doPlot:
        nplotx = (nXvals-1)*10+1
        plotTheta = np.linspace(0,180,nplotx)
        plotx = np.cos(plotTheta*np.pi/180)
        plotS = SofXList[nSim](plotx)
        plt.plot(plotx,plotS)
        plt.title('S(x) for simulation '+str(nSim+1))
        plt.show()

    doPlot = False#True
    if doPlot:
        for nSim in range(nSims):
            nplotx = (nXvals-1)*10+1
            plotTheta = np.linspace(0,180,nplotx)
            plotx = np.cos(plotTheta*np.pi/180)
            plotS = SofXList[nSim](plotx)
            plt.plot(plotx,plotS,label='sim '+str(nSim+1))
            #plt.plot(xVals,SxValsArray[nSim],label='sim '+str(nSim+1))
        #plt.legend()
        plt.title('S(x) for '+str(nSims)+' simulations')
        plt.xlabel('x')
        plt.ylabel('S_x')
        plt.show()
    """

    # kludge for extracting the S(x) ensemble to disk for jackknife testing later
    saveAndExit = False  # True
    saveAndExitFile = 'SofXEnsemble.npy'
    if saveAndExit:
        np.save(saveAndExitFile, np.vstack((xVals, SxValsArray)))
        print 'saving file ', saveAndExitFile, ' and exiting.'
        return 0

    ##############################################################################
    # send data to the C library function in optimizeSx.so

    xStart = -1.0
    xEnd = 1.0
    nSearch = 181  # same number as nXvals for now, but spaced equally in x, not theta
    PvalMinima = np.empty(nSims)  # for return values
    XvalMinima = np.empty(nSims)  # for return values

    doTime = True  # to time the run and print output
    startTime = time.time()
    optSx(xVals, nXvals, SxValsArray, nSims, xStart, xEnd, nSearch,
          PvalMinima, XvalMinima)
    timeInterval2 = time.time() - startTime
    if doTime:
        print 'time elapsed: ', int(timeInterval2 / 60.), ' minutes'

    ##############################################################################
    # create the distribution of S(XvalMinima)

    SxEnsembleMin = np.empty(nSims)
    for nSim in range(nSims):
        # need to interpolate since optSx uses interpolation
        SofX = interp1d(xVals, SxValsArray[nSim])
        SxEnsembleMin[nSim] = SofX(XvalMinima[nSim])

    ##############################################################################
    # save S_x, P(x), x results

    saveFile = "optSxResult.npy"
    np.save(saveFile, np.vstack((PvalMinima, XvalMinima, SxEnsembleMin)))
    saveFileC2 = "optSxC2.npy"
    np.save(saveFileC2, ClEnsembleCut[:, 2])  # for C_2

    # save C(theta) and S_{1/2} results
    if doCovar:
        avgEnsembleFull = np.average(cEnsembleFull, axis=0)
        stdEnsembleFull = np.std(cEnsembleFull, axis=0)
        # do I need a better way to describe the confidence interval?
        avgEnsembleCut = np.average(cEnsembleCut, axis=0)
        stdEnsembleCut = np.std(cEnsembleCut, axis=0)

        saveFile1 = "simStatResultC.npy"
        np.save(saveFile1, np.vstack((thetaArray, avgEnsembleFull, stdEnsembleFull,
                                      avgEnsembleCut, stdEnsembleCut)))
        saveFile2 = "simStatC_SMICA.npy"
        np.save(saveFile2, np.vstack((thetaArray2sp, C_SMICAsp, C_SMICAmaskedsp)))
        saveFile3 = "simStatResultS.npy"
        np.save(saveFile3,
                np.vstack((np.hstack((np.array(S_SMICAnomasksp), sEnsembleFull)),
                           np.hstack((np.array(S_SMICAmaskedsp), sEnsembleCut)))))

    ##############################################################################
    # plot/print results

    makePlots(saveFile=saveFile, suppressC2=suppressC2)
    #makeCornerPlot(saveFile=saveFile, suppressC2=suppressC2)
    makeCornerPlotSmall(saveFile=saveFile, suppressC2=suppressC2)
    c2pval = makeC2Plot(saveFile=saveFileC2)
    if doCovar:
        sims.makePlots(saveFile1=saveFile1, saveFile2=saveFile2, saveFile3=saveFile3)
    pv = PvalPval(saveFile=saveFile)

    print ' '
    print 'nSims = ', nSims - 1
    print 'time interval 1: ', timeInterval1, 's, time interval 2: ', timeInterval2, 's'
    print '  => ', timeInterval1 / (nSims - 1), ' s/sim, ', \
        timeInterval2 / (nSims - 1), ' s/sim'
    print 'SMICA optimized S_x: S = ', SxEnsembleMin[0], ', for x = ', XvalMinima[0], \
        ', with p-value ', PvalMinima[0]
    print 'P-value of P-value for SMICA: ', pv
    print ' '
    print 'p-value of C_2^SMICA in distribution: ', c2pval
    print ' '
    print 'step 3: profit'
    print ''
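# A hedged, pure-Python sketch of what the compiled optSx routine is assumed
# to do, inferred only from how its outputs are used above (it fills
# PvalMinima/XvalMinima with the minimum ensemble p-value of S(x) over a grid
# of x cuts, per simulation). This is a reconstruction for illustration, not
# the source of optimizeSx.so; the function name opt_sx_python is hypothetical.
import numpy as np
from scipy.interpolate import interp1d

def opt_sx_python(xVals, SxValsArray, xStart=-1.0, xEnd=1.0, nSearch=181):
    nSims = SxValsArray.shape[0]
    xGrid = np.linspace(xStart, xEnd, nSearch)
    # interp1d wants increasing abscissae; xVals = cos(theta) is decreasing
    order = np.argsort(xVals)
    SofX = np.array([interp1d(xVals[order], SxValsArray[i][order])(xGrid)
                     for i in range(nSims)])
    # p-value at (sim, x): fraction of ensemble members with smaller S(x),
    # treating low S as the anomalous tail
    pvals = np.array([[np.mean(SofX[:, j] < SofX[i, j]) for j in range(nSearch)]
                      for i in range(nSims)])
    best = np.argmin(pvals, axis=1)
    return pvals[np.arange(nSims), best], xGrid[best]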
           symmetric_cl='YES')
    ispice(fn_cmb, fn_cl_cmb, nlmax=lmax,
           beam1=fwhm_arcmin, beam2=fwhm_arcmin,
           mapfile2=fn_cmb, weightfile1=fn_mask, weightfile2=fn_mask,
           polarization='YES', subav='YES', subdipole='YES',
           symmetric_cl='YES')

    print('Loading', fn_cl_cmb)
    cl_cmb = hp.read_cl(fn_cl_cmb)
    print('Loading', fn_cl_clean)
    cl_clean = hp.read_cl(fn_cl_clean)
    # keep EE, BB, TE up to lmax_out; index the multipole axis directly
    # (the old [1:4][:lmax_out + 1] sliced the spectrum axis twice)
    cl_in.append(cl_cmb[1:4, :lmax_out + 1])
    cl_out.append(cl_clean[1:4, :lmax_out + 1])

cl_in = np.array(cl_in)
cl_out = np.array(cl_out)


def get_corr_and_var(x, y):
    def get_corr(x, y):
        return np.dot(x, y) / np.dot(x, x)
    c0 = get_corr(x, y)
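# get_corr_and_var is cut off above. For context: get_corr returns the
# least-squares amplitude c0 = <x,y>/<x,x>, i.e. the scaling c that minimizes
# |y - c*x|^2. A minimal sketch of a possible completion, assuming the
# variance of c0 is estimated with a delete-one-block jackknife over the
# multipole range; this completion is hypothetical, not the original code.
import numpy as np

def get_corr_and_var_sketch(x, y, nblocks=10):
    c0 = np.dot(x, y) / np.dot(x, x)
    # recompute the amplitude with one block of multipoles deleted at a time
    blocks = np.array_split(np.arange(x.size), nblocks)
    cjk = np.array([np.dot(np.delete(x, b), np.delete(y, b)) /
                    np.dot(np.delete(x, b), np.delete(x, b))
                    for b in blocks])
    var = (nblocks - 1) * np.mean((cjk - cjk.mean())**2)  # jackknife variance
    return c0, var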
def main():
    ## define files used to make maps
    radio_file = '/data/wmap/faraday_MW_realdata.fits'
    cl_file = '/home/matt/wmap/simul_scalCls.fits.lens'
    output_prefix = '/home/matt/Planck/data/faraday/simul_maps/'
    synchrotron_file = '/data/Planck/COM_CompMap_SynchrotronPol-commander_0256_R2.00.fits'
    dust_file = '/data/Planck/COM_CompMap_DustPol-commander_1024_R2.00.fits'
    gamma_dust = 6.626e-34 / (1.38e-23 * 21)  # h/(k_B * T_dust), T_dust = 21 K

    ## define parameters used to simulate Planck fields
    bands = np.array([30., 44., 70., 100., 143., 217., 353.])
    #beam_fwhm = np.array([33., 24., 14., 10., 7.1, 5.0, 5.0])
    #noise_const_temp = np.array([2.0, 2.7, 4.7, 2.5, 2.2, 4.8, 14.7]) * 2.7255e-6
    #noise_const_pol = np.array([2.8, 3.9, 6.7, 4.0, 4.2, 9.8, 29.8]) * 2.7255e-6
    beam_fwhm = np.array([32.29, 27, 13.21, 9.67, 7.26, 4.96, 4.93])
    # pixel areas: first three bands (LFI) at NSIDE=1024, last four (HFI) at
    # NSIDE=2048 (np.concatenate, not np.array: the two pieces differ in length)
    pix_area_array = np.concatenate([np.repeat(hp.nside2pixarea(1024), 3),
                                     np.repeat(hp.nside2pixarea(2048), 4)])
    pix_area_array = np.sqrt(pix_area_array) * 60 * 180. / np.pi  # rad to arcmin
    noise_const_temp = np.array([2.5, 2.7, 3.5, 1.29, .555, .78, 2.56]) / pix_area_array * 60.e-6
    noise_const_pol = np.array([3.5, 4.0, 5.0, 1.96, 1.17, 1.75, 7.31]) / pix_area_array * 60.e-6
    krj_to_kcmb = np.array([1.0217, 1.0517,
                            np.mean([1.1360, 1.1405, 1.1348]),
                            np.mean([1.3058, 1.3057]),
                            np.mean([1.6735, 1.6727]),
                            np.mean([3.2203, 3.2336, 3.2329, 3.2161]),
                            np.mean([14.261, 14.106])]) * 1e-6
    sync_factor = krj_to_kcmb * np.array([20. * (.408 / x)**2 for x in bands])
    dust_factor = krj_to_kcmb * np.array(
        [163.e-6 * (np.exp(gamma_dust * 353e9) - 1) /
         (np.exp(gamma_dust * x * 1e9) - 1) * (x / 353)**2.54 for x in bands])
    nside = 2048
    npix = hp.nside2npix(nside)
    pix_area = hp.nside2pixarea(nside)

    ## reverse the order of the arrays to simulate the larger-NSIDE maps first
    bands = bands[::-1]
    beam_fwhm = beam_fwhm[::-1]
    noise_const_temp = noise_const_temp[::-1]
    noise_const_pol = noise_const_pol[::-1]
    wl = np.array([299792458. / (band * 1e9) for band in bands])
    num_wl = len(wl)
    tqu_array = []
    sigma_array = []

    LFI = False
    LFI_IND = np.where(bands == 70)[0][0]

    cls = hp.read_cl(cl_file)
    print 'Generating Map'
    simul_cmb = hp.sphtfunc.synfast(cls, nside, fwhm=0., new=1, pol=1)

    alpha_radio = hp.read_map(radio_file, hdu='maps/phi')
    alpha_radio = hp.ud_grade(alpha_radio, nside_out=nside,
                              order_in='ring', order_out='ring')

    # deconvolve the 40-arcmin beam from the synchrotron template
    bl_40 = hp.gauss_beam(40. * np.pi / (180. * 60.), 3 * nside - 1)
    hdu_sync = fits.open(synchrotron_file)
    sync_q = hdu_sync[1].data.field(0)
    sync_u = hdu_sync[1].data.field(1)

    sync_q = hp.reorder(sync_q, n2r=1)
    tmp_alm = hp.map2alm(sync_q)
    tmp_alm = hp.almxfl(tmp_alm, 1. / bl_40)
    sync_q = hp.alm2map(tmp_alm, nside)
    #sync_q = hp.smoothing(sync_q, fwhm=40.*np.pi/(180.*60.), verbose=False, invert=True)
    sync_q = hp.ud_grade(sync_q, nside_out=nside)

    sync_u = hp.reorder(sync_u, n2r=1)
    tmp_alm = hp.map2alm(sync_u)
    tmp_alm = hp.almxfl(tmp_alm, 1. / bl_40)
    sync_u = hp.alm2map(tmp_alm, nside)
    #sync_u = hp.smoothing(sync_u, fwhm=40.*np.pi/(180.*60.), verbose=False, invert=True)
    sync_u = hp.ud_grade(sync_u, nside_out=nside)
    hdu_sync.close()

    # deconvolve the 10-arcmin beam from the dust template
    bl_10 = hp.gauss_beam(10 * np.pi / (180. * 60.), 3 * nside - 1)
    hdu_dust = fits.open(dust_file)
    dust_q = hdu_dust[1].data.field(0)
    dust_u = hdu_dust[1].data.field(1)
    hdu_dust.close()

    dust_q = hp.reorder(dust_q, n2r=1)
    tmp_alm = hp.map2alm(dust_q)
    tmp_alm = hp.almxfl(tmp_alm, 1. / bl_10)
    dust_q = hp.alm2map(tmp_alm, nside)
    dust_q = hp.ud_grade(dust_q, nside)

    dust_u = hp.reorder(dust_u, n2r=1)
    tmp_alm = hp.map2alm(dust_u)
    tmp_alm = hp.almxfl(tmp_alm, 1. / bl_10)
    dust_u = hp.alm2map(tmp_alm, nside)
    #dust_q = hp.smoothing(dust_q, fwhm=10.0*np.pi/(180.*60.), verbose=False, invert=True)
    dust_u = hp.ud_grade(dust_u, nside)

    nside = 2048
    pix_area = hp.nside2pixarea(nside)

    prim = fits.PrimaryHDU()
    prim.header['COMMENT'] = "Simulated Planck Data with Polarization"
    prim.header['COMMENT'] = "Created using CAMB"
    #ipdb.set_trace()
    for i in range(num_wl):
        if LFI:
            # drop to NSIDE=1024 for the LFI bands
            nside = 1024
            npix = hp.nside2npix(1024)
            simul_cmb = hp.ud_grade(simul_cmb, nside)
            alpha_radio = hp.ud_grade(alpha_radio, nside)
            sync_q = hp.ud_grade(sync_q, nside)
            sync_u = hp.ud_grade(sync_u, nside)
            dust_q = hp.ud_grade(dust_q, nside)  # fixed: previously degraded sync_q into dust_q
            dust_u = hp.ud_grade(dust_u, nside)  # fixed: previously degraded sync_u into dust_u
            pix_area = hp.nside2pixarea(nside)

        tmp_cmb = rotate_tqu(simul_cmb, wl[i], alpha_radio)
        tmp_didqdu = np.array([np.random.normal(0, 1, npix) * noise_const_temp[i],
                               np.random.normal(0, 1, npix) * noise_const_pol[i],
                               np.random.normal(0, 1, npix) * noise_const_pol[i]])
        tmp_tqu = np.copy(tmp_cmb)

        # add polarized foreground emission
        tmp_tqu[1] += np.copy(dust_factor[i] * dust_q + sync_factor[i] * sync_q)
        tmp_tqu[2] += np.copy(dust_factor[i] * dust_u + sync_factor[i] * sync_u)
        #tmp_tqu[1] += np.copy(sync_factor[i] * sync_q)
        #tmp_tqu[2] += np.copy(sync_factor[i] * sync_u)
        tmp_tqu = hp.sphtfunc.smoothing(tmp_tqu, fwhm=beam_fwhm[i] * np.pi / (180. * 60.),
                                        pol=1)
        # add noise after smoothing
        #tmp_tqu += tmp_didqdu

        sig_hdu = fits.ImageHDU(tmp_tqu)
        sig_hdu.header['TFIELDS'] = (len(tmp_tqu), 'number of fields in each row')
        sig_hdu.header["TTYPE1"] = ("STOKES I")
        sig_hdu.header["TTYPE2"] = ("STOKES Q")
        sig_hdu.header["TTYPE3"] = ("STOKES U")
        sig_hdu.header["TUNIT1"] = ("K_{CMB} Thermodynamic", 'Physical Units of Map')
        sig_hdu.header["TUNIT2"] = ("K_{CMB} Thermodynamic", 'Physical Units of Map')
        sig_hdu.header["TUNIT3"] = ("K_{CMB} Thermodynamic", 'Physical Units of Map')
        sig_hdu.header["TFORM1"] = 'E'
        sig_hdu.header["TFORM2"] = 'E'
        sig_hdu.header["TFORM3"] = 'E'
        sig_hdu.header["EXTNAME"] = "STOKES IQU"
        sig_hdu.header['POLAR'] = 'T'
        sig_hdu.header['POLCCONV'] = ('COSMO', 'Coord. Convention for polarisation COSMO/IAU')
        sig_hdu.header['PIXTYPE'] = ("HEALPIX", "HEALPIX pixelisation")
        sig_hdu.header['ORDERING'] = ("RING", "Pixel order scheme, either RING or NESTED")
        sig_hdu.header["NSIDE"] = (nside, 'Healpix Resolution parameter')
        sig_hdu.header['OBJECT'] = ('FULLSKY', 'Sky coverage, either FULLSKY or PARTIAL')
        sig_hdu.header['OBS_NPIX'] = (npix, 'Number of pixels observed')
        sig_hdu.header['INDXSCHM'] = ('IMPLICIT', 'indexing : IMPLICIT or EXPLICIT')
        sig_hdu.header["COORDSYS"] = ('G', 'Pixelization coordinate system')

        err_hdu = fits.ImageHDU(tmp_didqdu)
        err_hdu.header['TFIELDS'] = (len(tmp_didqdu), 'number of fields in each row')
        err_hdu.header["TTYPE1"] = ("UNCERTAINTY I")
        err_hdu.header["TTYPE2"] = ("UNCERTAINTY Q")
        err_hdu.header["TTYPE3"] = ("UNCERTAINTY U")
        err_hdu.header["TUNIT1"] = ("K_{CMB} Thermodynamic", 'Physical Units of Map')
        err_hdu.header["TUNIT2"] = ("K_{CMB} Thermodynamic", 'Physical Units of Map')
        err_hdu.header["TUNIT3"] = ("K_{CMB} Thermodynamic", 'Physical Units of Map')
        err_hdu.header["TFORM1"] = 'E'
        err_hdu.header["TFORM2"] = 'E'
        err_hdu.header["TFORM3"] = 'E'
        err_hdu.header["EXTNAME"] = "UNCERTAINTIES"
        err_hdu.header['POLAR'] = 'T'
        err_hdu.header['POLCCONV'] = ('COSMO', 'Coord. Convention for polarisation COSMO/IAU')
        err_hdu.header['PIXTYPE'] = ("HEALPIX", "HEALPIX pixelisation")
        err_hdu.header['ORDERING'] = ("RING", "Pixel order scheme, either RING or NESTED")
        err_hdu.header["NSIDE"] = (nside, 'Healpix Resolution parameter')
        err_hdu.header['OBJECT'] = ('FULLSKY', 'Sky coverage, either FULLSKY or PARTIAL')
        err_hdu.header['OBS_NPIX'] = (npix, 'Number of pixels observed')
        err_hdu.header['INDXSCHM'] = ('IMPLICIT', 'indexing : IMPLICIT or EXPLICIT')
        err_hdu.header["COORDSYS"] = ('G', 'Pixelization coordinate system')
        #ipdb.set_trace()

        tblist = fits.HDUList([prim, sig_hdu, err_hdu])
        tblist.writeto(output_prefix + 'planck_simulated_{0:0>3.0f}.fits'.format(bands[i]),
                       clobber=True)
        print "planck_simulated_{:0>3.0f}.fits".format(bands[i])
        print "Nside = {:0>4d}".format(nside)
        if i + 1 == LFI_IND:
            LFI = True
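# rotate_tqu is called in main() above but not defined in this snippet. A
# minimal sketch of what it is assumed to do, based on its call signature
# rotate_tqu(tqu_map, wavelength, alpha): apply Faraday rotation, which
# rotates the polarization angle by alpha * lambda^2 (alpha acting as the
# rotation measure), mixing Q and U while leaving T unchanged. This is a
# hypothetical reconstruction, not the original helper; sign conventions vary.
import numpy as np

def rotate_tqu_sketch(tqu, wl, alpha):
    t, q, u = tqu
    phi = 2. * alpha * wl**2  # Stokes Q, U rotate by twice the polarization angle
    q_rot = q * np.cos(phi) - u * np.sin(phi)
    u_rot = q * np.sin(phi) + u * np.cos(phi)
    return np.array([t, q_rot, u_rot])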