def test_cmb_tensor(tmp_path, monkeypatch, tensor_to_scalar):

    monkeypatch.setattr(utils, "PREDEFINED_DATA_FOLDERS", {"C": [str(tmp_path)]})
    nside = 256
    lmax = 512

    path = tmp_path / "websky" / "0.3"
    path.mkdir(parents=True)

    input_cl = np.zeros((6, lmax + 1), dtype=np.double)
    input_cl[5] = 1e5 * stats.norm.pdf(np.arange(lmax + 1), 250, 30)  # BB (old healpy ordering TT, TE, TB, EE, EB, BB)
    filename = path / "tensor_cl_r1_nt0.fits"

    hp.write_cl(filename, input_cl, overwrite=True)

    cmb_tensor = WebSkyCMBTensor("0.3", nside=nside, tensor_to_scalar=tensor_to_scalar)

    freq = 100 * u.GHz
    cmb_tensor_map = cmb_tensor.get_emission(freq)
    cmb_tensor_map = cmb_tensor_map.to(
        u.uK_CMB, equivalencies=u.cmb_equivalencies(freq)
    )

    cl = hp.anafast(cmb_tensor_map, use_pixel_weights=True, lmax=lmax)
    # anafast returns results in new ordering
    # TT, EE, BB, TE, EB, TB
    np.testing.assert_allclose(
        input_cl[5][200:300] * tensor_to_scalar, cl[2][200:300], rtol=0.2
    )
    np.testing.assert_allclose(0, cl[:2], rtol=1e-3)
    np.testing.assert_allclose(0, cl[3:], rtol=1e-3, atol=1e-4)
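A quick way to double-check the component ordering assumed by tests like this is to round-trip an array through hp.write_cl / hp.read_cl; a minimal sketch (the file name is a placeholder):

import healpy as hp
import numpy as np

lmax = 512
cl = np.zeros((6, lmax + 1))
cl[5] = 1.0  # tag one component to see where it lands after the round trip
hp.write_cl("ordering_check.fits", cl, overwrite=True)
cl_back = hp.read_cl("ordering_check.fits")
assert np.allclose(cl_back[5], 1.0)  # components come back in the order they were written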
Example #2
def masked_cl(ellmax, mask_name, mask_fn, outfits_root, final_maps):
    # Masked spectra
    print("Calculating masked C_l")
    mask = hp.read_map(mask_fn)  # 0 where holes, RING ordering
    f_sky = np.sum(mask) / len(mask)
    print("f_sky =", f_sky)
    masked_maps = [final_maps[0] * mask,
                   final_maps[1] * mask]  #(Q*mask,U*mask)
    T_map = [
        np.zeros_like(final_maps[0]),
    ]
    masked_cls = hp.anafast(T_map + masked_maps,
                            lmax=ellmax - 1)  #(TT,EE,BB,TE,EB,TB)
    pixrecip = np.concatenate(
        (np.ones(2),
         np.reciprocal(
             hp.pixwin(
                 hp.get_nside(masked_maps[0]),
                 pol=True)[1][2:ellmax])))  #P pixwin #Not defined for l < 2
    clscode = ['EE', 'BB', 'EB']
    clsidx = [1, 2, 4]
    for i in range(len(clscode)):
        cl_outfits = outfits_root + '_' + clscode[i] + 'cls_' + mask_name + '.fits'
        hp.write_cl(cl_outfits,
                    masked_cls[clsidx[i]] * pixrecip * pixrecip / f_sky)

    return 0
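A hedged usage sketch for the function above (all file names are placeholders; hp and np as imported throughout these examples):

# hypothetical inputs: Q/U maps and a mask on disk
final_maps = [hp.read_map("map_Q.fits"), hp.read_map("map_U.fits")]
masked_cl(ellmax=2000, mask_name="galmask", mask_fn="mask.fits",
          outfits_root="run01", final_maps=final_maps)
# writes run01_EEcls_galmask.fits, run01_BBcls_galmask.fits, run01_EBcls_galmask.fits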
Example #3
def test_from_cl(tmpdir):
    nside = 256
    lmax = 512

    folder = tmpdir.mkdir("cls")

    filename = os.path.join(str(folder), "cls.fits")

    input_cl = np.zeros((6, lmax + 1), dtype=np.double)
    # using healpy old ordering TT, TE, TB, EE, EB, BB
    # using healpy new ordering TT, EE, BB, TE, EB, TB
    input_cl[3] = 1e5 * stats.norm.pdf(np.arange(lmax + 1), 250, 30)  # EE
    hp.write_cl(filename, input_cl, overwrite=True)

    precomputed_alms = PrecomputedAlms(
        filename=filename,
        nside=nside,
        input_units="K_CMB",
        from_cl=True,
        from_cl_seed=100,
    )
    freq = 100 * u.GHz
    m = precomputed_alms.get_emission(freq)
    m = m.to(u.K_CMB, equivalencies=u.cmb_equivalencies(freq))

    cl = hp.anafast(m, lmax=lmax)
    # anafast returns results in new ordering
    # TT, EE, BB, TE, EB, TB
    np.testing.assert_allclose(input_cl[3][200:300], cl[1][200:300], rtol=0.2)
    np.testing.assert_allclose(0, cl[0], rtol=1e-3)
    np.testing.assert_allclose(0, cl[2:], rtol=1e-3, atol=1e-4)
Example #4
def planck(i):
    print "Doing another map analysis"
    planckfits = planckdir + planckroot + planckcode[i] + planckend
    planckmaps = hp.read_map(planckfits)
    #pixrecip = 1. / hp.pixwin(hp.get_nside(planckmaps[i]))
    planckcls = hp.anafast(planckmaps, lmax=3998)  #* pixrecip * pixrecip
    planckclsfits = planckdir + planckroot + planckcode[i] + planckclsend
    hp.write_cl(planckclsfits, planckcls)
Example #5
def get_cl(fname_cl, fname_map, fname_hits):
    if os.path.isfile(fname_cl):
        cl = hp.read_cl(fname_cl)
    else:
        mask = get_mask(fname_hits)
        m = hp.read_map(fname_map, None)
        cl = map2cl(m, mask)
        hp.write_cl(fname_cl, cl)
    return cl
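map2cl is a project helper not shown on this page; one plausible minimal implementation, consistent with how it is called here (this is an assumption, not the project's actual code), is a masked anafast with an f_sky correction:

def map2cl(m, mask=None):
    # assumed sketch: pseudo-C_l of a (possibly IQU) map, corrected for sky fraction
    if mask is None:
        return hp.anafast(m)
    f_sky = mask.sum() / mask.size
    return hp.anafast(m * mask) / f_sky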
Example #6
def get_cl(fname_cl, fname_map, fname_hits):
    if os.path.isfile(fname_cl):
        cl = hp.read_cl(fname_cl)
    else:
        hits = None # hp.read_map(path_hits)
        m = hp.read_map(fname_map, None)
        cl = map2cl(m, hits)
        hp.write_cl(fname_cl, cl)
    return cl
Example #7
def ffp6(i):
    ffp6fits = ffp6dir + ffp6dircode[i] + planckprefix[
        i] + ffp6root + ffp6code[i] + ffp6end
    ffp6maps = hp.read_map(ffp6fits)
    #pixrecip = 1. / hp.pixwin(hp.get_nside(planckmaps[i]))
    ffp6cls = hp.anafast(ffp6maps, lmax=3998)  #* pixrecip * pixrecip
    ffp6clsfits = ffp6dir + ffp6dircode[i] + planckprefix[
        i] + ffp6root + ffp6code[i] + ffp6clsend
    hp.write_cl(ffp6clsfits, ffp6cls)
Example #8
def combine_cmb_noise(config, n, cmb_dir, noise_dir, coupling_matrix, mask):
    combined_dir = os.path.join(config['toplevel_dir'],
                                'ffp8_mc_combined_{:04}'.format(n))
    if not os.path.exists(combined_dir):
        os.mkdir(combined_dir)

    logger = log.make_logger('ffp8_combined_{}'.format(n),
                             log_file=os.path.join(combined_dir, 'log.txt'),
                             toplevel_log_file=os.path.join(
                                 config['toplevel_dir'],
                                 'lgmca_postprocessing_combination_log.txt'))

    with log.Timer(logger,
                   'Combining CMB {0} and noise {0}'.format(n)).with_level(
                       logging.INFO):
        with log.Timer(logger, 'Reading CMB & noise maps and combining them'):
            cmb_map = hp.read_map(os.path.join(cmb_dir,
                                               'FFP8_v1_aggregated_cmb.fits'),
                                  verbose=False)
            noise_map = hp.read_map(os.path.join(
                noise_dir, 'FFP8_v1_aggregated_cmb.fits'),
                                    verbose=False)
            combined_map_ring = cmb_map + noise_map

        # No need to waste memory
        del cmb_map
        del noise_map

        combined_map = hp.reorder(combined_map_ring, r2n=True)
        cls = hp.anafast(combined_map_ring,
                         lmax=config['matmask_maxl'],
                         use_weights=True)

        hp.write_map(os.path.join(combined_dir, 'FFP8_v1_aggregated_map.fits'),
                     combined_map,
                     nest=True,
                     overwrite=True)
        hp.write_cl(os.path.join(combined_dir, 'FFP8_v1_aggregated_cls.fits'),
                    cls,
                    overwrite=True)
        shutil.copyfile(
            os.path.join(cmb_dir, 'FFP8_v1_aggregated_beam.txt'),
            os.path.join(combined_dir, 'FFP8_v1_aggregated_beam.txt'))

        with log.Timer(logger, 'Computing masked pspec and decoupling'):
            masked_powerspec = hp.anafast(combined_map_ring * mask,
                                          lmax=config['matmask_maxl'],
                                          use_weights=True)
            recovered_pspec = np.linalg.solve(coupling_matrix,
                                              masked_powerspec)
            hp.write_cl(os.path.join(combined_dir,
                                     'mask_corrected_spectra.fits'),
                        recovered_pspec,
                        overwrite=True)
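The decoupling step above is a plain linear solve against the mask coupling matrix; a self-contained toy sketch of the same operation:

import numpy as np

# toy 3x3 coupling matrix and a known input spectrum
coupling = np.array([[0.8, 0.1, 0.0],
                     [0.1, 0.8, 0.1],
                     [0.0, 0.1, 0.8]])
true_cl = np.array([10.0, 5.0, 2.0])
masked_cl_toy = coupling @ true_cl               # what anafast would see on the cut sky
recovered = np.linalg.solve(coupling, masked_cl_toy)
assert np.allclose(recovered, true_cl)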
Example #9
def get_tf(fname_tf, fname_cmb_unlensed, fname_cmb_lensing, fname_output, fname_hits):
    if os.path.isfile(fname_tf):
        tf = hp.read_cl(fname_tf)
    else:
        inmap = hp.read_map(fname_cmb_unlensed, None) + hp.read_map(fname_cmb_lensing, None)
        inmap *= 1e-6  # into K_CMB
        inmap[0] = hp.remove_dipole(inmap[0])
        outmap = hp.read_map(fname_output, None)
        hits = None # hp.read_map(fname_hits)
        cl_in = map2cl(inmap, hits)
        cl_out = map2cl(outmap, hits)
        tf = cl_out / cl_in
        hp.write_cl(fname_tf, tf)
    return tf
Example #10
def get_tf(fname_tf, fname_cmb_unlensed, fname_cmb_lensing, fname_output,
           fname_hits):
    if os.path.isfile(fname_tf):
        tf = hp.read_cl(fname_tf)
    else:
        inmap = hp.read_map(fname_cmb_unlensed, None) + hp.read_map(
            fname_cmb_lensing, None)
        inmap *= 1e-6  # into K_CMB
        inmap[0] = hp.remove_dipole(inmap[0])
        outmap = hp.read_map(fname_output, None)
        mask = get_mask(fname_hits)
        cl_in = map2cl(inmap, mask)
        cl_out = map2cl(outmap, mask)
        tf = cl_out / cl_in
        hp.write_cl(fname_tf, tf)
    tf[:, lmax_tf:] = 1
    tf[tf > 1] = 1
    return tf
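Once returned, the transfer function is typically divided out of an observed spectrum; a hedged sketch with hypothetical file names, reusing the helpers called above:

tf = get_tf("tf.fits", "cmb_unlensed.fits", "cmb_lensing.fits",
            "observed_map.fits", "hits.fits")
cl_obs = map2cl(hp.read_map("observed_map.fits", None), get_mask("hits.fits"))
cl_debiased = cl_obs / tf  # divide out the pipeline transfer function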
Example #11
def aggregate(config, logger=log.null_logger):
    with log.Timer(logger, 'Step 3: Aggregation').with_level(logging.INFO):
        logger.info('aggregating bands')
        input_fname_fmt = os.path.join(config['output_dir'],
                                       config['band_output_name'])
        input_fname_fmt = input_fname_fmt.replace(
            'Band0', 'Band{nband}_sameres_inverted')
        band_alms, beam, cls, cmb_agg = band_aggregation(
            input_fname_fmt, planck_data.tabbeam[-6:], logger=logger)

        # Output directories
        data_dir = config['output_dir']
        aggregated_map_name = os.path.join(
            data_dir, config['name'] + '_aggregated_cmb.fits')
        aggregated_beam_name = os.path.join(
            data_dir, config['name'] + '_aggregated_beam.txt')
        aggregated_cls_name = os.path.join(
            data_dir, config['name'] + '_aggregated_cls.fits')

        config['aggregated_map_fname'] = aggregated_map_name
        config['aggregated_beam_fname'] = aggregated_beam_name
        config['aggregated_cls_fname'] = aggregated_cls_name

        # Write outputs
        logger.info('bands aggregated, saving to ' + aggregated_map_name)
        hp.write_map(aggregated_map_name,
                     hp.reorder(cmb_agg, r2n=True),
                     nest=True,
                     overwrite=True)
        np.savetxt(aggregated_beam_name, beam)
        logger.info('saving Cls to ' + aggregated_cls_name)
        hp.write_cl(aggregated_cls_name, cls, overwrite=True)
        # Don't save these - waste of disc space
        # for cmap, alms in enumerate(band_alms):
        #     np.savetxt(band_name.format(cmap=cmap), alms.view(float))

    return beam, cls, cmb_agg
Example #12
def main():
	map_prefix='/home/matt/quiet/quiet_maps/'
	i_file=map_prefix+'quiet_simulated_43.1'
	j_file=map_prefix+'quiet_simulated_94.5'
	alpha_file='/data/wmap/faraday_MW_realdata.fits'
	bands=[43.1,94.5]
	names=['43','95']
	wl=np.array([299792458./(band*1e9) for band in bands])
	N_runs=100
	bins=[1,5,10,20,50]
	cross1_array=[]
	cross2_array=[]
	noise1_array=[]
	noise2_array=[]
	theory1_array=[]
	theory2_array=[]
	
	for i in range(N_runs):
	
		simulate_fields.main()
		#for n in xrange(N_runs):
		ttmp1,ttmp2=faraday_theory_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1])
		theory1_array.append(ttmp1)
		theory2_array.append(ttmp2)

		tmp1,tmp2=faraday_correlate_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1])

		ntmp1,ntmp2=faraday_noise_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1])

		cross1_array.append(tmp1)
		cross2_array.append(tmp2)
		noise1_array.append(ntmp1)
		noise2_array.append(ntmp2)

	f=open('cl_theory_FR_QxaU.json','w')
	json.dump([[a for a in theory1_array[i]] for i in range(len(cross1_array))],f)
	f.close()	
	f=open('cl_theory_FR_UxaQ.json','w')
	json.dump([[a for a in theory2_array[i]] for i in range(len(cross2_array))],f)
	f.close()	
	theory1=np.mean(theory1_array,axis=0)
	theory2=np.mean(theory2_array,axis=0)
	hp.write_cl('cl_theory_FR_QxaU.fits',theory1)
	hp.write_cl('cl_theory_FR_UxaQ.fits',theory2)
	
	f=open('cl_array_FR_QxaU.json','w')
	json.dump([[a for a in cross1_array[i]] for i in range(len(cross1_array))],f)
	f.close()
	f=open('cl_array_FR_UxaQ.json','w')
	json.dump([[a for a in cross2_array[i]] for i in range(len(cross2_array))],f)
	f.close()
	f=open('cl_noise_FR_QxaU.json','w')
	json.dump([[a for a in noise1_array[i]] for i in range(len(noise1_array))],f)
	f.close()
	f=open('cl_noise_FR_UxaQ.json','w')
	json.dump([[a for a in noise2_array[i]] for i in range(len(noise2_array))],f)
	f.close()
	
	L=15*(np.pi/180.)
	k=np.arange(1,500*(L/(2*np.pi)))
	l=2*np.pi*k/L
	cross1=np.mean(cross1_array,axis=0)
	noise1=np.mean(noise1_array,axis=0)
	dcross1=np.std(np.subtract(cross1_array,noise1),axis=0)
	plt.figure()
	plt.plot(l,l*(l+1)/(2*np.pi)*theory1,'r-')
	plt.plot(l,l*(l+1)/(2*np.pi)*(cross1-noise1),'k')
	plt.errorbar(l,l*(l+1)/(2*np.pi)*(cross1-noise1),yerr=l*(l+1)/(2*np.pi)*dcross1,color='black')
	plt.title('Cross 43x95 FR QxaU')
	plt.ylabel(r'$\frac{\ell(\ell+1)}{2\pi}C_{\ell}\ \frac{\mu K^{2}rad}{m^{4}}$')
	plt.xlabel('$\ell$')

	plt.savefig('Cross_43x95_FR_QxaU_flat.eps')
	plt.savefig('Cross_43x95_FR_QxaU_flat.png',format='png')
	
	cross2=np.mean(cross2_array,axis=0)
	noise2=np.mean(noise2_array,axis=0)
	dcross2=np.std(np.subtract(cross2_array,noise2),axis=0)
	plt.figure()
	plt.plot(l,l*(l+1)/(2*np.pi)*theory2,'r-')
	plt.plot(l,l*(l+1)/(2*np.pi)*(cross2-noise2),'k')
	plt.errorbar(l,l*(l+1)/(2*np.pi)*(cross2-noise2),yerr=l*(l+1)/(2*np.pi)*dcross2,color='black')
	plt.title('Cross 43x95 FR UxaQ')
	plt.ylabel(r'$\frac{\ell(\ell+1)}{2\pi}C_{\ell}\ \frac{\mu K^{2}rad}{m^{4}}$')
	plt.xlabel('$\ell$')

	plt.savefig('Cross_43x95_FR_UxaQ_flat.eps')
	plt.savefig('Cross_43x95_FR_UxaQ_flat.png',format='png')
	
	subprocess.call('mv *.eps eps/', shell=True)
Example #13
def proj_all(dens_type=0,
             ngrid=1024,
             nside=512,
             lmax=750,
             rsd=True,
             write=True):

    npix = hp.nside2npix(nside)
    dirin = os.path.join("batch", "ngrid{}".format(ngrid),
                         "dens_type{}".format(dens_type))
    files = glob.glob(os.path.join(dirin, "cat*.fits"))
    ncat = len(files)
    print("{} files in {}".format(len(files), dirin))
    if ncat == 0:
        return
    zval = (0, 0.1, 0.2, 0.3, 0.4, 0.5)

    cpt = 1
    cls = np.zeros((4, lmax + 1))
    for file in files:
        cat = Catalog(file, rsd)
        print("->OK {}".format(cpt))
        for i in range(0, 4):
            ishell = i + 2
            zmax = zval[ishell]
            zmin = zval[ishell - 1]
            zrec, ra, dec = cat.get([zmin, zmax])
            Nsamp = len(ra)
            nbar = Nsamp / (4 * np.pi)
            mp = np.bincount(hp.ang2pix(nside, np.radians(90 - dec),
                                        np.radians(ra)),
                             minlength=npix)
            Nmean = mp.mean()
            map = mp.astype(float) / Nmean - 1.
            #anafast
            cl = hp.anafast(map,
                            lmax=lmax,
                            iter=0,
                            pol=False,
                            use_weights=True,
                            datapath=os.environ['HEALPIXDATA'])
            #remove SN
            cl -= 1. / nbar
            l = np.arange(len(cl))
            s2 = np.sum((2 * l + 1) * cl) / (4 * np.pi)
            print(
                "\t -> shell={}: z in [{},{}]  Nsamp={:5.2f}M sigma={:5.3} SN={:5.2}"
                .format(ishell, zmin, zmax, Nsamp / 1e6, np.sqrt(s2),
                        1 / nbar))
            cls[i, :] += cl
        cpt += 1
    #normalize
    for i in range(0, 4):
        cls[i, :] /= ncat

    if write:
        for i in range(0, 4):
            ishell = i + 2
            dirout = os.path.join(dirin, "shell{:d}".format(ishell))
            os.makedirs(dirout, exist_ok=True)
            clname = "clmean.fits"
            if not rsd:
                clname = "clmean_norsd.fits"
            f1 = os.path.join(dirout, clname)
            print("writing {}".format(f1))
            hp.write_cl(f1, cls[i], overwrite=True)

    return cls
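The shot-noise correction used above (subtracting 1/nbar) can be checked in isolation on a pure Poisson map; a small self-contained sketch:

import numpy as np
import healpy as hp

nside = 512
npix = hp.nside2npix(nside)
rng = np.random.default_rng(0)
mp = rng.poisson(10.0, npix).astype(float)   # toy Poisson counts map
nbar = mp.sum() / (4 * np.pi)                # mean sources per steradian
delta = mp / mp.mean() - 1.0                 # overdensity contrast
cl = hp.anafast(delta, lmax=750, pol=False)
cl -= 1.0 / nbar                             # subtract the Poisson shot-noise floor; residual ~ 0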
Example #14
def faraday_correlate_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands,beam=False,polar_mask=False):
	print "Computing Cross Correlations for Bands "+str(bands)

	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	alpha_radio=hp.ud_grade(alpha_radio,1024)
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	sigma_i=hdu_i['Q/U UNCERTAINTIES'].data
	sigma_j=hdu_j['Q/U UNCERTAINTIES'].data
	field_pixels=hdu_i['SQUARE PIXELS'].data
	
	q_fwhm=[27.3,11.7]
	noise_const=np.array([36./f for f in q_fwhm])*1e-6
	npix=hp.nside2npix(1024)
	sigma_i=[noise_const[0]*np.random.normal(0,1,npix),noise_const[1]*np.random.normal(0,1,npix)]
	sigma_j=[noise_const[0]*np.random.normal(0,1,npix),noise_const[1]*np.random.normal(0,1,npix)]
	iqu_band_i[1]+=sigma_i[0]
	iqu_band_i[2]+=sigma_i[1]
	iqu_band_j[1]+=sigma_j[0]
	iqu_band_j[2]+=sigma_j[1]
	hdu_i.close()
	hdu_j.close()
	
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),lmax=383)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),lmax=383)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]
	
	if polar_mask:
		P=np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)
		bad_pix=np.where( P < .2e-6)
		Delta_Q[bad_pix]=0
		Delta_U[bad_pix]=0
		alpha_u[bad_pix]=0
		alpha_q[bad_pix]=0

	cross1_array=[]
	cross2_array=[]
	cross3_array=[]
	L=15*(np.pi/180.)
	k=np.arange(1,np.round(500*(L/(2*np.pi))))
	l=2*np.pi*k/L
	Bl_factor=np.repeat(1,len(k))
	if beam:
		Bl_factor=hp.gauss_beam(np.pi/180.,383)
	for field1 in range(4):
		pix_cmb=field_pixels.field(field1)
		nx=int(np.sqrt(pix_cmb.shape[0]))	#reshape below needs an integer grid size
		flat_dq=np.reshape(Delta_Q[pix_cmb],(nx,nx))	
		flat_du=np.reshape(Delta_U[pix_cmb],(nx,nx))
		flat_aq=np.reshape(alpha_q[pix_cmb],(nx,nx))	
		flat_au=np.reshape(alpha_u[pix_cmb],(nx,nx))	
		
		dq_alm=fft.fftshift(fft.fft2(flat_dq,shape=[450,450]))
		du_alm=fft.fftshift(fft.fft2(flat_du,shape=[450,450]))
		aq_alm=fft.fftshift(fft.fft2(flat_aq,shape=[450,450]))
		au_alm=fft.fftshift(fft.fft2(flat_au,shape=[450,450]))
		pw2d_qau=np.real(dq_alm*np.conjugate(au_alm))		
		pw2d_uaq=np.real(du_alm*np.conjugate(aq_alm))		
		pw1d_qau=radialProfile.azimuthalAverage(pw2d_qau)
		pw1d_uaq=radialProfile.azimuthalAverage(pw2d_uaq)
		tmp_cl1=pw1d_qau[k.astype(int)-1]*L**2
		tmp_cl2=pw1d_uaq[k.astype(int)-1]*L**2
		#	index=np.where( (np.sqrt(x**2+y**2) <= k[num_k] +1)  & ( np.sqrt(x**2 + y**2) >= k[num_k] -1) )
		#	tmp1= np.sum(pw2d_qau[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp2= np.sum(pw2d_uaq[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp_cl1[num_k]=L**2*tmp1
		#	tmp_cl2[num_k]=L**2*tmp2
		cross1_array.append(tmp_cl1/Bl_factor)
		cross2_array.append(tmp_cl2/Bl_factor)
	
	cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	hp.write_cl('cl_'+bands+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands+'_FR_UxaQ.fits',cross2)
	return (cross1,cross2)
Example #15
def faraday_theory_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands_name,beam=False,polar_mask=False):
	print "Computing Cross Correlations for Bands "+str(bands_name)

#	radio_file='/data/wmap/faraday_MW_realdata.fits'
#	cl_file='/home/matt/wmap/simul_scalCls.fits'
#	nside=1024
#	npix=hp.nside2npix(nside)
#	
#	cls=hp.read_cl(cl_file)
#	simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1);
#	
#	alpha_radio=hp.read_map(radio_file,hdu='maps/phi');
#	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring')
#	bands=[43.1,94.5]
	q_fwhm=[27.3,11.7]
#	wl=np.array([299792458./(band*1e9) for band in bands])
#	num_wl=len(wl)
#	t_array=np.zeros((num_wl,npix))	
#	q_array=np.zeros((num_wl,npix))
#	u_array=np.zeros((num_wl,npix))
#	for i in range(num_wl):
#		tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl[i],alpha_radio);
#		t_array[i],q_array[i],u_array[i]=tmp_cmb
#	iqu_band_i=[t_array[0],q_array[0],u_array[0]]	
#	iqu_band_j=[t_array[1],q_array[1],u_array[1]]	


	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	alpha_radio=hp.ud_grade(alpha_radio,1024)
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	field_pixels=hdu_i['SQUARE PIXELS'].data
	hdu_i.close()
	hdu_j.close()
	
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),lmax=383)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),lmax=383)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]
	
	if polar_mask:
		P=np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)
		bad_pix=np.where( P < .2e-6)
		Delta_Q[bad_pix]=0
		Delta_U[bad_pix]=0
		alpha_u[bad_pix]=0
		alpha_q[bad_pix]=0

	cross1_array=[]
	cross2_array=[]
	L=15*(np.pi/180.)
	k=np.arange(1,np.round(500*(L/(2*np.pi))))
	l=2*np.pi*k/L
	Bl_factor=np.repeat(1,len(k))
	if beam:
		Bl_factor=hp.gauss_beam(np.pi/180.,383)
	for field1 in range(4):
		pix_cmb=field_pixels.field(field1)
		nx=int(np.sqrt(pix_cmb.shape[0]))	#reshape below needs an integer grid size
		flat_dq=np.reshape(Delta_Q[pix_cmb],(nx,nx))	
		flat_du=np.reshape(Delta_U[pix_cmb],(nx,nx))
		flat_aq=np.reshape(alpha_q[pix_cmb],(nx,nx))	
		flat_au=np.reshape(alpha_u[pix_cmb],(nx,nx))	
		
		dq_alm=fft.fftshift(fft.fft2(flat_dq,shape=[450,450]))
		du_alm=fft.fftshift(fft.fft2(flat_du,shape=[450,450]))
		aq_alm=fft.fftshift(fft.fft2(flat_aq,shape=[450,450]))
		au_alm=fft.fftshift(fft.fft2(flat_au,shape=[450,450]))
	
		pw2d_qau=np.real(dq_alm*np.conjugate(au_alm))		
		pw2d_uaq=np.real(du_alm*np.conjugate(aq_alm))		
		pw1d_qau=radialProfile.azimuthalAverage(pw2d_qau)
		pw1d_uaq=radialProfile.azimuthalAverage(pw2d_uaq)
		tmp_cl1=pw1d_qau[k.astype(int)-1]*L**2
		tmp_cl2=pw1d_uaq[k.astype(int)-1]*L**2
		#	index=np.where( (np.sqrt(x**2+y**2) <= k[num_k] +1)  & ( np.sqrt(x**2 + y**2) >= k[num_k] -1) )
		#	tmp1= np.sum(pw2d_qau[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp2= np.sum(pw2d_uaq[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp_cl1[num_k]=L**2*tmp1
		#	tmp_cl2[num_k]=L**2*tmp2
		cross1_array.append(tmp_cl1/Bl_factor)
		cross2_array.append(tmp_cl2/Bl_factor)

	cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	hp.write_cl('cl_'+bands_name+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands_name+'_FR_UxaQ.fits',cross2)
	return (cross1,cross2)
Example #16
simul_cmb=hp.synfast(cl_gen,nside,pol=1,new=1,fwhm=0)
rot_1=rotate_tqu(simul_cmb,wl[0],alpha)
rot_2=rotate_tqu(simul_cmb,wl[1],alpha)
Delta_Q=(rot_1[1]-rot_2[1])/const
alpha_U=alpha*rot_1[2]
dQ=hp.ma(Delta_Q)
aU=hp.ma(alpha_U)
for fsky in sky_cut:
	pix=np.arange((1-fsky)*npix).astype(int)	##Pixels to be masked
	mask=np.repeat(False,npix)
	if len(pix) > 0:		##Check if number pixels > 0
		mask[pix]=True		##Mask "Bad" pixels
	dQ.mask = mask
	aU.mask = mask
	cls=hp.anafast(dQ,map2=aU)
	hp.write_cl('cl_FR_QxAU_fsky_{:03d}.fits'.format(int(100*fsky)),cls,dtype='float')
	cl_array.append(cls)

fracs=[array/cl_array[0] for array in cl_array]
mean_fraction=[x.mean() for x in fracs]
median_fraction=[np.median(x) for x in fracs]
cut_list=sky_cut.tolist()
f=open('mean_fraction','w')
json.dump([cut_list,mean_fraction],f)
f.close()
f=open('median_fraction','w')
json.dump([cut_list,median_fraction],f)
f.close()

plt.figure()
plt.plot(sky_cut,mean_fraction,'x',label='mean')
Example #17
pyplot.legend(loc='best')

fig = pyplot.gcf()
fig.set_size_inches(10, 6)
pyplot.savefig("Cls_TE.png")
pyplot.show()

# EB:::
# TB:::

#%%
"""
STEP 3: Spherical harmonic transforms: Cls --> fits file (writing the Cls in a fits file)
"""
# For one Cls array:
hp.write_cl('calc_Cls.fits', Cls[0])

# For all:
hp.write_cl('calc_Cls.fits', Cls)

#%%
"""
STEP 4: Spherical harmonic transforms: fits file --> Cls (reading Cls from a fits file)
"""
# To open just the FIRST extension:
Cls2 = hp.read_cl('COM_PowerSpect_CMB_R2.02.fits',
                  h=True)  #h=True or False does not work.

# To access the header and choose the extension you want to open:
from astropy.io import fits
hdulist = fits.open('COM_PowerSpect_CMB_R2.02.fits')
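From there, a typical next step is to inspect the extensions before reading one (standard astropy.io.fits calls):

hdulist.info()                    # list the extensions in the file
print(repr(hdulist[1].header))    # inspect the header of the first table extension
data = hdulist[1].data            # then read the columns of the chosen extension
hdulist.close()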
Example #18
            for fn_clean in [fn_clean1, fn_clean2]:
                if not os.path.isfile(fn_clean):
                    print("File not found: " + fn_clean)
                    there = False
            if not there:
                continue
            m1 = npipeqml.smooth_and_degrade(fn_clean1, bl, nsideqml)
            m2 = npipeqml.smooth_and_degrade(fn_clean2, bl, nsideqml)
            t1 = time()
            print("Estimating C_ell ...", flush=True, end="")
            ee, bb = cross.get_spectra(m1, m2)
            cl_clean = np.zeros([4, lmaxqml + 1])
            cl_clean[1, 2:] = ee
            cl_clean[2, 2:] = bb
            print(" estimated in {:.1f} s".format(time() - t1), flush=True)
            hp.write_cl(fn_cl_clean, cl_clean)  # EE and BB
        if not os.path.isfile(fn_cl_cmb):
            if freq1 == 0 and freq2 == 0:
                set_estimator(cross, 143, 143)
            else:
                set_estimator(cross, freq1, freq2)
            fn_cmb1 = "{}/{:04}/smoothed_cmb_{:04}_{:03}.fits".format(mapcache, mc, mc, freq1)
            fn_cmb2 = "{}/{:04}/smoothed_cmb_{:04}_{:03}.fits".format(mapcache, mc, mc, freq2)
            m1 = npipeqml.smooth_and_degrade(fn_cmb1, bl, nsideqml)
            m2 = npipeqml.smooth_and_degrade(fn_cmb2, bl, nsideqml)
            ee, bb = cross.get_spectra(m1, m2)
            cl_cmb = np.zeros([4, lmaxqml + 1])
            cl_cmb[1, 2:] = ee
            cl_cmb[2, 2:] = bb
            hp.write_cl(fn_cl_cmb, cl_cmb)  # EE and BB
Example #19
m_masked[np.logical_not(mask)] = healpy.UNSEEN
healpy.mollview(m_masked, min=-1, max=1)
show()

#method 2: numpy masked arrays
m_masked = healpy.ma(m)
print(m_masked)
m_masked.mask = np.logical_not(mask)
healpy.mollview(m_masked.filled(), min=-1, max=1)
show()
figure()
plot(m_masked[:10000].compressed())
show()

healpy.write_map('wmap_masked.fits', m_masked.filled(), coord='G')

#!Spectra
#!~~~~~~~

cl = healpy.anafast(m_masked.filled(), lmax=1024)
ell = np.arange(len(cl))
plt.figure()
plt.plot(ell, ell * (ell+1) * cl)
plt.xlabel('ell'); plt.ylabel('ell(ell+1)cl'); plt.grid()
show()

healpy.write_cl('cl.fits', cl)

from glob import glob #bash like file pattern matching
print(glob('*.fits'))
Example #20
    labelBase = "%.2f<z<%.2f" % (zMinCib, zMaxCib)
    unlensedMapFilename = "/scratch2/r/rbond/phamloui/lenspix_files/cib_v2_unlensed/cib_fullsky_ns2048_zmin%.2f_zmax%.2f_nu217_ns2048_tot.fits" % (
        zMinCib, zMaxCib)
    unlensedClFilename = "/scratch2/r/rbond/phamloui/lenspix_files/cib_v2_unlensed_cl/cib_fullsky_ns2048_zmin%.2f_zmax%.2f_nu217_ns2048_tot.dat" % (
        zMinCib, zMaxCib)
    unlensedEll = np.arange(0, lmax + 1)
    if os.path.exists(unlensedClFilename):
        unlensedCl = hp.read_cl(unlensedClFilename)
    else:
        unlensedMap = hp.read_map(unlensedMapFilename)
        unlensedMap = np.nan_to_num(unlensedMap)
        unlensedCl = hp.anafast(unlensedMap)
        unlensedCl[0] = 0
        unlensedCl[1] = 0
        hp.write_cl(unlensedClFilename, unlensedCl)
    unlensedCl = savitzky_golay(unlensedCl, 75, 3)
    summedCl += unlensedCl
    # if i%dPlot == 0:
    # plt.semilogy(unlensedEll[1:], unlensedCl[1:], colours[colourIndex], ls='--', label=labelBase + " unlensed", linewidth=2, axes=axes)
    # plt.semilogy(unlensedEll[1:], summedCl[1:], colours[colourIndex], ls='--', label="z<%.2f"%zMaxCib, linewidth=2, axes=axes)

    lensedMapFilename = "/scratch2/r/rbond/phamloui/lenspix_files/cib_v2_lensed/lensed_cib_fullsky_ns2048_zmin%.2f_zmax%.2f_nu217_ns2048_tot.fits" % (
        zMinCib, zMaxCib)
    lensedClFilename = "/scratch2/r/rbond/phamloui/lenspix_files/cib_v2_lensed_cl/lensed_cib_fullsky_ns2048_zmin%.2f_zmax%.2f_nu217_ns2048_tot.dat" % (
        zMinCib, zMaxCib)
    lensedEll = np.arange(0, lmax + 1)
    if os.path.exists(lensedClFilename):
        lensedCl = hp.read_cl(lensedClFilename)
    else:
        lensedMap = hp.read_map(lensedMapFilename)
Example #21
def fix_for_mask(config, logger=log.null_logger):
    with log.Timer(logger,
                   'Step 4: Accounting for mask').with_level(logging.INFO):
        coupling_matrix_fname = config.get('mask_coupling_matrix_fname')

        # If the coupling matrix is not specified, we need to make it
        if not coupling_matrix_fname:
            output_file = os.path.join(config['output_dir'],
                                       'master_deconv_spectra.fits')

            maxl = config.get('matmask_maxl', 2500)

            # mrsp_matmask -t -w -v -z -l 1024 -o TQU <smap> <mask> <output_file>
            cmd = [
                matmask_command, '-t', '-w', '-v', '-z', '-l',
                str(maxl), '-o', 'TQU', config['aggregated_map_fname'],
                config['mask_fname'], output_file
            ]

            rc = log.run_command(cmd, timeout=5e-2, logger=logger)

            if rc != 0:
                raise RuntimeError(matmask_command +
                                   ' failed with exit code: ' + str(rc))

            # Outputs are:
            # master_deconv_spectra.fits
            # master_deconv_spectra_maskedmap_pspec.fits
            # master_deconv_spectra_mask_pspec.fits
            # master_deconv_spectra_mask_spec_radii.fits
            # master_deconv_spectra_coupling_matrices.fits
            if config.get('delete_tmps', True):
                tmps = [
                    os.path.join(config['output_dir'], fname) for fname in (
                        'master_deconv_spectra.fits',
                        'master_deconv_spectra_maskedmap_pspec.fits',
                        'master_deconv_spectra_mask_pspec.fits',
                        'master_deconv_spectra_mask_spec_radii.fits',
                        'master_deconv_spectra_coupling_matrices.fits')
                ]
                for tmp in tmps:
                    logger.debug('deleting ' + tmp)
                    os.remove(tmp)

            coupling_matrix_fname = os.path.join(
                config['output_dir'],
                'master_deconv_spectra_coupling_matrices.fits')

        coupling_matrix = fitsio.FITS(coupling_matrix_fname)[0].read()

        # FIXME: mrsp_matmask does not produce the correct spectrum, but it does
        # produce the correct coupling matrix. we can use the coupling matrix
        # and invert it ourselves.
        # TODO: get to the bottom of this. Why does mrsp_matmask give the wrong
        # answers? Can Florent or JLS figure this out?
        mask = hp.read_map(config['mask_fname'], verbose=False)
        aggregated_cmb = hp.read_map(config['aggregated_map_fname'],
                                     verbose=False)
        # 1e6 to convert K -> mu K
        masked_powerspec = hp.anafast(mask * 1e6 * aggregated_cmb,
                                      lmax=coupling_matrix.shape[0] - 1)
        recovered_pspec = np.linalg.solve(coupling_matrix, masked_powerspec)
        ells = np.arange(recovered_pspec.size)
        ll1 = ells * (ells + 1) / (2 * np.pi) / hp.pixwin(
            2048, lmax=ells.size - 1)**2
        hp.write_cl(
            os.path.join(config['output_dir'], 'mask_corrected_spectra.fits'),
            ll1 * recovered_pspec)
Example #22
def faraday_correlate_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands):
	print "Computer Cross Correlations for Bands "+str(bands)

	temperature_file='/data/Planck/COM_CompMap_CMB-smica_2048.fits'
	planck_T=hp.read_map(temperature_file)
	planck_T*=1e-6

	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	delta_alpha_radio=hp.read_map(alpha_file,hdu='uncertainty/phi')
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	sigma_i=hdu_i['Q/U UNCERTAINTIES'].data
	sigma_j=hdu_j['Q/U UNCERTAINTIES'].data
	mask_hdu=fits.open('/data/wmap/wmap_polarization_analysis_mask_r9_9yr_v5.fits')
	mask=mask_hdu[1].data.field(0)
	mask=hp.reorder(mask,n2r=1)
	#mask=hdu_i['mask'].data
	mask=hp.ud_grade(mask,nside_out=128)
	pix=np.where(mask != 0)
	pix=np.array(pix).reshape(len(pix[0]))
	pix_bad=np.where(mask == 0)
	field_pixels=hdu_i['FIELD PIXELS'].data
	#iqu_band_i[1]+=sigma_i[0]/1.
	#iqu_band_i[2]+=sigma_i[1]/1.
	#iqu_band_j[1]+=sigma_j[0]/1.
	#iqu_band_j[2]+=sigma_j[1]/1.
	
	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=128,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=128,order_in='ring')
	planck_T=hp.ud_grade(planck_T,nside_out=128,order_in='ring')
		
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.pi/180.,lmax=383)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.pi/180.,lmax=383)
	planck_T=hp.smoothing(planck_T,fwhm=np.pi/180.,lmax=383)
	alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)
	
	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_i[2] 
	alpha_q=-alpha_radio*iqu_band_i[1]
	
	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)

	mask_bool1=np.repeat(True,len(Delta_Q))
	mask_bool2=np.repeat(True,len(Delta_Q))
	cross1_array=[]
	cross2_array=[]
	cross3_array=[]
	for field1 in range(4):
		for field2 in range(field1,4):
	
			pix_cmb1=field_pixels.field(field1)	
			pix_cmb1=pix_cmb1[np.nonzero(pix_cmb1)]	##Take Pixels From Field 1
			tmp=np.zeros(hp.nside2npix(1024))
			tmp[pix_cmb1]=1
			tmp=hp.ud_grade(tmp,128)
			mask_bool1[np.nonzero(tmp)]=False
			
			pix_cmb2=field_pixels.field(field2)
			pix_cmb2=pix_cmb2[np.nonzero(pix_cmb2)]	##Take Pixels From Field 2
			tmp=np.zeros(hp.nside2npix(1024))
			tmp[pix_cmb2]=1
			tmp=hp.ud_grade(tmp,128)
			mask_bool2[np.nonzero(tmp)]=False	##Create Masks for each QUIET FIELD

			DQm.mask=mask_bool1
			DUm.mask=mask_bool1
			aQm.mask=mask_bool2
			aUm.mask=mask_bool2
	
			TE_map=np.array([planck_T*alpha_radio,Delta_Q,Delta_U])
			TEm=hp.ma(TE_map)
			TEm[0].mask=mask_bool1
			TEm[1].mask=mask_bool2
			TEm[2].mask=mask_bool2

			#wl=hp.anafast((~mask_bool1).astype(int),map2=(~mask_bool2).astype(int))
			#l=np.arange(len(wl))
			#mixing=[[Mll.Mll(wl,l1,l2) for l1 in l] for l2 in l]
			#Bl=np.exp(-l(l+1)*(60**2+11.7**2)*((np.pi/(180.*60.))/(2*np.sqrt(2*np.log(2))))**2/2)
			#K=[m*Bl**2 for m in mixing]
			#K_inv=K.getI
			tmp_cl1=hp.anafast(DQm,map2=aUm)
			tmp_cl2=hp.anafast(DUm,map2=aQm)			
			l=np.arange(len(tmp_cl1))
			Bl_60=np.exp(-l*(l+1)*((60.0*np.pi/(180.*60.)/(np.sqrt(8.0*np.log(2.))))**2)/2.)
			Bl_11=np.exp(-l*(l+1)*((11.7*np.pi/(180.*60.)/(np.sqrt(8.0*np.log(2.))))**2)/2.)
			Bl_27=np.exp(-l*(l+1)*((27.3*np.pi/(180.*60.)/(np.sqrt(8.0*np.log(2.))))**2)/2.)
			Bl_factor=Bl_60**2*Bl_11*Bl_27
			cross1_array.append(tmp_cl1/Bl_factor)
			cross2_array.append(tmp_cl2/Bl_factor)
			cross_tmp=hp.anafast(TEm,pol=1,nspec=4)
			cross3_array.append(cross_tmp[-1]/Bl_factor)
	
	cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	cross3=np.mean(cross3_array,axis=0)	##Average over all Cross Spectra
	dcross1=np.std(cross1_array,axis=0)	##Average over all Cross Spectra
	dcross2=np.std(cross2_array,axis=0)	##Average over all Cross Spectra
	hp.write_cl('cl_'+bands+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands+'_FR_UxaQ.fits',cross2)
	hp.write_cl('cl_'+bands+'_FR_TE_cmb.fits',cross3)
	hp.write_cl('Nl_'+bands+'_FR_QxaU.fits',dcross1)
	hp.write_cl('Nl_'+bands+'_FR_UxaQ.fits',dcross2)

	Delta_Q[pix_bad]=hp.UNSEEN
	Delta_U[pix_bad]=hp.UNSEEN
	alpha_u[pix_bad]=hp.UNSEEN
	alpha_q[pix_bad]=hp.UNSEEN
	planck_T[pix_bad]=hp.UNSEEN
	prim=fits.PrimaryHDU()
	pix_col=fits.Column(name='PIXEL',format='1J',array=pix)
	col_dq=fits.Column(name='SIGNAL',format='1E',unit='K/m^2',array=Delta_Q[pix])
	col_du=fits.Column(name='SIGNAL',format='1E',unit='K/m^2',array=Delta_U[pix])
	col_aq=fits.Column(name='SIGNAL',format='1E',unit='K*rad/m^2',array=alpha_q[pix])
	col_au=fits.Column(name='SIGNAL',format='1E',unit='K*rad/m^2',array=alpha_u[pix])
	col_te1=fits.Column(name='SIGNAL',format='1E',unit='K*rad/m^2',array=TE_map[0][pix])
	col_te2=fits.Column(name='STOKES Q',format='1E',unit='K/m^2',array=TE_map[1][pix])
	col_te3=fits.Column(name='STOKES U',format='1E',unit='K/m^2',array=TE_map[2][pix])	
	data=[col_dq,col_du,col_aq,col_au]
	names=['Q','aU','U','aQ','TE']
	for i in range(len(data)):
		cols=fits.ColDefs([pix_col,data[i]])
		tbhdu=fits.BinTableHDU.from_columns(cols)
		tbhdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		tbhdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		tbhdu.header["COORDSYS"]=('G','Pixelization coordinate system')
		tbhdu.header["NSIDE"]=(128,'Healpix Resolution parameter')
		tbhdu.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL')
		tbhdu.header['OBS_NPIX']=(len(pix),'Number of pixels observed')
		tbhdu.header['INDXSCHM']=('EXPLICIT','indexing : IMPLICIT or EXPLICIT')
		tblist=fits.HDUList([prim,tbhdu])
		tblist.writeto('quiet_cross_'+names[i]+'.fits',overwrite=True)
	
	return (cross1,cross2,cross3,dcross1,dcross2)
Example #23
theoryCls = [theory_CL_TT, theory_CL_EE, theory_CL_BB]
# print "lensed nans:", lensedT[np.isnan(lensedT)].shape[0]

spectrumIds = ['TT','EE','BB']
idIndices = np.arange(0,len(spectrumIds))

if os.path.exists(filenameBase % ('TT')): #load spectra if saved
    print("Reading lensed cls from file...")
    lensedCls = [hp.read_cl(filenameBase % (_id)) for _id in spectrumIds]
else:
    print("Getting lensed cls from map...")
    lensedCls = hp.anafast([lensedT, lensedQ, lensedU], lmax=lmax)
    print("Writing lensed cls to file...")
    for _id in spectrumIds:
        curCl = lensedCls[spectrumIds.index(_id)]
        hp.write_cl(filenameBase % (_id), curCl)
#[lensedTT, lensedEE, lensedBB] = [lensedCls[0], lensedCls[1], lensedCls[2]]

print "Plotting..."
plt.figure()
plt.title("TEB Power Spectra")
plt.xlabel(r"$l$", fontsize=30)
plt.ylabel(r"$l(l+1)C_l/2\pi$", fontsize=30)
unlensed_linecol = ['#053C5E','#FFCF99','#000000'] # BB is all zero so colour doesn't matter
lensed_linecol = ['#89BD9E','#DB222A','#3454D1']
lensed_theory_linecol = ['#89BD9E','#DB222A','#3454D1']
labels = ['TT', 'EE', 'BB']
for i in idIndices:
    print(i)
    curUnlensed = unlensedCls[i]
    curLensed = lensedCls[i]
Example #24
def smooth_combine(maps_and_weights, variance_maps_and_weights=None, fwhm=np.radians(2.0), degraded_nside=32, spectra=False, smooth_mask=False, spectra_mask=False, base_filename="out", root_folder=".", metadata={}, chi2=False):
    """Combine, smooth, take-spectra, write metadata

    The maps (I or IQU) are first combined with their own weights, then smoothed and degraded.
    This function writes a combined smoothed and degraded map, a spectra 1 or 6 components (not degraded) and a json file with metadata
    
    Parameters
    ----------
    maps_and_weights : list of tuples
        [(map1_array, map1_weight), (map2_array, map2_weight), ...]
        each tuple contains a I or IQU map to be combined with its own weight to give the final map
    variance_maps_and_weights : list of tuples
        same as maps_and_weights but containing variances
    fwhm : double
        smoothing gaussian beam width in radians
    degraded_nside : integer
        nside of the output map
    spectra : bool
        whether to compute and write angular power spectra of the combined map
    smooth_mask, spectra_mask : bool array
        masks for smoothing and spectra, same nside as the input maps; masks should be True *inside* the masked region. Spectra are masked with both masks. Typically smooth_mask should be a point source mask, while spectra_mask a galaxy plane mask.
    base_filename : string
        base filename of the output files
    root_folder : string
        root path of the output files
    metadata : dict
        initial state of the metadata to be written to the json files

    Returns
    -------
    None : all outputs are written to fits files
    """

    log.debug("smooth_combine")
    # check if I or IQU
    is_IQU = len(maps_and_weights[0][0]) == 3
    if not is_IQU:
        assert hp.isnpixok(len(maps_and_weights[0][0])), "Input maps must have either 1 or 3 components"

    combined_map = combine_maps(maps_and_weights)
    for m in combined_map:
        m.mask |= smooth_mask
    if variance_maps_and_weights is not None:
        combined_variance_map = combine_maps(variance_maps_and_weights)
        for m in combined_variance_map:
            m.mask |= smooth_mask

    monopole_I, dipole_I = hp.fit_dipole(combined_map[0], gal_cut=30)
    # remove monopole, only I
    combined_map[0] -= monopole_I

    if spectra:
        # save original masks
        orig_mask = [m.mask.copy() for m in combined_map] 

        # spectra
        log.debug("Anafast")
        for m in combined_map:
            m.mask |= spectra_mask
        # dividing by two in order to recover the same noise as the average map (M1 - M2)/2
        cl = hp.anafast([m/2. for m in combined_map])
        # sky fraction
        sky_frac = (~combined_map[0].mask).sum()/float(len(combined_map[0]))

        if is_IQU:
            for cl_comp in cl:
                cl_comp /= sky_frac
        else:
            cl /= sky_frac

        # write spectra
        log.debug("Write cl: " + base_filename + "_cl.fits")
        try:
            hp.write_cl(os.path.join(root_folder, base_filename + "_cl.fits"), cl)
        except NotImplementedError:
            log.error("Write IQU Cls to fits requires more recent version of healpy")
        del cl

        if variance_maps_and_weights is not None:
            # expected cl from white noise
            # /4. to have same normalization of cl
            metadata["whitenoise_cl"] = utils.get_whitenoise_cl(combined_variance_map[0]/4., mask=combined_map[0].mask) / sky_frac
            if is_IQU:
                # /2. is the mean, /4. is the half difference in power
                metadata["whitenoise_cl_P"] = utils.get_whitenoise_cl((combined_variance_map[1] + combined_variance_map[2])/2./4., mask=combined_map[1].mask | combined_map[2].mask) / sky_frac 

        # restore masks
        for m, mask in zip(combined_map, orig_mask):
            m.mask = mask

    # smooth
    log.debug("Smooth")

    smoothed_map = hp.smoothing(combined_map, fwhm=fwhm)

    if variance_maps_and_weights is not None:
        log.debug("Smooth Variance")
        if is_IQU:
            smoothed_variance_map = [utils.smooth_variance_map(var, fwhm=fwhm) for var in combined_variance_map]
            for comp,m,var in zip("IQU", smoothed_map, smoothed_variance_map):
                 metadata["map_chi2_%s" % comp] = np.mean(m**2 / var) 
            for comp,m,var in zip("IQU", combined_map, combined_variance_map):
                 metadata["map_unsm_chi2_%s" % comp] = np.mean(m**2 / var) 
        else:
            smoothed_variance_map = utils.smooth_variance_map(combined_variance_map[0], fwhm=fwhm)
            metadata["map_chi2"] = np.mean(smoothed_map**2 / smoothed_variance_map) 
            metadata["map_unsm_chi2"] = np.mean(combined_map[0]**2 / combined_variance_map[0]) 

        del smoothed_variance_map
    # removed downgrade of variance
    # smoothed_variance_map = hp.ud_grade(smoothed_variance_map, degraded_nside, power=2)

    # fits
    log.info("Write fits map: " + base_filename + "_map.fits")
    smoothed_map = hp.ud_grade(smoothed_map, degraded_nside)
    hp.write_map(os.path.join(root_folder, base_filename + "_map.fits"), smoothed_map)

    # metadata
    metadata["base_file_name"] = base_filename
    metadata["file_name"] = base_filename + "_cl.fits"
    metadata["file_type"] += "_cl"
    metadata["removed_monopole_I"] = monopole_I
    metadata["dipole_I"] = tuple(dipole_I)

    if spectra:
        metadata["sky_fraction"] = sky_frac
        with open(os.path.join(root_folder, base_filename + "_cl.json"), 'w') as f:
            json.dump(metadata, f, indent=4)

    metadata["file_name"] = base_filename + "_map.fits"
    metadata["file_type"] = metadata["file_type"].replace("_cl","_map")

    metadata["smooth_fwhm_deg"] = "%.2f" % np.degrees(fwhm)
    metadata["out_nside"] = degraded_nside
    if is_IQU:
        for comp,m in zip("IQU", smoothed_map):
             metadata["map_p2p_%s" % comp] = m.ptp()
             metadata["map_std_%s" % comp] = m.std()
    else:
        metadata["map_p2p_I"] = smoothed_map.ptp()
        metadata["map_std_I"] = smoothed_map.std()

    with open(os.path.join(root_folder, base_filename + "_map.json"), 'w') as f:
        json.dump(metadata, f, indent=4)
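A hedged call sketch for smooth_combine (file names and mask files are illustrative, not from the project; differencing half-ring maps with +/-0.5 weights is the kind of use the docstring describes):

# hypothetical IQU masked maps and boolean masks
m1 = hp.ma(hp.read_map("half_ring_1.fits", (0, 1, 2)))
m2 = hp.ma(hp.read_map("half_ring_2.fits", (0, 1, 2)))
ps_mask = hp.read_map("point_source_mask.fits") > 0   # True *inside* masked region
gal_mask = hp.read_map("galaxy_mask.fits") > 0
smooth_combine([(m1, 0.5), (m2, -0.5)],                # (M1 - M2)/2 noise estimate
               spectra=True,
               smooth_mask=ps_mask,
               spectra_mask=gal_mask,
               base_filename="halfring_diff")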
Example #25

sim_CL_EE_lensed = savitzky_golay(sim_CL_EE_lensed, 75, 3)
sim_CL_BB_lensed = savitzky_golay(sim_CL_BB_lensed, 75, 3)

#print theory_TT_ell
#print theory_CL_TT.shape

TEB_cls = np.array([[theory_CL_TT[_l], theory_CL_EE[_l], 0, theory_CL_TE[_l]] for _l in theory_TT_ell]) #fill BB with zeroes
print(TEB_cls.shape)
print("starting lensing convolution...")
lensed_theory_TEB = correlations.lensed_cls(TEB_cls, theory_CL_PP)
theory_CL_TT_lensed = np.array([_cl[0] for _cl in lensed_theory_TEB])
theory_CL_EE_lensed = np.array([_cl[1] for _cl in lensed_theory_TEB])
theory_CL_BB_lensed = np.array([_cl[2] for _cl in lensed_theory_TEB])
# theory_CL_TE_lensed = np.array([_cl[3] for _cl in lensed_theory_TEB])
print "writing camb cl..."
hp.write_cl(lensed_theory_cl_file % ('TT'), theory_CL_TT_lensed)
hp.write_cl(lensed_theory_cl_file % ('EE'), theory_CL_EE_lensed)
hp.write_cl(lensed_theory_cl_file % ('BB'), theory_CL_BB_lensed)

# print "reading theory cl..." #if you've already done calculation before and don't want to wait
# theory_CL_TT_lensed = hp.read_cl(lensed_theory_cl_file % ('TT'))
# theory_CL_EE_lensed = hp.read_cl(lensed_theory_cl_file % ('EE'))
# theory_CL_BB_lensed = hp.read_cl(lensed_theory_cl_file % ('BB'))

theory_TT_ell_lensed = np.arange(len(theory_CL_TT_lensed))

#fractional differences
print "calculating differences..."
lenspix_vs_primary_TT = (sim_CL_TT_lensed - theory_CL_TT) / theory_CL_TT
#camb_vs_lenspix = (sim_CL_TT_lensed - theory_CL_TT_lensed) / theory_CL_TT_lensed
camb_vs_primary_TT = (theory_CL_TT_lensed - theory_CL_TT) / theory_CL_TT
Example #26

def faraday_theory_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands_name,beam=False):
	print("Computing Cross Correlations for Bands "+str(bands_name))

	radio_file='/data/wmap/faraday_MW_realdata.fits'
	cl_file='/home/matt/wmap/simul_scalCls.fits'
	nside=1024
	npix=hp.nside2npix(nside)
	
	cls=hp.read_cl(cl_file)
	simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1);
	
	alpha_radio=hp.read_map(radio_file,hdu='maps/phi');
	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring')
	bands=[43.1,94.5]
	q_fwhm=[27.3,11.7]
	wl=np.array([299792458./(band*1e9) for band in bands])
	num_wl=len(wl)
	t_array=np.zeros((num_wl,npix))	
	q_array=np.zeros((num_wl,npix))
	u_array=np.zeros((num_wl,npix))
	for i in range(num_wl):
		tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl[i],alpha_radio);
		t_array[i],q_array[i],u_array[i]=tmp_cmb
	iqu_band_i=[t_array[0],q_array[0],u_array[0]]	
	iqu_band_j=[t_array[1],q_array[1],u_array[1]]	


	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	temperature_file='/data/Planck/COM_CompMap_CMB-smica_2048.fits'
	planck_T=hp.read_map(temperature_file)
	planck_T*=1e-6
	hdu_i=fits.open(i_file)
	field_pixels=hdu_i['FIELD PIXELS'].data
	hdu_i.close()
	
	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=128,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=128,order_in='ring')
	planck_T=hp.ud_grade(planck_T,nside_out=128,order_in='ring')
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.pi/180.,lmax=383)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.pi/180.,lmax=383)
	planck_T=hp.smoothing(planck_T,fwhm=np.pi/180.,lmax=383)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2]
	alpha_q=-alpha_radio*iqu_band_j[1]

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	cross1_array=[]
	cross2_array=[]
	cross3_array=[]
	Bl_factor=np.repeat(1.,3*128)
	for field1 in range(4):
		mask_bool1=np.repeat(True,len(Delta_Q))
		pix_cmb1=field_pixels.field(field1)	
		pix_cmb1=pix_cmb1[np.nonzero(pix_cmb1)]	##Take Pixels From Field 1
		tmp=np.zeros(hp.nside2npix(1024))
		tmp[pix_cmb1]=1
		tmp=hp.ud_grade(tmp,128)
		mask_bool1[np.nonzero(tmp)]=False
	#	mask_bool1[np.where(P<.7e-6)]=True
		
		DQm.mask=mask_bool1
		DUm.mask=mask_bool1
		aQm.mask=mask_bool1
		aUm.mask=mask_bool1

		TE_map=np.array([planck_T*alpha_radio,Delta_Q,Delta_U])
		TEm=hp.ma(TE_map)
		TEm[0].mask=mask_bool1
		TEm[1].mask=mask_bool1
		TEm[2].mask=mask_bool1
		
		cross1_array.append(hp.anafast(DQm,map2=aUm)/Bl_factor)
		cross2_array.append(hp.anafast(DUm,map2=aQm)/Bl_factor)
		cross_tmp=hp.anafast(TEm,pol=1,nspec=4)
		cross3_array.append(cross_tmp[-1]/Bl_factor)
	
	cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	cross3=np.mean(cross3_array,axis=0)	##Average over all Cross Spectra
	hp.write_cl('cl_'+bands_name+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands_name+'_FR_UxaQ.fits',cross2)
	hp.write_cl('cl_'+bands_name+'_FR_TE_cmb.fits',cross3)
	return (cross1,cross2,cross3)
Example #27
    # Note that generally it's not a good idea to purify both, since you'll lose sensitivity on E
    return f2


# We initialize two workspaces for the non-pure and pure fields:
f20 = get_fields()
w = nmt.NmtWorkspace()
w.compute_coupling_matrix(f20, f20, b)


# This wraps up the two steps needed to compute the power spectrum
# once the workspace has been initialized
def compute_master(f_a, f_b, wsp):
    cl_coupled = nmt.compute_coupled_cell(f_a, f_b)
    cl_decoupled = wsp.decouple_cell(cl_coupled)
    return cl_decoupled


# We now iterate over several simulations, computing the power spectrum for each of them
data = []
for i in np.arange(nsim):
    print(i, nsim)
    f2 = get_fields()
    data.append(compute_master(f2, f2, w))
data = np.array(data)
clnp_mean = np.mean(data, axis=0)
clnp_std = np.std(data, axis=0)

hp.write_cl('cls_BBEE_512.fits', clnp_mean)
hp.write_cl('scls_BBEE_512.fits', clnp_std)
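The mean and scatter saved above can be examined directly; a hedged plotting sketch (assuming b is the NmtBin object used earlier and matplotlib.pyplot is imported as plt):

ells = b.get_effective_ells()
# BB is index 3 of the spin-2 x spin-2 block (EE, EB, BE, BB)
plt.errorbar(ells, clnp_mean[3], yerr=clnp_std[3], fmt='k.')
plt.xlabel(r'$\ell$')
plt.ylabel(r'$C_\ell^{BB}$')
plt.show()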
Example #28
File: debias.py  Project: keirkwame/SILC
n1alms = hp.map2alm(n1map,lmax=3399)
hp.almxfl(n1alms,pixrecip,inplace=True)
inpalms = hp.map2alm(inpmap,lmax=3399)
hp.almxfl(inpalms,ilcbeam*pixrecip,inplace=True)

residalms = n1alms - inpalms

biascls = hp.alm2cl(inpalms,alms2=residalms)
hp.write_cl('/Users/keir/Documents/s2let_ilc_planck/ffp8_pla_data/s2let_ilc_covar15_ffp8_diffuse_deconv_tapered_thresh_lmax3600_3600_hybridC_0_4_recon_cls_bias.fits',biascls)

debiascls = n1cls - (2.*biascls)
hp.write_cl('/Users/keir/Documents/s2let_ilc_planck/ffp8_pla_data/s2let_ilc_covar15_ffp8_diffuse_deconv_tapered_thresh_lmax3600_3600_hybridC_0_4_recon_cls_debias.fits',debiascls)'''

biascls = hp.read_cl(
    '/Users/keir/Documents/s2let_ilc_planck/ffp8_pla_data/s2let_ilc_covar15_ffp8_diffuse_deconv_tapered_thresh_lmax3600_3600_hybridC_0_4_recon_cls_bias.fits'
)
fracbias = biascls / inpcls
hp.write_cl(
    '/Users/keir/Documents/s2let_ilc_planck/ffp8_pla_data/s2let_ilc_covar15_ffp8_diffuse_deconv_tapered_thresh_lmax3600_3600_hybridC_0_4_recon_cls_fracbias.fits',
    fracbias)
'''residcls = hp.alm2cl(residalms)
hp.write_cl('/Users/keir/Documents/s2let_ilc_planck/ffp8_pla_data/s2let_ilc_covar15_ffp8_diffuse_deconv_tapered_thresh_lmax3600_3600_hybridC_0_4_recon_cls_resid.fits',residcls)

deresidcls = n1cls - (2.*biascls) - residcls
hp.write_cl('/Users/keir/Documents/s2let_ilc_planck/ffp8_pla_data/s2let_ilc_covar15_ffp8_diffuse_deconv_tapered_thresh_lmax3600_3600_hybridC_0_4_recon_cls_deresid.fits',deresidcls)

#Check results
print np.sum(n1cls[2:] - inpcls[2:] - residcls[2:] - (2.*biascls[2:]))
print np.sum(n1cls[2:])'''
print(np.mean(fracbias[2:1000]))
print(np.mean(fracbias[2:]))
Example #29

def faraday_correlate_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands,beam=False):
	print("Computing Cross Correlations for Bands "+str(bands))

	q_fwhm=[27.3,11.7]
	noise_const=np.array([36./f for f in q_fwhm])*1e-6	#needed below for the noise realisations
	npix=hp.nside2npix(1024)
	sigma_q_i,sigma_u_i=[noise_const[0]*np.random.normal(0,1,npix),noise_const[0]*np.random.normal(0,1,npix)]
	sigma_q_j,sigma_u_j=[noise_const[1]*np.random.normal(0,1,npix),noise_const[1]*np.random.normal(0,1,npix)]

	sigma_q_i=hp.smoothing(sigma_q_i,fwhm=q_fwhm[0]*np.pi/(180.*60.),verbose=False)	
	sigma_u_i=hp.smoothing(sigma_u_i,fwhm=q_fwhm[0]*np.pi/(180.*60.),verbose=False)	
	sigma_q_j=hp.smoothing(sigma_q_j,fwhm=q_fwhm[1]*np.pi/(180.*60.),verbose=False)	
	sigma_u_j=hp.smoothing(sigma_u_j,fwhm=q_fwhm[1]*np.pi/(180.*60.),verbose=False)	

	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	delta_alpha_radio=hp.read_map(alpha_file,hdu='uncertainty/phi')*np.random.normal(0,1,hp.nside2npix(128))
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	#sigma_i=hdu_i['Q/U UNCERTAINTIES'].data
	#sigma_j=hdu_j['Q/U UNCERTAINTIES'].data
	#mask_hdu=fits.open('/data/wmap/wmap_polarization_analysis_mask_r9_9yr_v5.fits')
	#mask=mask_hdu[1].data.field(0)
	#mask_hdu.close()
	#mask=hp.reorder(mask,n2r=1)
	#mask=hdu_i['mask'].data
	#mask=hp.ud_grade(mask,nside_out=128)
	#pix=np.where(mask != 0)
	#pix=np.array(pix).reshape(len(pix[0]))
	#pix_bad=np.where(mask == 0)
	field_pixels=hdu_i['FIELD PIXELS'].data
	
	iqu_band_i[1]+=np.copy(sigma_q_i)
	iqu_band_i[2]+=np.copy(sigma_u_i)
	iqu_band_j[1]+=np.copy(sigma_q_j)
	iqu_band_j[2]+=np.copy(sigma_u_j)
	hdu_i.close()
	hdu_j.close()
	
	sigma_q_i=hp.smoothing(sigma_q_i,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_u_i=hp.smoothing(sigma_u_i,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_q_j=hp.smoothing(sigma_q_j,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_u_j=hp.smoothing(sigma_u_j,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),verbose=False)	

	sigma_q_i=hp.ud_grade(sigma_q_i,128)
	sigma_u_i=hp.ud_grade(sigma_u_i,128)
	sigma_q_j=hp.ud_grade(sigma_q_j,128)
	sigma_u_j=hp.ud_grade(sigma_u_j,128)
		
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),verbose=False)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),verbose=False)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=128,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=128,order_in='ring')
	
	dust_t_file='/data/Planck/COM_CompMap_dust-commander_0256_R2.00.fits'
	dust_b_file='/data/Planck/COM_CompMap_ThermalDust-commander_2048_R2.00.fits'

	##Dust intensity scaling factor
	hdu_dust_t=fits.open(dust_t_file)
	dust_t=hdu_dust_t[1].data.field('TEMP_ML')
	hdu_dust_t.close()

	dust_t=hp.reorder(dust_t,n2r=1)
	dust_t=hp.ud_grade(dust_t,nside_out)

	hdu_dust_b=fits.open(dust_b_file)
	dust_beta=hdu_dust_b[1].data.field('BETA_ML_FULL')
	hdu_dust_b.close()

	dust_beta=hp.reorder(dust_beta,n2r=1)
	dust_beta=hp.ud_grade(dust_beta,nside_out)

	gamma_dust=6.626e-34/(1.38e-23*dust_t)
	freqs=[43.1,94.5]
	krj_to_kcmb=np.ones_like(freqs)
	dust_factor=np.array([krj_to_kcmb[i]*1e-6*(np.exp(gamma_dust*353e9)-1.)/(np.exp(gamma_dust*x*1e9)-1.)* (x/353.)**(1.+dust_beta) for i,x in enumerate(freqs)])
	sync_factor=krj_to_kcmb*np.array([1e-6*(30./x)**2 for x in freqs])

	hdu_sync=fits.open(synchrotron_file)
	sync_q=hdu_sync[1].data.field(0)
	sync_u=hdu_sync[1].data.field(1)
	
	sync_q=hp.reorder(sync_q,n2r=1)
	sync_q=hp.ud_grade(sync_q,nside_out=128)

	
	sync_u=hp.reorder(sync_u,n2r=1)
	sync_u=hp.ud_grade(sync_u,nside_out=128)
	hdu_sync.close()
	
	hdu_dust=fits.open(dust_file)
	dust_q=hdu_dust[1].data.field(0)
	dust_u=hdu_dust[1].data.field(1)
	hdu_dust.close()
	
	dust_q=hp.reorder(dust_q,n2r=1)
	dust_q=hp.smoothing(dust_q,fwhm=np.sqrt(smoothing_scale**2-10.0**2)*np.pi/(180.*60.),verbose=False)
	dust_q=hp.ud_grade(dust_q,128)
	
	dust_u=hp.reorder(dust_u,n2r=1)
	dust_u=hp.smoothing(dust_u,fwhm=np.sqrt(smoothing_scale**2-10.0**2)*np.pi/(180.*60.),verbose=False)
	dust_u=hp.ud_grade(dust_u,128)
    
	gamma_sync_q=[]
	gamma_sync_u=[]
	delta_dust_q=[]
	delta_dust_u=[]
	iqu_array=[iqu_band_i,iqu_band_j]
	iqu_array_fr=[]
	for cnt,iqu in enumerate(iqu_array):
		#redefine temps to make math easier
		mask_bool1=np.repeat(True,len(dust_q))
		pix_cmb1=field_pixels.field(0)
		pix_cmb1=pix_cmb1[np.nonzero(pix_cmb1)]	##Take Pixels From Field 1
		tmp=np.zeros(hp.nside2npix(1024))
		tmp[pix_cmb1]+=1
		tmp=hp.ud_grade(tmp,128)
		mask_bool1[np.nonzero(tmp)]=False

		dq=hp.ma(dust_factor[cnt]*dust_q)
		du=hp.ma(dust_factor[cnt]*dust_u)
		sq=hp.ma(sync_factor[cnt]*sync_q)
		su=hp.ma(sync_factor[cnt]*sync_u)

		dq.mask=mask_bool1
		du.mask=mask_bool1
		sq.mask=mask_bool1
		su.mask=mask_bool1
		#normalization factors for scaling
		gamma_sync_q= np.sum(iqu[1]*sq)/np.sum(sq**2)- np.sum(dq*sq)/np.sum(sq**2)*( (np.sum(sq**2)*np.sum(iqu[1]*dq)-np.sum(iqu[1]*sq)*np.sum(sq*dq))/(np.sum(dq**2)*np.sum(sq**2)-np.sum(sq*dq)**2) )
		delta_dust_q= (np.sum(sq**2)*np.sum(iqu[1]*dq)-np.sum(iqu[1]*sq)*np.sum(sq*dq))/( np.sum(dq**2)*np.sum(sq**2)-np.sum(sq*dq)**2)

		gamma_sync_u= np.sum(iqu[2]*su)/np.sum(su**2)- np.sum(du*su)/np.sum(su**2)*( (np.sum(su**2)*np.sum(iqu[2]*du)-np.sum(iqu[2]*su)*np.sum(su*du))/(np.sum(du**2)*np.sum(su**2)-np.sum(su*du)**2) )
		delta_dust_u= (np.sum(su**2)*np.sum(iqu[2]*du)-np.sum(iqu[2]*su)*np.sum(su*du))/( np.sum(du**2)*np.sum(su**2)-np.sum(su*du)**2)

		iqu_array_fr.append(np.array([iqu[0],iqu[1]-gamma_sync_q*sq-delta_dust_q*dq,iqu[2]-gamma_sync_u*su-delta_dust_u*du]))

	iqu_band_i=np.copy(iqu_array_fr[0])
	iqu_band_j=np.copy(iqu_array_fr[1])
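	# The gamma_sync_* and delta_dust_* coefficients above are the closed-form
	# solution of the two-template least-squares fit
	# min ||map - gamma*sync - delta*dust||^2 over the unmasked field pixels;
	# the best-fit templates are then subtracted from Q and U.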
	#iqu_band_i[1]-= gamma_sync_q[0]*sync_q + delta_dust_q[0]*dust_q
	#iqu_band_i[2]-= gamma_sync_u[0]*sync_u + delta_dust_u[0]*dust_u
	#iqu_band_j[1]-= gamma_sync_q[1]*sync_q + delta_dust_q[1]*dust_q
	#iqu_band_j[2]-= gamma_sync_u[1]*sync_u + delta_dust_u[1]*dust_u

	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]
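	# Faraday rotation by chi = alpha*lambda^2 mixes Q and U, so to first order
	# the band difference satisfies (Q_i-Q_j)/const ~ alpha*U and
	# (U_i-U_j)/const ~ -alpha*Q (up to sign conventions); cross-correlating
	# Delta_Q with alpha*U and Delta_U with -alpha*Q isolates the signal.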

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	sqi=hp.ma(sigma_q_i)
	sui=hp.ma(sigma_u_i)
	sqj=hp.ma(sigma_q_j)
	suj=hp.ma(sigma_u_j)
	salpha=hp.ma(delta_alpha_radio)
	alpham=hp.ma(alpha_radio)
	um=hp.ma(iqu_band_j[2])
	qm=hp.ma(iqu_band_j[1])
	
	cross1_array=[]
	cross2_array=[]
	Ndq_array=[]
	Ndu_array=[]
	Nau_array=[]
	Naq_array=[]
	Bl_factor=np.repeat(1.,3*128)
	l=np.arange(3*128)
	#ipdb.set_trace()
	if beam:
		Bl_factor=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),383)
	pix_area=hp.nside2pixarea(128)
	pix_area=(smoothing_scale*np.pi/(180.*60))**2
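	# Note: pix_area is immediately overwritten, so the nside2pixarea value is
	# unused; the (smoothing scale in radians)^2 value acts as an effective
	# beam solid angle in the noise estimates below.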
	#ipdb.set_trace()
	for field1 in xrange(1):
		mask_bool1=np.repeat(True,len(Delta_Q))
		pix_cmb1=field_pixels.field(field1)	
		pix_cmb1=pix_cmb1[np.nonzero(pix_cmb1)]	##Take Pixels From Field 1
		tmp=np.zeros(hp.nside2npix(1024))
		tmp[pix_cmb1]+=1
		tmp=hp.ud_grade(tmp,128)
		mask_bool1[np.nonzero(tmp)]=False
		#mask_bool1[np.where(np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)<.2e-6)]=True
		
		DQm.mask=mask_bool1
		DUm.mask=mask_bool1
		aQm.mask=mask_bool1
		aUm.mask=mask_bool1
		sqi.mask=mask_bool1
		sui.mask=mask_bool1
		sqj.mask=mask_bool1
		suj.mask=mask_bool1
		salpha.mask=mask_bool1
		alpham.mask=mask_bool1
		um.mask=mask_bool1
		qm.mask=mask_bool1
		#ipdb.set_trace()
		cross1_array.append(hp.anafast(DQm,map2=aUm)/Bl_factor**2)
		cross2_array.append(hp.anafast(DUm,map2=aQm)/Bl_factor**2)

		##calculate theoretical variance for correlations
		Ndq_array.append((sqi**2+sqj**2).sum()*(pix_area/const)**2/(4.*np.pi))
		Ndu_array.append((sui**2+suj**2).sum()*(pix_area/const)**2/(4.*np.pi))
		Nau_array.append(((salpha*um+alpham*suj+salpha*suj)**2).sum()*pix_area**2/(4.*np.pi))
		Naq_array.append(((salpha*qm+alpham*sqj+salpha*sqj)**2).sum()*pix_area**2/(4.*np.pi))
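		# Analytic white-noise levels: for uncorrelated pixel noise,
		# N_l ~ sum_p sigma_p^2 * Omega^2 / (4*pi), propagated through
		# Delta_Q = (Q_i-Q_j)/const and the alpha*map products.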
		#Ndq_array.append(hp.anafast(sqi,map2=sqj))
		#Ndu_array.append(hp.anafast(sui,map2=suj))
		#Nau_array.append(hp.anafast(um,map2=salpha)+hp.anafast(suj,map2=alpham)+hp.anafast(suj,map2=salpha))
		#Naq_array.append(hp.anafast(qm,map2=salpha)+hp.anafast(suj,map2=alpham)+hp.anafast(sqj,map2=salpha))
		#ipdb.set_trace()
		
        


	#cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	#cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	#N_dq=np.mean(Ndq_array,axis=0)	##Average over all Cross Spectra
	#N_du=np.mean(Ndu_array,axis=0)	##Average over all Cross Spectra
	#N_au=np.mean(Nau_array,axis=0)	##Average over all Cross Spectra
	#N_aq=np.mean(Naq_array,axis=0)	##Average over all Cross Spectra
	cross1=cross1_array[0]
	cross2=cross2_array[0]
	N_dq=Ndq_array[0]
	N_du=Ndu_array[0]
	N_au=Nau_array[0]
	N_aq=Naq_array[0]
	hp.write_cl('cl_'+bands+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands+'_FR_UxaQ.fits',cross2)


	return (cross1,cross2,N_dq,N_du,N_au,N_aq)
Example #30
def faraday_correlate_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands,field):
	print "Computer Cross Correlations for Bands "+str(bands)+" cmb field"+str(field)

	temperature_file='/data/Planck/COM_CompMap_CMB-smica_2048.fits'
	planck_T=hp.read_map(temperature_file)
	planck_T*=1e-6

	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	sigma_i=hdu_i['Q/U UNCERTAINTIES'].data
	sigma_j=hdu_j['Q/U UNCERTAINTIES'].data
	mask=hdu_i['mask'].data
	mask=hp.ud_grade(mask,nside_out=128)
	pix=np.where(mask != 0)
	pix=np.array(pix).reshape(len(pix[0]))
	pix_bad=np.where(mask == 0)
	iqu_band_i[1:]+=sigma_i
	iqu_band_j[1:]+=sigma_j
	
	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=128,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=128,order_in='ring')
	planck_T=hp.ud_grade(planck_T,nside_out=128,order_in='ring')
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.pi/180.,lmax=383)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.pi/180.,lmax=383)
	planck_T=hp.smoothing(planck_T,pol=1,fwhm=np.pi/180.,lmax=383)
	alpha_radio=hp.smoothing(alpha_radio,pol=1,fwhm=np.pi/180.,lmax=383)

	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const 
	alpha_u=alpha_radio*iqu_band_i[2]
	alpha_q=-alpha_radio*iqu_band_i[1]
	
	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	mask_bool=np.repeat(True,len(Delta_Q))
	mask_bool[np.nonzero(mask)]=False

	DQm.mask=mask_bool
	DUm.mask=mask_bool
	aQm.mask=mask_bool
	aUm.mask=mask_bool

	Delta_Q[pix_bad]=hp.UNSEEN
	Delta_U[pix_bad]=hp.UNSEEN
	alpha_u[pix_bad]=hp.UNSEEN
	alpha_q[pix_bad]=hp.UNSEEN
	planck_T[pix_bad]=hp.UNSEEN
	TE_map=np.array([planck_T*alpha_radio,Delta_Q,Delta_U])
	TEm=hp.ma(TE_map)
	TEm[0].mask=mask_bool
	TEm[1].mask=mask_bool
	TEm[2].mask=mask_bool
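	# TE_map packs (T*alpha, Delta_Q, Delta_U) as a pseudo-IQU triple so that
	# anafast's polarized TE output (the last of the nspec=4 spectra) gives the
	# cross-spectrum of T*alpha with the E-mode of (Delta_Q, Delta_U).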

	cross1=hp.anafast(DQm,map2=aUm)
	cross2=hp.anafast(DUm,map2=aQm)
	cross_tmp=hp.anafast(TEm,pol=1,nspec=4)
	cross3=cross_tmp[-1]

	hp.write_cl('cl_'+bands+'_FR_QxaU_cmb'+str(field)+'.fits',cross1)
	hp.write_cl('cl_'+bands+'_FR_UxaQ_cmb'+str(field)+'.fits',cross2)
	hp.write_cl('cl_'+bands+'_FR_TE_cmb'+str(field)+'.fits',cross3)

	prim=fits.PrimaryHDU()
	pix_col=fits.Column(name='PIXEL',format='1J',array=pix)
	col_dq=fits.Column(name='SIGNAL',format='1E',unit='K/m^2',array=Delta_Q[pix])
	col_du=fits.Column(name='SIGNAL',format='1E',unit='K/m^2',array=Delta_U[pix])
	col_aq=fits.Column(name='SIGNAL',format='1E',unit='K*rad/m^2',array=alpha_q[pix])
	col_au=fits.Column(name='SIGNAL',format='1E',unit='K*rad/m^2',array=alpha_u[pix])
	col_te1=fits.Column(name='SIGNAL',format='1E',unit='K*rad/m^2',array=TE_map[0][pix])
	col_te2=fits.Column(name='STOKES Q',format='1E',unit='K/m^2',array=TE_map[1][pix])
	col_te3=fits.Column(name='STOKES U',format='1E',unit='K/m^2',array=TE_map[2][pix])	
	data=[col_dq,col_du,col_aq,col_au]
	names=['Q','U','aQ','aU','TE']
	for i in xrange(len(data)):
		cols=fits.ColDefs([pix_col,data[i]])
		tbhdu=fits.BinTableHDU.from_columns(cols)
		tbhdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		tbhdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		tbhdu.header["COORDSYS"]=('G','Pixelization coordinate system')
		tbhdu.header["NSIDE"]=(128,'Healpix resolution parameter')
		tbhdu.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL')
		tbhdu.header['OBS_NPIX']=(len(pix),'Number of pixels observed')
		tbhdu.header['INDXSCHM']=('EXPLICIT','Indexing: IMPLICIT or EXPLICIT')
		tblist=fits.HDUList([prim,tbhdu])
		tblist.writeto('quiet_cross_'+names[i]+'_cmb'+str(field)+'.fits')
	
	return (cross1,cross2,cross3)
Example #31
    #scal_fits = 'deconv_data/s2let_ilc_dir_para_gauss_wmap_deconv_smoothw_extrapolated_9yr_scal_1024_2_6_3.npy'

    #Load scaling function map
    scal_map = np.load(scal_fits)

    #VARIANCE calculation
    '''scal_map_var,wav_map_var = variance(scal_map)
    print "Synthesising variance alm's"
    var_alms = ps.synthesis_wav2lm(wav_map_var,scal_map_var,wavparam,ellmax,jmin,ndir,spin,upsample)
    print "Calculating variance map"
    var_map = hp.alm2map(var_alms,nside=outnside,pixwin=True)
    hp.write_map(varfits,var_map)'''

    #Synthesise final map
    print "Synthesising final alm's"
    final_alms = ps.synthesis_wav2lm(wav_map, scal_map, wavparam, ellmax, jmin,
                                     ndir, spin, upsample)
    print "Calculating final map"
    final_map = hp.alm2map(final_alms, nside=outnside, pixwin=True)
    hp.write_map(outfits, final_map)
    print "Calculating final cl's"
    final_cls = hp.alm2cl(final_alms)
    hp.write_cl(outclfits, final_cls)
    ell = np.arange(len(final_cls))
    invtwopi = 1. / (2. * mh.pi)
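    # invtwopi = 1/(2*pi) is the factor for the conventional
    # D_l = l*(l+1)*C_l/(2*pi) scaling used by the (disabled) binning below.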

    #Binning final power spectrum for plotting
    '''binlen = 8 #Should be factor of len(final_cls)
    final_cls_binned = np.mean(np.reshape(final_cls,(-1,binlen)),axis=-1)
    ell_binned = np.mean(np.reshape(ell,(-1,binlen)),axis=-1)'''
def faraday_noise_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands,beam=False):
	print "Computing Cross Correlations for Bands "+str(bands)


	q_fwhm=[27.3,11.7]
	noise_const=np.array([36./f for f in q_fwhm])*1e-6
	npix=hp.nside2npix(1024)
	sigma_i=[noise_const[0]*np.random.normal(0,1,npix),noise_const[0]*np.random.normal(0,1,npix)]
	sigma_j=[noise_const[1]*np.random.normal(0,1,npix),noise_const[1]*np.random.normal(0,1,npix)]
	
	sigma_q_i=hp.smoothing(sigma_i[0],fwhm=q_fwhm[0]*np.pi/(180.*60.),verbose=False)	
	sigma_u_i=hp.smoothing(sigma_i[1],fwhm=q_fwhm[0]*np.pi/(180.*60.),verbose=False)	
	sigma_q_j=hp.smoothing(sigma_j[0],fwhm=q_fwhm[1]*np.pi/(180.*60.),verbose=False)	
	sigma_u_j=hp.smoothing(sigma_j[1],fwhm=q_fwhm[1]*np.pi/(180.*60.),verbose=False)	
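	# Note: these native-beam smoothed noise maps are overwritten below with
	# versions smoothed to the common resolution; only the later ones are used.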

	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	field_pixels=hdu_i['FIELD PIXELS'].data
	hdu_i.close()
	hdu_j.close()
	
	iqu_band_i=np.zeros((3,npix))	
	iqu_band_j=np.zeros((3,npix))	
	iqu_band_i[1]=np.copy(sigma_i[0])
	iqu_band_i[2]=np.copy(sigma_i[1])
	iqu_band_j[1]=np.copy(sigma_j[0])
	iqu_band_j[2]=np.copy(sigma_j[1])
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),verbose=False)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),verbose=False)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=128,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=128,order_in='ring')
	
	sigma_q_i=hp.smoothing(sigma_i[0],fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_u_i=hp.smoothing(sigma_i[1],fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_q_j=hp.smoothing(sigma_j[0],fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_u_j=hp.smoothing(sigma_j[1],fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),verbose=False)	

	sigma_q_i=hp.ud_grade(sigma_q_i,128)
	sigma_u_i=hp.ud_grade(sigma_u_i,128)
	sigma_q_j=hp.ud_grade(sigma_q_j,128)
	sigma_u_j=hp.ud_grade(sigma_u_j,128)
	
	dust_t_file='/data/Planck/COM_CompMap_dust-commander_0256_R2.00.fits'
	dust_b_file='/data/Planck/COM_CompMap_ThermalDust-commander_2048_R2.00.fits'

	##Dust intensity scaling factor
	hdu_dust_t=fits.open(dust_t_file)
	dust_t=hdu_dust_t[1].data.field('TEMP_ML')
	hdu_dust_t.close()

	dust_t=hp.reorder(dust_t,n2r=1)
	dust_t=hp.ud_grade(dust_t,nside_out)

	hdu_dust_b=fits.open(dust_b_file)
	dust_beta=hdu_dust_b[1].data.field('BETA_ML_FULL')
	hdu_dust_b.close()

	dust_beta=hp.reorder(dust_beta,n2r=1)
	dust_beta=hp.ud_grade(dust_beta,nside_out)

	gamma_dust=6.626e-34/(1.38e-23*dust_t)
	freqs=[43.1,94.5]
	krj_to_kcmb=np.ones_like(freqs)
	dust_factor=np.array([krj_to_kcmb[i]*1e-6*(np.exp(gamma_dust*353e9)-1.)/(np.exp(gamma_dust*x*1e9)-1.)*(x/353.)**(1.+dust_beta) for i,x in enumerate(freqs)])
	sync_factor=krj_to_kcmb*np.array([1e-6*(30./x)**2 for x in freqs])

	hdu_sync=fits.open(synchrotron_file)
	sync_q=hdu_sync[1].data.field(0)
	sync_u=hdu_sync[1].data.field(1)
	
	sync_q=hp.reorder(sync_q,n2r=1)
	sync_q=hp.ud_grade(sync_q,nside_out=128)

	
	sync_u=hp.reorder(sync_u,n2r=1)
	sync_u=hp.ud_grade(sync_u,nside_out=128)
	hdu_sync.close()
	
	hdu_dust=fits.open(dust_file)
	dust_q=hdu_dust[1].data.field(0)
	dust_u=hdu_dust[1].data.field(1)
	hdu_dust.close()
	
	dust_q=hp.reorder(dust_q,n2r=1)
	dust_q=hp.smoothing(dust_q,fwhm=np.sqrt(smoothing_scale**2-10.0**2)*np.pi/(180.*60.),verbose=False)
	dust_q=hp.ud_grade(dust_q,128)
	
	dust_u=hp.reorder(dust_u,n2r=1)
	dust_u=hp.smoothing(dust_u,fwhm=np.sqrt(smoothing_scale**2-10.0**2)*np.pi/(180.*60.),verbose=False)
	dust_u=hp.ud_grade(dust_u,128)
    
	gamma_sync_q=[]
	gamma_sync_u=[]
	delta_dust_q=[]
	delta_dust_u=[]
	iqu_array=[iqu_band_i,iqu_band_j]
	iqu_array_fr=[]
	for cnt,iqu in enumerate(iqu_array):
		#redefine temps to make math easier
		mask_bool1=np.repeat(True,len(dust_q))
		pix_cmb1=field_pixels.field(0)
		pix_cmb1=pix_cmb1[np.nonzero(pix_cmb1)]	##Take Pixels From Field 1
		tmp=np.zeros(hp.nside2npix(1024))
		tmp[pix_cmb1]+=1
		tmp=hp.ud_grade(tmp,128)
		mask_bool1[np.nonzero(tmp)]=False

		dq=hp.ma(dust_factor[cnt]*dust_q)
		du=hp.ma(dust_factor[cnt]*dust_u)
		sq=hp.ma(sync_factor[cnt]*sync_q)
		su=hp.ma(sync_factor[cnt]*sync_u)

		dq.mask=mask_bool1
		du.mask=mask_bool1
		sq.mask=mask_bool1
		su.mask=mask_bool1
		#normalization factors for scaling
		gamma_sync_q= np.sum(iqu[1]*sq)/np.sum(sq**2)- np.sum(dq*sq)/np.sum(sq**2)*( (np.sum(sq**2)*np.sum(iqu[1]*dq)-np.sum(iqu[1]*sq)*np.sum(sq*dq))/(np.sum(dq**2)*np.sum(sq**2)-np.sum(sq*dq)**2) )
		delta_dust_q= (np.sum(sq**2)*np.sum(iqu[1]*dq)-np.sum(iqu[1]*sq)*np.sum(sq*dq))/( np.sum(dq**2)*np.sum(sq**2)-np.sum(sq*dq)**2)

		gamma_sync_u= np.sum(iqu[2]*su)/np.sum(su**2)- np.sum(du*su)/np.sum(su**2)*( (np.sum(su**2)*np.sum(iqu[2]*du)-np.sum(iqu[2]*su)*np.sum(su*du))/(np.sum(du**2)*np.sum(su**2)-np.sum(su*du)**2) )
		delta_dust_u= (np.sum(su**2)*np.sum(iqu[2]*du)-np.sum(iqu[2]*su)*np.sum(su*du))/( np.sum(du**2)*np.sum(su**2)-np.sum(su*du)**2)

		iqu_array_fr.append(np.array([iqu[0],iqu[1]-gamma_sync_q*sq-delta_dust_q*dq,iqu[2]-gamma_sync_u*su-delta_dust_u*du]))

	iqu_band_i=np.copy(iqu_array_fr[0])
	iqu_band_j=np.copy(iqu_array_fr[1])
	Weight1=np.repeat(1.,len(iqu_band_i[0]))
	Weight2=np.repeat(1.,len(iqu_band_i[0]))
	
	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	cross1_array=[]
	cross2_array=[]
	
	Bl_factor=np.repeat(1.,3*128)
	if beam:
		Bl_factor=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),383)
	for field1 in xrange(1):
		mask_bool1=np.repeat(True,len(Delta_Q))
		pix_cmb1=field_pixels.field(field1)	
		pix_cmb1=pix_cmb1[np.nonzero(pix_cmb1)]	##Take Pixels From Field 1
		tmp=np.zeros(hp.nside2npix(1024))
		tmp[pix_cmb1]+=1
		tmp=hp.ud_grade(tmp,128)
		mask_bool1[np.nonzero(tmp)]=False
		#mask_bool1[np.where(np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)<.2e-6)]=True
		
		DQm.mask=mask_bool1
		DUm.mask=mask_bool1
		aQm.mask=mask_bool1
		aUm.mask=mask_bool1

		
		cross1_array.append(hp.anafast(DQm,map2=aUm)/Bl_factor**2)
		cross2_array.append(hp.anafast(DUm,map2=aQm)/Bl_factor**2)
	
	#cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	#cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	cross1=cross1_array[0]
	cross2=cross2_array[0]
	hp.write_cl('cl_'+bands+'_FR_noise_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands+'_FR_noise_UxaQ.fits',cross2)
	return (cross1,cross2)
Example #33
w2 = nmt.NmtWorkspace()
w2.compute_coupling_matrix(f0, f2, b)
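# The mode-coupling matrix depends only on the mask and bandpowers, not on the
# maps themselves, so each workspace is computed once and reused for every
# simulation in the loop below.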

# We now iterate over several simulations,
# computing the power spectrum for each of them
data_00 = []
data_02 = []
for i in np.arange(nsim):
    print(i, nsim)
    mp_t, mp_q, mp_u = hp.synfast(spectra,
                                  nside=nside,
                                  fwhm=np.radians(0.39),
                                  pixwin=True,
                                  new=True,
                                  verbose=False)
    f0_sim = nmt.NmtField(msk_apo, [mp_t], beam=beam)
    f2_sim = nmt.NmtField(msk_apo, [mp_q, mp_u], beam=beam)
    data_00.append(compute_master(f0_sim, f0_sim, w))
    data_02.append(compute_master(f0_sim, f2_sim, w2))
data_00 = np.array(data_00)
data_02 = np.array(data_02)
cltt_mean = np.mean(data_00, axis=0)
cltt_std = np.std(data_00, axis=0)
clte_mean = np.mean(data_02, axis=0)
clte_std = np.std(data_02, axis=0)

hp.write_cl('cls_tt_512_beam_v2.fits', cltt_mean)
hp.write_cl('scls_tt_512_beam_v2.fits', cltt_std)
hp.write_cl('cls_te_512_beam_v2.fits', clte_mean)
hp.write_cl('scls_te_512_beam_v2.fits', clte_std)
def faraday_theory_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands_name,beam=False):
	print "Computing Cross Correlations for Bands "+str(bands_name)

	radio_file='/data/wmap/faraday_MW_realdata.fits'
	cl_file='/home/matt/wmap/simul_scalCls.fits'
	nside=1024
	npix=hp.nside2npix(nside)
	
	#cls=hp.read_cl(cl_file)
	#simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1);
	#
	#alpha_radio=hp.read_map(radio_file,hdu='maps/phi');
	#alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring')
	bands=[43.1,94.5]
	q_fwhm=[27.3,11.7]
	noise_const=np.array([36./f for f in q_fwhm])*1e-6
	npix=hp.nside2npix(128)
	sigma_i=[noise_const[0]*np.random.normal(0,1,npix),noise_const[0]*np.random.normal(0,1,npix)]
	sigma_j=[noise_const[1]*np.random.normal(0,1,npix),noise_const[1]*np.random.normal(0,1,npix)]
	npix=hp.nside2npix(1024)
	wl=np.array([299792458./(band*1e9) for band in bands])
	num_wl=len(wl)
	#t_array=np.zeros((num_wl,npix))	
	#q_array=np.zeros((num_wl,npix))
	#u_array=np.zeros((num_wl,npix))
	#for i in range(num_wl):
	#	tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl[i],alpha_radio);
	#	t_array[i],q_array[i],u_array[i]=hp.smoothing(tmp_cmb,fwhm=q_fwhm[i]*np.pi/(180.*60.),verbose=False)
	#iqu_band_i=np.array([t_array[0],q_array[0],u_array[0]])
	#iqu_band_j=np.array([t_array[1],q_array[1],u_array[1]])	


	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	iqu_band_i=hdu_i['no noise iqu'].data
	iqu_band_j=hdu_j['no noise iqu'].data
	field_pixels=hdu_i['FIELD PIXELS'].data
	hdu_i.close()
	hdu_j.close()
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),verbose=False)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),verbose=False)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=128,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=128,order_in='ring')
	
	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	cross1_array=[]
	cross2_array=[]

	Bl_factor=np.repeat(1.,3*128)
	if beam:
		Bl_factor=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),383)
	for field1 in xrange(1):
		mask_bool1=np.repeat(True,len(Delta_Q))
		pix_cmb1=field_pixels.field(field1)	
		pix_cmb1=pix_cmb1[np.nonzero(pix_cmb1)]	##Take Pixels From Field 1
		tmp=np.zeros(hp.nside2npix(1024))
		tmp[pix_cmb1]+=1
		tmp=hp.ud_grade(tmp,128)
		mask_bool1[np.nonzero(tmp)]=False
		#mask_bool1[np.where(np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)<.1e-6)]=True
		
		DQm.mask=mask_bool1
		DUm.mask=mask_bool1
		aQm.mask=mask_bool1
		aUm.mask=mask_bool1

		#ipdb.set_trace()	
		cross1_array.append(hp.anafast(DQm,map2=aUm)/Bl_factor**2)
		cross2_array.append(hp.anafast(DUm,map2=aQm)/Bl_factor**2)
	
	#cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	#cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	cross1=cross1_array[0]
	cross2=cross2_array[0]
	hp.write_cl('cl_'+bands_name+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands_name+'_FR_UxaQ.fits',cross2)
	return (cross1,cross2)
def main():
	##Parameters for Binning, Number of Runs
	##	Beam correction
	use_beam=0
#	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),383)**2
	#bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	bls=(hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)*hp.pixwin(nside_out)[:3*nside_out])**2
	N_runs=500
	bins=[1,5,10,20,50]

	map_prefix='/home/matt/quiet/quiet_maps/'
	i_file=map_prefix+'quiet_simulated_43.1'
	j_file=map_prefix+'quiet_simulated_94.5'
	alpha_file='/data/wmap/faraday_MW_realdata.fits'
	bands=[43.1,94.5]
	names=['43','95']
	wl=np.array([299792458./(band*1e9) for band in bands])
	cross1_array_in=[]
	cross2_array_in=[]
	Ndq_array_in=[]
	Ndu_array_in=[]
	Nau_array_in=[]
	Naq_array_in=[]
	noise1_array_in=[]
	noise2_array_in=[]
	theory1_array_in=[]
	theory2_array_in=[]
	

	#simulate_fields.main()
	ttmp1,ttmp2=faraday_theory_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam)
	theory1_array_in.append(ttmp1)
	theory2_array_in.append(ttmp2)
	#for n in xrange(N_runs):
	for i in xrange(N_runs):	
		print(Fore.WHITE+Back.GREEN+Style.BRIGHT+'Correlation #{:03d}'.format(i+1)+Back.RESET+Fore.RESET+Style.RESET_ALL)
		tmp1,tmp2,n1,n2,n3,n4=faraday_correlate_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam)
	#	ntmp1,ntmp2=faraday_noise_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam)
		cross1_array_in.append(tmp1)
		cross2_array_in.append(tmp2)
		Ndq_array_in.append(n1)
		Ndu_array_in.append(n2)
		Nau_array_in.append(n3)
		Naq_array_in.append(n4)
	#	noise1_array_in.append(ntmp1)
	#	noise2_array_in.append(ntmp2)


	f=open('cl_theory_FR_QxaU.json','w')
	json.dump(np.array(theory1_array_in).tolist(),f)
	f.close()	
	f=open('cl_theory_FR_UxaQ.json','w')
	json.dump(np.array(theory2_array_in).tolist(),f)
	f.close()	
	theory1=np.mean(theory1_array_in,axis=0)
	theory2=np.mean(theory2_array_in,axis=0)
	hp.write_cl('cl_theory_FR_QxaU.fits',theory1)
	hp.write_cl('cl_theory_FR_UxaQ.fits',theory2)
	#f=open('cl_theory_FR_QxaU.json','r')
	#theory1_array=json.load(f)
	#f.close()	
	#f=open('cl_theory_FR_UxaQ.json','r')
	#theory2_array=json.load(f)
	#f.close()	
	f=open('cl_array_FR_QxaU.json','w')
	json.dump(np.array(cross1_array_in).tolist(),f)
	f.close()	
	f=open('cl_array_FR_UxaQ.json','w')
	json.dump(np.array(cross2_array_in).tolist(),f)
	f.close()	
	f=open('cl_Ndq_FR_QxaU.json','w')
	json.dump(np.array(Ndq_array_in).tolist(),f)
	f.close()	
	f=open('cl_Ndu_FR_UxaQ.json','w')
	json.dump(np.array(Ndu_array_in).tolist(),f)
	f.close()	
	f=open('cl_Nau_FR_QxaU.json','w')
	json.dump(np.array(Nau_array_in).tolist(),f)
	f.close()	
	f=open('cl_Naq_FR_UxaQ.json','w')
	json.dump(np.array(Naq_array_in).tolist(),f)
	f.close()	
	#f=open('cl_noise_FR_QxaU.json','w')
	#json.dump(np.array(noise1_array_in).tolist(),f)
	#f.close()	
	#f=open('cl_noise_FR_UxaQ.json','w')
	#json.dump(np.array(noise2_array_in).tolist(),f)
	#f.close()	
	bins=[1,5,10,20,25,50]
	fsky=225.*(np.pi/180.)**2/(4*np.pi)
	l=np.arange(len(cross1_array_in[0]))
	ll=l*(l+1)/(2*np.pi)
	L=np.sqrt(fsky*4*np.pi)
	dl_eff=2*np.pi/L
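	# dl_eff = 2*pi/L is the effective multipole resolution of a patch of side
	# L = sqrt(Omega_patch); multipoles closer than this are not independent.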
	
	theory1_array_in=np.array(theory1_array_in)/(fsky*bls)
	theory2_array_in=np.array(theory2_array_in)/(fsky*bls)
	cross1_array_in=np.array(cross1_array_in)/(fsky*bls)
	cross2_array_in=np.array(cross2_array_in)/(fsky*bls)
	Ndq_array_in=np.array(Ndq_array_in)/(fsky)
	Ndu_array_in=np.array(Ndu_array_in)/(fsky)
	Nau_array_in=np.array(Nau_array_in)/(fsky)
	Naq_array_in=np.array(Naq_array_in)/(fsky)
	#noise1_array_in=np.array(noise1_array_in)/(fsky*bls)
	#noise2_array_in=np.array(noise2_array_in)/(fsky*bls)

	Ndq_array_in.shape += (1,)
	Ndu_array_in.shape += (1,)
	Nau_array_in.shape += (1,)
	Naq_array_in.shape += (1,)
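	# The trailing unit axis lets the per-run scalar noise estimates broadcast
	# against per-multipole arrays (e.g. ll*Ndq_array_in) below.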


	for b in bins:
		theory_cls=hp.read_cl('/home/matt/Planck/data/faraday/correlation/fr_theory_cl.fits')
	#	N_dq=np.mean(Ndq_array_in)
	#	N_au=np.mean(Nau_array_in)
	#	#delta1=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))**2+(np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#	delta1=np.sqrt(2.*((np.mean(theory1_array_in,axis=0))**2+(np.mean(theory1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#
		cosmic1=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=0)**2)

	#	N_du=np.mean(Ndu_array_in)
	#	N_aq=np.mean(Naq_array_in)
	#	#delta2=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))**2+(np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#	delta2=np.sqrt(2.*((np.mean(theory2_array_in,axis=0))**2+(np.mean(theory2_array_in,axis=0))/2.*(N_du+N_aq)+N_du*N_aq/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
		cosmic2=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=0)**2)

		theory1_array=[]
		theory2_array=[]
		cross1_array=[]
		cross2_array=[]
	#	noise1_array=[]
	#	noise2_array=[]

		Ndq_array=[]
		Ndu_array=[]
		Nau_array=[]
		Naq_array=[]
		plot_l=[]
		if( b != 1):
	        	tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in,b)
	        	tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in,b)
			tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in,b)
	        	tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in,b)
		#	tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in,b)
	        #	tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in,b)
	        	
			theory1_array=tmp_t1['llcl']
			theory2_array=tmp_t2['llcl']
			theory1_array.shape += (1,)
			theory2_array.shape += (1,)
			theory1_array=theory1_array.T
			theory2_array=theory2_array.T
			plot_l= tmp_t1['l_out']
			cross1_array=tmp_c1['llcl']
			cross2_array=tmp_c2['llcl']
			
		#	noise1_array=tmp_n1['llcl']
		#	noise2_array=tmp_n2['llcl']
	        	
			Ndq_array=bin_llcl.bin_llcl(ll*Ndq_array_in,b)['llcl']
			Ndu_array=bin_llcl.bin_llcl(ll*Ndu_array_in,b)['llcl']
			Naq_array=bin_llcl.bin_llcl(ll*Naq_array_in,b)['llcl']
			Nau_array=bin_llcl.bin_llcl(ll*Nau_array_in,b)['llcl']
			tmp_c1=bin_llcl.bin_llcl((ll*cosmic1)**2,b)
			#tmp_d1=bin_llcl.bin_llcl((ll*delta1)**2,b)
		
			cosmic1=np.sqrt(tmp_c1['llcl'])
			#delta1=np.sqrt(tmp_d1['llcl'])

			tmp_c2=bin_llcl.bin_llcl((ll*cosmic2)**2,b)
			#tmp_d2=bin_llcl.bin_llcl((ll*delta2)**2,b)
			cosmic2=np.sqrt(tmp_c2['llcl'])
			#delta2=np.sqrt(tmp_d2['llcl'])
			t_tmp=bin_llcl.bin_llcl(ll*theory_cls,b)
			theory_cls=t_tmp['llcl']
		else:
			plot_l=l
			theory1_array=np.multiply(ll,theory1_array_in)
			cross1_array=np.multiply(ll,cross1_array_in)
		#	noise1_array=np.multiply(ll,noise1_array_in)
			theory2_array=np.multiply(ll,theory2_array_in)
			cross2_array=np.multiply(ll,cross2_array_in)
		#	noise2_array=np.multiply(ll,noise2_array_in)
			cosmic1*=ll
			cosmic2*=ll
			#delta1*=ll
			#delta2*=ll
			Ndq_array=np.multiply(ll,Ndq_array_in)
			Ndu_array=np.multiply(ll,Ndu_array_in)
			Naq_array=np.multiply(ll,Naq_array_in)
			Nau_array=np.multiply(ll,Nau_array_in)
			theory_cls*=ll
		#ipdb.set_trace()
		bad=np.where(plot_l < 24)
		N_dq=np.mean(Ndq_array,axis=0)
		N_du=np.mean(Ndu_array,axis=0)
		N_aq=np.mean(Naq_array,axis=0)
		N_au=np.mean(Nau_array,axis=0)
		#noise1=np.mean(noise1_array,axis=0)
		#noise2=np.mean(noise2_array,axis=0)
		theory1=np.mean(theory1_array,axis=0)
		theory2=np.mean(theory2_array,axis=0)
		theory_array = np.add(theory1_array,theory2_array)
		theory=np.mean(theory_array,axis=0)
		#dtheory=np.sqrt(np.var(theory1_array,ddof=1) + np.var(theory2_array,ddof=1))
		#cross_array = np.add(np.subtract(cross1_array,noise1),np.subtract(cross2_array,noise2))
		cross_array = np.add(cross1_array,cross2_array)
		cross=np.mean(cross_array,axis=0)
		#dcross=np.std(cross_array,axis=0,ddof=1)
		dcross=np.sqrt( ( np.var(cross1_array,axis=0,ddof=1) + np.var(cross2_array,axis=0,ddof=1)))
		cosmic=np.sqrt(cosmic1**2+cosmic2**2)

		delta1=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory1**2 + theory1*(N_dq+N_au)/2. + N_dq*N_au/2.))
		delta2=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory2**2 + theory2*(N_du+N_aq)/2. + N_du*N_aq/2.))
		delta=np.sqrt(delta1**2+delta2**2)
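		# Knox-type error bars: var(C_b) ~ 2/((2l+1)*fsky*dl) *
		# (C_l^2 + C_l*(N1+N2)/2 + N1*N2/2) with dl = sqrt(b^2 + dl_eff^2) the
		# effective bin width; 'cosmic' keeps only the sample-variance term.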
		#cosmic=np.abs(theory_cls)*np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(dl_eff**2+b**2)))
		#theory1=np.mean(theory1_array,axis=0)
		#dtheory1=np.std(theory1_array,axis=0,ddof=1)
		#cross1=np.mean(cross1_array,axis=0)
		#dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
		#ipdb.set_trace()
		plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'Cross_43x95_FR', title='QUIET FR Correlator',theory=theory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)

		#theory2=np.mean(theory2_array,axis=0)
		#dtheory2=np.std(theory2_array,axis=0,ddof=1)
		#cross2=np.mean(cross2_array,axis=0)
		##delta2=np.mean(delta2_array,axis=0)
		#dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
		##ipdb.set_trace()
		#plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
		#ipdb.set_trace()
    
		if b == 25 :
			good_l=np.logical_and(plot_l <= 200,plot_l >25)
			likelihood(cross[good_l],delta[good_l],theory[good_l],'field1','c2bfr')

		#if b == 1 :
		#	xbar= np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
		#	vector=np.matrix(ll[1:]*cross[1:]).T
		#	mu=np.matrix(ll[1:]*theory[1:]).T
		#	fact=len(xbar)-1
		#	cov=(np.dot(xbar,xbar.T)/fact).squeeze()
		##	ipdb.set_trace()
		#	U,S,V =np.linalg.svd(cov)
		#	_cov= np.einsum('ij,j,jk', V.T,1./S,U.T)
		#	likelhd=np.exp(-np.dot(np.dot((vector-mu).T,_cov),(vector-mu))/2. )/(np.sqrt(2*np.pi*np.prod(S)))
		##	print('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
		#	f=open('FR_likelihood.txt','w')
		#	f.write('Likelihood of fit is #{0:.5f}'.format(likelhd[0,0]))
		#	f.close()

	subprocess.call('mv *01*.png bin_01/', shell=True)
	subprocess.call('mv *05*.png bin_05/', shell=True)
	subprocess.call('mv *10*.png bin_10/', shell=True)
	subprocess.call('mv *20*.png bin_20/', shell=True)
	subprocess.call('mv *25*.png bin_25/', shell=True)
	subprocess.call('mv *50*.png bin_50/', shell=True)
	subprocess.call('mv *.eps eps/', shell=True)
Example #36
hp.mollview(diffmap,
            unit=r'$\mu\mathrm{K}$',
            title=r"SILC $(N = 2)$ - input [FFP8]",
            min=-1. * LIM,
            max=LIM)
plt.savefig(
    '/Users/kwame/Documents/s2let_ilc_papers/s2let_ilc_temp/diffmap8_n2_input_biased.pdf'
)

#Plot N=1 and NILC spectra
#ilcbeam = hp.gauss_beam(np.radians(5./60.),lmax=299) #5 arcmin
'''n1cls_masked = hp.anafast(n1map_masked,lmax=3399)
pixrecip = 1. / hp.pixwin(hp.get_nside(n1map_masked))[:3400]'''
'''fsky = 1. - (float(np.sum(np.logical_not(psmask))) / len(psmask))
print fsky'''
'''chanmaskcls_corrected = n1cls_masked * pixrecip * pixrecip #) / fsky
hp.write_cl('/Users/kwame/Documents/s2let_ilc_data/s2let_ilc_covar15_planck_diffuse_deconv_tapered_thresh_lmax3600_3600_hybridC_6_1_recon_inpaint1600_cls.fits',chanmaskcls_corrected)'''
print "Calculated spectra"
'''nilccls_masked = hp.anafast(nilcmap_masked,lmax=3399)
#pixrecip = 1. / hp.pixwin(hp.get_nside(nilcmap_masked))[:3400]
fsky = 1. - (float(np.sum(np.logical_not(psmask))) / len(psmask))
print fsky
mapbeam = 1. / af.getdata('/Users/keir/Documents/s2let_ilc_planck/COM_CMB_IQU-smica-field-Int_2048_R2.00.fits',2).field('BEAM_WF')[:3400]
ilcbeam = hp.gauss_beam(np.radians(5./60.),lmax=3399) #5 arcmin
smicacls = (nilccls_masked * mapbeam * mapbeam * ilcbeam * ilcbeam) / fsky
hp.write_cl('/Users/keir/Documents/s2let_ilc_planck/smica_pr2_ilcbeam_lmax3399_cls_masked.fits',smicacls)'''
'''lmin = 2
lmax = 3392'''
'''theorycls = hp.read_cl('/Users/keir/Software/camb/planck2015_4_scalCls.fits')[0][lmin:lmax] * 1e12 #(uK)^2
ilcbeam = hp.gauss_beam(np.radians(5./60.),lmax=3391)[lmin:] #5 arcmin
theorycls_corrected = theorycls * ilcbeam * ilcbeam'''
def faraday_correlate_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands,beam=False):
	print "Computer Cross Correlations for Bands "+str(bands)

	temperature_file='/data/Planck/COM_CompMap_CMB-smica_2048.fits'
	planck_T=hp.read_map(temperature_file)
	planck_T*=1e-6

	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	sigma_i=hdu_i['Q/U UNCERTAINTIES'].data
	sigma_j=hdu_j['Q/U UNCERTAINTIES'].data
	#mask_hdu=fits.open('/data/wmap/wmap_polarization_analysis_mask_r9_9yr_v5.fits')
	#mask=mask_hdu[1].data.field(0)
	#mask_hdu.close()
	#mask=hp.reorder(mask,n2r=1)
	#mask=hdu_i['mask'].data
	#mask=hp.ud_grade(mask,nside_out=128)
	#pix=np.where(mask != 0)
	#pix=np.array(pix).reshape(len(pix[0]))
	#pix_bad=np.where(mask == 0)
	field_pixels=hdu_i['FIELD PIXELS'].data
	iqu_band_i[1]+=sigma_i[0]
	iqu_band_i[2]+=sigma_i[1]
	iqu_band_j[1]+=sigma_j[0]
	iqu_band_j[2]+=sigma_j[1]
	hdu_i.close()
	hdu_j.close()
	
	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=128,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=128,order_in='ring')
	planck_T=hp.ud_grade(planck_T,nside_out=128,order_in='ring')
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.pi/180.,lmax=383)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.pi/180.,lmax=383)
	planck_T=hp.smoothing(planck_T,fwhm=np.pi/180.,lmax=383)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	P=np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)
	weights=np.repeat(1.,len(P))
	num,bins,junk=plt.hist(P,bins=40)
	index=np.argmax(num)
	weights[np.where(P <= bins[index+1]/2.)]=.75
	weights[np.where(P <= bins[index+1]/4.)]=.5
	weights[np.where(P <= bins[index+1]/8.)]=.25
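	# Heuristic weighting: pixels with polarized amplitude P below 1/2, 1/4 and
	# 1/8 of the top of the modal histogram bin are progressively down-weighted
	# (the conditions are nested, so the smallest threshold wins).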
	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const*weights
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const*weights
	alpha_u=alpha_radio*iqu_band_j[2]*weights
	alpha_q=-alpha_radio*iqu_band_j[1]*weights

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	cross1_array=[]
	cross2_array=[]
	cross3_array=[]
	if beam:
		l=np.arange(3*128)
		Bl_60=np.exp(-l*(l+1)*((60.0*np.pi/(180.*60.)/(np.sqrt(8.0*np.log(2.))))**2)/2.)
		Bl_11=np.exp(-l*(l+1)*((11.7*np.pi/(180.*60.)/(np.sqrt(8.0*np.log(2.))))**2)/2.)
		Bl_27=np.exp(-l*(l+1)*((27.3*np.pi/(180.*60.)/(np.sqrt(8.0*np.log(2.))))**2)/2.)
		Bl_factor=Bl_60**2*Bl_11*Bl_27
	else:
		Bl_factor=hp.gauss_beam(11.7*np.pi/(180.*60),lmax=383)*hp.gauss_beam(27.3*np.pi/(180.*60.),lmax=383)

	for field1 in xrange(4):
		mask_bool1=np.repeat(True,len(Delta_Q))
		pix_cmb1=field_pixels.field(field1)	
		pix_cmb1=pix_cmb1[np.nonzero(pix_cmb1)]	##Take Pixels From Field 1
		tmp=np.zeros(hp.nside2npix(1024))
		tmp[pix_cmb1]=1
		tmp=hp.ud_grade(tmp,128)
		mask_bool1[np.nonzero(tmp)]=False
	#	mask_bool1[np.where(P<.7e-6)]=True
		DQm.mask=mask_bool1
		DUm.mask=mask_bool1
		aQm.mask=mask_bool1
		aUm.mask=mask_bool1

		TE_map=np.array([planck_T*alpha_radio,Delta_Q,Delta_U])
		TEm=hp.ma(TE_map)
		TEm[0].mask=mask_bool1
		TEm[1].mask=mask_bool1
		TEm[2].mask=mask_bool1
		
		cross1_array.append(hp.anafast(DQm,map2=aUm)/Bl_factor)
		cross2_array.append(hp.anafast(DUm,map2=aQm)/Bl_factor)
		cross_tmp=hp.anafast(TEm,pol=1,nspec=4)
		cross3_array.append(cross_tmp[-1]/Bl_factor)
	
	cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	cross3=np.mean(cross3_array,axis=0)	##Average over all Cross Spectra
	hp.write_cl('cl_'+bands+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands+'_FR_UxaQ.fits',cross2)
	hp.write_cl('cl_'+bands+'_FR_TE_cmb.fits',cross3)
	return (cross1,cross2,cross3)
Example #38
    B_alms_hp = hp.almxfl(ps.lm2lm_hp(b_alms_mw,smoothing_lmax),newbeam) #*filterbeam) #Same beams as NILC

    #E & B maps
    E_map = hp.alm2map(E_alms_hp,outnside,pixwin=True)
    B_map = hp.alm2map(B_alms_hp,outnside,pixwin=True)
    map_outfits = outfits_root + '_Emap.fits'
    hp.write_map(map_outfits,E_map)
    map_outfits = outfits_root + '_Bmap.fits'
    hp.write_map(map_outfits,B_map)

    print "Calculating final C_l"
    final_cls = hp.alm2cl((E_alms_hp,B_alms_hp)) #(EE,BB,EB)
    clscode = ['EE','BB','EB']
    for i in xrange(len(final_cls)):
        cl_outfits = outfits_root + '_' + clscode[i] + 'cls.fits'
        hp.write_cl(cl_outfits,final_cls[i])

    print "Calculating final maps"
    final_maps = hp.alm2map_spin((E_alms_hp,B_alms_hp),outnside,2,lmax=smoothing_lmax-1) #(Q,U)
    T_map = [np.zeros_like(final_maps[0]),]
    map_outfits = outfits_root + '_QUmaps.fits'
    hp.write_map(map_outfits,T_map+final_maps)

    #Downgraded map
    print "Downgrading maps"
    QU_alms = hp.map2alm(final_maps,lmax=256,pol=False)
    bigbeam = hp.gauss_beam(mh.radians(80./60.),lmax=256) / (hp.gauss_beam(mh.radians(10./60.),lmax=256) * np.concatenate((np.ones(2),hp.pixwin(hp.get_nside(final_maps[0]),pol=True)[1][2:257])))
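    # bigbeam re-smooths from the 10 arcmin working beam to an 80 arcmin beam
    # and divides out the high-resolution polarization pixel window before
    # re-pixelizing at nside=128.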
    hp.almxfl(QU_alms[0],bigbeam,inplace=True)
    hp.almxfl(QU_alms[1],bigbeam,inplace=True)
    dg_maps = hp.alm2map(QU_alms,nside=128,pixwin=True,pol=False)
    T_map_dg = [np.zeros_like(dg_maps[0]),]
def main():
	map_prefix='/home/matt/quiet/quiet_maps/'
	i_file=map_prefix+'quiet_simulated_43.1'
	j_file=map_prefix+'quiet_simulated_94.5'
	alpha_file='/data/wmap/faraday_MW_realdata.fits'
	bands=[43.1,94.5]
	names=['43','95']
	wl=np.array([299792458./(band*1e9) for band in bands])
	N_runs=100
	bins=[1,5,10,20,50]
	cross1_array=[]
	cross2_array=[]
	cross3_array=[]
	noise1_array=[]
	noise2_array=[]
	noise3_array=[]
	
	for i in xrange(N_runs):	
		simulate_fields.main()
		tmp1,tmp2,tmp3=faraday_correlate_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1])
		ntmp1,ntmp2,ntmp3=faraday_noise_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1])
		cross1_array.append(tmp1)
		cross2_array.append(tmp2)
		cross3_array.append(tmp3)
		noise1_array.append(ntmp1)
		noise2_array.append(ntmp2)
		noise3_array.append(ntmp3)

	theory1,theory2,theory3=faraday_theory_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1])
	hp.write_cl('cl_theory_FR_QxaU.fits',theory1)
	hp.write_cl('cl_theory_FR_UxaQ.fits',theory2)
	hp.write_cl('cl_theory_FR_TE.fits',theory3)
	f=open('cl_array_FR_QxaU.json','w')
	json.dump([[a for a in cross1_array[i]] for i in xrange(len(cross1_array))],f)
	f.close()	
	f=open('cl_array_FR_UxaQ.json','w')
	json.dump([[a for a in cross2_array[i]] for i in xrange(len(cross2_array))],f)
	f.close()	
	f=open('cl_array_FR_TE.json','w')
	json.dump([[a for a in cross3_array[i]] for i in xrange(len(cross3_array))],f)
	f.close()	
	f=open('cl_noise_FR_QxaU.json','w')
	json.dump([[a for a in noise1_array[i]] for i in xrange(len(noise1_array))],f)
	f.close()	
	f=open('cl_noise_FR_UxaQ.json','w')
	json.dump([[a for a in noise2_array[i]] for i in xrange(len(noise2_array))],f)
	f.close()	
	f=open('cl_noise_FR_TE.json','w')
	json.dump([[a for a in noise3_array[i]] for i in xrange(len(noise3_array))],f)
	f.close()	

	cross1=np.mean(cross1_array,axis=0)
	noise1=np.mean(noise1_array,axis=0)
	dcross1=np.std(cross1_array,axis=0)
	plot_binned.plotBinned((cross1-noise1)*1e12,dcross1*1e12,bins,'Cross_43x95_FR_QxaU', title='Cross 43x95 FR QxaU',theory=theory1*1e12)

	cross2=np.mean(cross2_array,axis=0)
	noise2=np.mean(noise2_array,axis=0)
	dcross2=np.std(cross2_array,axis=0)
	plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,bins,'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',theory=theory2*1e12)

	cross3=np.mean(cross3_array,axis=0)
	noise3=np.mean(noise3_array,axis=0)
	dcross3=np.std(cross3_array,axis=0)
	plot_binned.plotBinned((cross3-noise3)*1e12,dcross3*1e12,bins,'Cross_43x95_FR_TE', title='Cross 43x95 FR TE',theory=theory3*1e12)
	
	subprocess.call('mv *01*.png bin_01/', shell=True)
	subprocess.call('mv *05*.png bin_05/', shell=True)
	subprocess.call('mv *10*.png bin_10/', shell=True)
	subprocess.call('mv *20*.png bin_20/', shell=True)
	subprocess.call('mv *50*.png bin_50/', shell=True)
	subprocess.call('mv *.eps eps/', shell=True)
Example #40
File: batch.py  Project: plaszczy/CoLoRe
def proj(dens_type=0,
         ishell=5,
         ngrid=512,
         nside=256,
         lmax=750,
         rsd=True,
         write=True):

    dirin = get_path(dens_type, ishell, ngrid)
    os.makedirs(dirin, exist_ok=True)
    zval = (0, 0.1, 0.2, 0.3, 0.4, 0.5)

    zmax = zval[ishell]
    zmin = zval[ishell - 1]

    files = glob.glob(os.path.join(dirin, "..", "cat*.fits"))
    print("Analyzing: {}".format(dirin))
    print("shell #{}: z=[{},{}] ".format(ishell, zmin, zmax))

    print("there are {} files".format(len(files)))

    zcut = [zmin, zmax]

    print(" --> slice z=[{},{}] onto nside={} map".format(
        zcut[0], zcut[1], nside))
    npix = hp.nside2npix(nside)

    cls = []
    cpt = 1
    for file in files:
        #zrec,ra,dec=read_catalog(file,zcut,rsd)
        catalog = Catalog(file, rsd)
        zrec, ra, dec = catalog.get(zcut)
        Nsamp = len(ra)
        nbar = Nsamp / (4 * np.pi)
        print(" {} -> Nsamp={}, SN={}".format(cpt, Nsamp, 1 / nbar))
        mp = np.bincount(hp.ang2pix(nside, np.radians(90 - dec),
                                    np.radians(ra)),
                         minlength=npix)
        Nmean = mp.mean()
        map = mp.astype(float) / Nmean - 1.
        #anafast
        cl = hp.anafast(map,
                        lmax=lmax,
                        iter=0,
                        pol=False,
                        use_weights=True,
                        datapath=os.environ['HEALPIXDATA'])
        #remove SN
        cl -= 1. / nbar
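        # For a density-contrast map built from counts, the Poisson shot-noise
        # power is 1/nbar (nbar = sources per steradian), subtracted here.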
        cls.append(cl)
        cpt += 1

    clm = np.mean(cls, 0)
    covmat = np.cov(np.transpose(cls))
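    # np.cov treats rows as variables, so transposing the (nfile, nl) stack of
    # spectra yields the l-by-l covariance of the per-file estimates.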

    if write:
        dirout = os.path.join(dirin, "..", "shell{:d}".format(ishell))
        os.makedirs(dirout, exist_ok=True)
        clname = "clmean.fits"
        if not rsd:
            clname = "clmean_norsd.fits"
        f1 = os.path.join(dirout, clname)
        hp.write_cl(f1, clm, overwrite=True)
        covname = "covmat.fits"
        if not rsd:
            clname = "covmat_norsd.fits"
        f2 = os.path.join(dirout, "covmat.fits")
        hdu = fits.ImageHDU(covmat)
        hdu.writeto(f2, overwrite=True)

        print("writing {} and {}".format(f1, f2))

    return clm, covmat