Example #1
0
# Reorder the noise array so the realization axis comes first, then back to lists.
N_aq_array_fr=np.array(N_aq_array_fr).swapaxes(0,1).tolist()


##Add Noise sky Corrections (currently disabled)
#N_dq_2band_array*=1./fsky*np.mean(1./bls)
#N_du_2band_array*=1./fsky*np.mean(1./bls)
#
#N_au_array*=1./fsky*np.mean(1./bls)
#N_aq_array*=1./fsky*np.mean(1./bls)

plot_l=[]
#factor accounts for the l(l+1)/2pi scaling, fsky fraction and beam window
fact=ll/fsky/bls
# NOTE(review): fact_n omits the beam term -- presumably the noise spectra are
# not beam-smoothed; confirm where they are computed.
fact_n=ll/fsky
# Bin each scaled spectrum into bandpowers in place; bin_llcl returns a dict
# and 'llcl' holds the binned l(l+1)C_l/2pi values.
for m in xrange(len(cross_2band_array)):
	cross_2band_array[m]=bin_llcl.bin_llcl(fact*cross_2band_array[m],bins)['llcl']

	cross_dq_2band_array[m]=bin_llcl.bin_llcl(fact*cross_dq_2band_array[m],bins)['llcl']
	cross_du_2band_array[m]=bin_llcl.bin_llcl(fact*cross_du_2band_array[m],bins)['llcl']

	cross_2band_array_fr[m]=bin_llcl.bin_llcl(fact*cross_2band_array_fr[m],bins)['llcl']

	cross_dq_2band_array_fr[m]=bin_llcl.bin_llcl(fact*cross_dq_2band_array_fr[m],bins)['llcl']
	cross_du_2band_array_fr[m]=bin_llcl.bin_llcl(fact*cross_du_2band_array_fr[m],bins)['llcl']


	theory_2band_array[m]=bin_llcl.bin_llcl(fact*theory_2band_array[m],bins)['llcl']
	theory_dq_2band_array[m]=bin_llcl.bin_llcl(fact*theory_dq_2band_array[m],bins)['llcl']

	N_au_array[m]=bin_llcl.bin_llcl(fact_n*N_au_array[m],bins)['llcl']
	N_aq_array[m]=bin_llcl.bin_llcl(fact_n*N_aq_array[m],bins)['llcl']
Example #2
0
# Read the masked SMICA CMB map and show a Mollweide view of it.
cmb = hp.read_map("CMB_SMICA_mask.fits")

# cmbd = hp.ud_grade(cmb, nside_out=1024)
cmbd = cmb

hp.mollview(cmbd)
plt.show()

# chiminusfore = hp.read_map('total_masked_map.fits')

# Maximum multipole for the power-spectrum estimate.
lemon = 1024
cl = hp.anafast(cmbd, lmax=lemon)

# l(l+1)/2pi prefactor to turn C_l into D_l-style bandpowers.
l = np.arange(len(cl))
ll = l * (l + 1) / (2 * np.pi)

# Bin the scaled spectrum into bandpowers of width 25 and plot it,
# with (scaled-down) scatter-based error bars on top.
plt.figure()
bin_cl = bin_llcl.bin_llcl(ll * cl, 25)
plt.plot(bin_cl["l_out"], bin_cl["llcl"], ".b")
plt.errorbar(bin_cl["l_out"], bin_cl["llcl"], bin_cl["std_llcl"] / 5.0, fmt=".r")
plt.show()
Example #3
0
File: mwa_cor.py  Project: mkolopanis/tau
    mult[b,bins*b +2:bins*b+bins -1 +2] = 1. #add two to account for binning operator a la Hizon 2002
    q_mult[bins*b +2:bins*b+bins -1 +2,b] = 1. #add two to account for binning operator a la Hizon 2002



# Apply the binning-window masks built above to the binning operators.
Pbl *= mult
Qlb *= q_mult


# Normalize both operators by the (0,0) element of P.Q so their product
# is unity there.
norm = np.dot(Pbl,Qlb)[0,0]

Pbl /= np.sqrt(norm)

Qlb /= np.sqrt(norm)

# Bin the l(l+1)/2pi-scaled spectra into bandpowers; 'l_out' gives the
# bandpower centers used for plotting.
l_out = bin_llcl.bin_llcl(ll,bins)['l_out']
bcross_cls= bin_llcl.bin_llcl(ll*cross_cls,bins)
bradio = bin_llcl.bin_llcl(ll*radio_cls,bins)
bcmb_cls = bin_llcl.bin_llcl(ll*cmb_cls,bins)
#bwls = bin_llcl.bin_llcl(ll*wls,bins)



# Compute the matrix from the mask window spectrum and cache it to disk
# (presumably the Hivon-style mode-coupling matrix M_ll' -- confirm in MLL).
Mll = MLL.Mll(wls,l)
#Mll = np.array(Mll)
np.savez('mll_mwa.npz',mll=Mll)

#Mll = np.load('mll_mwa.npz')['mll']
#Mll = Mll[2:beam_lmax,2:beam_lmax]
#Mll = Mll.reshape(lmax,lmax)
Example #4
0
    q_mult[bins*b:bins*b+bins -1 ,b] = 1. #add two to account for binning operator a la Hizon 2002

# Apply the binning-window masks built above to the binning operators.
Pbl *= mult
Qlb *= q_mult ## divide by .9 so np.dot(Pbl,Qlb) = identity


# Normalization via the diagonal of P.Q is currently disabled here.
#norm = np.dot(Pbl,Qlb).diagonal()
#norm.shape = (1,norm.shape[0])

#Pbl /= np.sqrt(norm)
#Qlb /= np.sqrt(norm)

#Pbl = Pbl[:, 2:beam_lmax]
#Qlb = Pbl[2:beam_lmax, :]

# Bandpower centers for plotting; the binned spectra themselves are disabled.
l_out = bin_llcl.bin_llcl(ll,bins)['l_out']
#bcross_cls= bin_llcl.bin_llcl(ll*cross_cls,bins)
#bcmb_cls = bin_llcl.bin_llcl(ll*cmb_cls,bins)
#bwls = bin_llcl.bin_llcl(ll*wls[2:],bins)



# Load the previously cached coupling matrix; recomputation via MLL.Mll is
# commented out above.
#Mll = MLL.Mll(wls,l)
#Mll = np.array(Mll)
#np.savez('mll_chipass.npz',mll=Mll)

Mll = np.load('mll_chipass.npz')['mll']
#Mll = Mll[2:beam_lmax,2:beam_lmax]
#Mll = Mll.reshape(lmax,lmax)

#compute TOD transfer function.. Maybe
Example #5
0
def plot_mc():
	"""Load saved PRISM simulation spectra, bin them, plot, and summarize.

	For each mask in the module-level ``mask_array``, loads the
	'prism_simul_<name>.npz' file written by main(), applies an fsky
	correction, bins the spectra for every bin width in ``bins``, plots the
	combined binned cross spectrum, and for bin width 25 writes likelihood
	and detection-level summaries to 'Maximum_likelihood.txt' and shuffles
	the plot files into per-bin directories.

	Uses module-level globals: nside_out, smoothing_scale, mask_array,
	mask_name, plus the bin_llcl / plot_binned helpers.
	"""
	bins=[1,5,10,20,25,50]
	l=np.arange(3*nside_out)
	ll=l*(l+1)/(2.*np.pi)
	# squared Gaussian beam window for the smoothing scale (arcmin -> radians)
	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	for num, mask_file in enumerate(mask_array):
		f=np.load('prism_simul_'+mask_name[num]+'.npz')
		theory1_array_in=f['the1_in']
		theory2_array_in=f['the2_in']
		cross1_array_in=f['c1_in']
		cross2_array_in=f['c2_in']
		noise1_array_in=f['n1_in']
		noise2_array_in=f['n2_in']
		Ndq_array_in=f['ndq_in']
		Ndu_array_in=f['ndu_in']
		Nau_array_in=f['nau_in']
		Naq_array_in=f['naq_in']

		# read the mask column, reorder NESTED->RING, degrade to nside 128
		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()

		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=128)

		mask_bool=~mask.astype(bool)

		# unmasked sky fraction and effective multipole spacing of the cut sky
		fsky= 1. - np.sum(mask)/float(len(mask))
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L

		theory1_array_in=np.array(theory1_array_in)/fsky
		theory2_array_in=np.array(theory2_array_in)/fsky
		cross1_array_in=np.array(cross1_array_in)/fsky
		cross2_array_in=np.array(cross2_array_in)/fsky
		Ndq_array_in=np.array(Ndq_array_in)/fsky
		Ndu_array_in=np.array(Ndu_array_in)/fsky
		Nau_array_in=np.array(Nau_array_in)/fsky
		Naq_array_in=np.array(Naq_array_in)/fsky
		noise1_array_in=np.array(noise1_array_in)/fsky
		noise2_array_in=np.array(noise2_array_in)/fsky


		for b in bins:
			# Knox-style error bars per band pair from the mean noise spectra
			N_dq=np.mean(Ndq_array_in,axis=1)
			N_au=np.mean(Nau_array_in,axis=1)
			delta1_in=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)**2+(np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)/2.*(N_dq+N_au)+N_dq*N_au/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))

			cosmic1_in=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=1)**2)

			N_du=np.mean(Ndu_array_in,axis=1)
			N_aq=np.mean(Naq_array_in,axis=1)
			# NOTE(review): delta2_in reuses N_dq/N_au; the N_du/N_aq computed
			# just above are never used -- looks like a copy-paste slip, confirm.
			delta2_in=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)**2+(np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)/2.*(N_dq+N_au)+N_dq*N_au/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
			cosmic2_in=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=1)**2)

			# per-band-pair accumulators for the binned spectra
			cross1_array=[[],[],[]]
			cross2_array=[[],[],[]]
			Ndq_array=[[],[],[]]
			Ndu_array=[[],[],[]]
			Nau_array=[[],[],[]]
			Naq_array=[[],[],[]]
			noise1_array=[[],[],[]]
			noise2_array=[[],[],[]]
			theory1_array=[[],[],[]]
			theory2_array=[[],[],[]]
			cosmic1=[[],[],[]]
			cosmic2=[[],[],[]]
			delta1=[[],[],[]]
			delta2=[[],[],[]]

			plot_l=[]
			if( b != 1):
				# bin every realization of every spectrum at this bin width
				for m in xrange(len(cross1_array_in)):
		        		for n in xrange(len(cross1_array_in[0])):
		        		        tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in[m][n]/bls,b)
		        		        tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in[m][n]/bls,b)
						tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in[m][n]/bls,b)
		        		        tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in[m][n]/bls,b)
						tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in[m][n]/bls,b)
		        		        tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in[m][n]/bls,b)

						theory1_array[m].append(tmp_t1['llcl'])
						theory2_array[m].append(tmp_t2['llcl'])

						cross1_array[m].append(tmp_c1['llcl'])
						cross2_array[m].append(tmp_c2['llcl'])

						noise1_array[m].append(tmp_n1['llcl'])
						noise2_array[m].append(tmp_n2['llcl'])

						if n == len(cross1_array_in[0])-1:
		        		                plot_l=tmp_c1['l_out']
					tmp_c1=bin_llcl.bin_llcl(ll*cosmic1_in[m]/bls,b)
					tmp_d1=bin_llcl.bin_llcl(ll*delta1_in[m]/bls,b)
					cosmic1[m]=tmp_c1['llcl']
					delta1[m]=tmp_d1['llcl']

					tmp_c2=bin_llcl.bin_llcl(ll*cosmic2_in[m]/bls,b)
					tmp_d2=bin_llcl.bin_llcl(ll*delta2_in[m]/bls,b)
					cosmic2[m]=tmp_c2['llcl']
					delta2[m]=tmp_d2['llcl']

			else:
				# bin width 1: just scale the raw spectra, no binning
				plot_l=l
				theory1_array=np.multiply(ll/bls,theory1_array_in)
				cross1_array=np.multiply(ll/bls,cross1_array_in)
				noise1_array=np.multiply(ll/bls,noise1_array_in)
				theory2_array=np.multiply(ll/bls,theory2_array_in)
				cross2_array=np.multiply(ll/bls,cross2_array_in)
				noise2_array=np.multiply(ll/bls,noise2_array_in)
				cosmic1=cosmic1_in*ll/bls
				cosmic2=cosmic2_in*ll/bls
				delta1=delta1_in*ll/bls
				delta2=delta2_in*ll/bls
			#noise1=np.mean(noise1_array,axis=1)
			#noise2=np.mean(noise2_array,axis=1)
			# combine the QxaU and UxaQ estimators; stats over realizations
        		theory_array = np.add(theory1_array,theory2_array)
        		theory=np.mean(theory_array,axis=1)
        		dtheory=np.std(theory_array,axis=1,ddof=1)
        		cross_array = np.add(np.subtract(cross1_array,noise1_array),np.subtract(cross2_array,noise2_array))
        		cross=np.mean(cross_array,axis=1)
        		dcross=np.std(cross_array,axis=1,ddof=1)
        		cosmic=np.sqrt(np.array(cosmic1)**2+np.array(cosmic2)**2)
        		delta=np.sqrt(np.array(delta1)**2+np.array(delta2)**2)

			# inverse-variance weighted average over the band pairs
			cross=np.average(cross,weights=1./dcross**2,axis=0)
			theory=np.average(theory,weights=1./dcross**2,axis=0)
			dtheory=np.average(dtheory,weights=1./dcross**2,axis=0)
			cosmic=np.average(cosmic,weights=1./dcross**2,axis=0)
			delta=np.average(delta,weights=1./dcross**2,axis=0)
			dcross=np.sqrt(np.average(dcross**2,weights=1./dcross**2,axis=0))

			#theory1=np.mean(theory1_array,axis=0)
			#dtheory1=np.std(theory1_array,axis=0,ddof=1)
			#cross1=np.mean(cross1_array,axis=0)
			#dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
			#ipdb.set_trace()
			plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'prism_FR_simulation',title='PRISM FR Correlator',theory=theory*1e12,dtheory=dtheory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)

			#theory2=np.mean(theory2_array,axis=0)
			#dtheory2=np.std(theory2_array,axis=0,ddof=1)
			#cross2=np.mean(cross2_array,axis=0)
			##delta2=np.mean(delta2_array,axis=0)
			#dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
			##ipdb.set_trace()
			#plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
			#ipdb.set_trace()

			if b == 25 :
				# scan an amplitude scale factor and build its chi^2 likelihood
				a_scales=np.linspace(-2,4,121)
				chi_array=[]
				for a in a_scales:
					chi_array.append(np.sum( (cross - a*theory)**2/(dcross)**2))
				ind = np.argmin(chi_array)
			#likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sqrt(2*np.pi)
				likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sum(np.exp(np.multiply(-1./2.,chi_array))*.05)

				# detection levels: flat and theory-weighted signal estimates
				Sig=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise=np.std(np.sum(cross_array/dcross**2,axis=1)/np.sum(1./dcross**2))
				Sig1=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise1=np.std(np.sum(cross_array*(theory/dcross)**2,axis=1)/np.sum((theory/dcross)**2))
				SNR=Sig/Noise
				SNR1=Sig1/Noise1

				Sig2=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise2=np.sqrt(1./np.sum(1./dcross**2))
				Sig3=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise3=np.sqrt(np.sum(theory**2)/np.sum(theory**2/dcross**2))
				SNR2=Sig2/Noise2
				SNR3=Sig3/Noise3

				#ipdb.set_trace()
				fig,ax1=plt.subplots(1,1)

				ax1.plot(a_scales,likelihood,'k.')
				ax1.set_title('Faraday Rotation Correlator')
				ax1.set_xlabel('Likelihood scalar')
				ax1.set_ylabel('Likelihood of Correlation')
				fig.savefig('FR_Correlation_Likelihood.png',format='png')
				fig.savefig('FR_Correlation_Likelihood.eps',format='eps')
				#ipdb.set_trace()
				f=open('Maximum_likelihood.txt','w')
				f.write('Maximum Likelihood: {0:2.5f}%  for scale factor {1:.2f} \n'.format(float(likelihood[ind]*100),float(a_scales[ind])))
				f.write('Probability of scale factor =1: {0:2.5f}% \n \n'.format(float(likelihood[np.where(a_scales ==1)])*100))
				f.write('Detection Levels using Standard Deviation \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR,Sig, Noise))
				# NOTE(review): the line below formats Noise, not Noise1 --
				# probably should report Noise1 with SNR1/Sig1; confirm.
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n \n'.format(SNR1,Sig1,Noise))
				f.write('Detection using Theoretical Noise \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR2,Sig2, Noise2))
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR3,Sig3,Noise3))
				f.close()

			#if b == 1 :
			#	xbar= np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
			#	vector=np.matrix(ll[1:]*cross[1:]).T
			#	mu=np.matrix(ll[1:]*theory[1:]).T
			#	fact=len(xbar)-1
			#	cov=(np.dot(xbar,xbar.T)/fact).squeeze()
			#	ipdb.set_trace()
			#	likelihood=np.exp(-np.dot(np.dot((vector-mu).T,lin.inv(cov)),(vector-mu))/2. )/(np.sqrt(2*np.pi*lin.det(cov)))
			#	print('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f=open('FR_likelihood.txt','w')
			#	f.write('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f.close()

				# move this run's plots into the per-bin-width directories
				#subprocess.call('mv Maximum_likelihood.txt  gal_cut_{0:0>2d}/'.format(cut), shell=True)
				subprocess.call('mv *01*.png bin_01/', shell=True)
				subprocess.call('mv *05*.png bin_05/', shell=True)
				subprocess.call('mv *10*.png bin_10/', shell=True)
				subprocess.call('mv *20*.png bin_20/', shell=True)
				subprocess.call('mv *25*.png bin_25/', shell=True)
				subprocess.call('mv *50*.png bin_50/', shell=True)
				subprocess.call('mv *.eps eps/', shell=True)
Example #6
0
def main():
	"""Run the PRISM FR-correlation Monte Carlo, save, bin, plot, summarize.

	For each mask and each band pair, runs N_runs theory/signal/noise
	correlations, accumulates the spectra, and saves them to
	'prism_simul_<mask>.npz'.  Then applies the same post-processing as
	plot_mc(): fsky correction, bandpower binning for each bin width,
	plotting, and (for bin width 25) likelihood / detection-level output
	to 'Maximum_likelihood.txt'.

	Uses module-level globals: smoothing_scale, nside_out, mask_array,
	mask_name, bands, wl, and the correlate_* helper functions.
	"""
	##Parameters for Binning, Number of Runs
	##	Beam correction
	use_beam=0
	N_runs=100
	bins=[1,5,10,20,25,50]
	# NOTE(review): gal_cut is never referenced below; 00/05 are
	# octal-style literals, valid only in Python 2.
	gal_cut=[00,05,10,20,30]
	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	l=np.arange(3*nside_out)
	ll=l*(l+1)/(2*np.pi)

	map_prefix='/home/matt/Planck/data/faraday/simul_maps/'
	file_prefix=map_prefix+'prism_simulated_'
	alpha_file='/data/wmap/faraday_MW_realdata.fits'
	#wl=np.array([299792458./(band*1e9) for band in bands])
	# one accumulator slot per band pair (3 pairs)
	cross1_array_in=[[],[],[]]
	cross2_array_in=[[],[],[]]
	Ndq_array_in=[[],[],[]]
	Ndu_array_in=[[],[],[]]
	Nau_array_in=[[],[],[]]
	Naq_array_in=[[],[],[]]
	noise1_array_in=[[],[],[]]
	noise2_array_in=[[],[],[]]
	theory1_array_in=[[],[],[]]
	theory2_array_in=[[],[],[]]


	#simulate_fields.main()
	for num, mask_file in enumerate(mask_array):
		print(Fore.WHITE+Back.RED+Style.BRIGHT+'Mask: '+mask_name[num]+Back.RESET+Fore.RESET+Style.RESET_ALL)
		count=0
		for i in [0,1,2]:
			for j in [3,4,5]:
				#for n in xrange(N_runs):
				for run in xrange(N_runs):
					print(Fore.WHITE+Back.GREEN+Style.BRIGHT+'Correlation #{:03d}'.format(run+1)+Back.RESET+Fore.RESET+Style.RESET_ALL)
					print('Bands: {0:0>3.0f} and {1:0>3.0f}'.format(bands[i],bands[j]))
					ttmp1,ttmp2=correlate_theory(file_prefix+'{0:0>3.0f}.fits'.format(bands[i]),file_prefix+'{0:0>3.0f}.fits'.format(bands[j]),wl[i],wl[j],alpha_file,'{0:0>3.0f}x{1:0>3.0f}'.format(bands[i],bands[j]),beam=use_beam,mask_file=mask_file)
				#f=open('cl_noise_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
					theory1_array_in[count].append(ttmp1)
					theory2_array_in[count].append(ttmp2)
					tmp1,tmp2,n1,n2,n3,n4=correlate_signal(file_prefix+'{0:0>3.0f}.fits'.format(bands[i]),file_prefix+'{0:0>3.0f}.fits'.format(bands[j]),wl[i],wl[j],alpha_file,'{0:0>3.0f}x{1:0>3.0f}'.format(bands[i],bands[j]),beam=use_beam,mask_file=mask_file)
					ntmp1,ntmp2=correlate_noise(file_prefix+'{0:0>3.0f}.fits'.format(bands[i]),file_prefix+'{0:0>3.0f}.fits'.format(bands[j]),wl[i],wl[j],alpha_file,'{0:0>3.0f}x{1:0>3.0f}'.format(bands[i],bands[j]),beam=use_beam,mask_file=mask_file)
					cross1_array_in[count].append(tmp1)
					cross2_array_in[count].append(tmp2)
					Ndq_array_in[count].append(n1)
					Ndu_array_in[count].append(n2)
					Nau_array_in[count].append(n3)
					Naq_array_in[count].append(n4)
					noise1_array_in[count].append(ntmp1)
					noise2_array_in[count].append(ntmp2)
				count+=1
		np.savez('prism_simul_'+mask_name[num]+'.npz',the1_in=theory1_array_in,the2_in=theory2_array_in,c1_in=cross1_array_in,c2_in=cross2_array_in,ndq_in=Ndq_array_in,ndu_in=Ndu_array_in,nau_in=Nau_array_in,naq_in=Naq_array_in,n1_in=noise1_array_in,n2_in=noise2_array_in)
				#f=open('cl_theory_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(theory1_array_in).tolist(),f)
				#f.close()
				#f=open('cl_theory_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(theory2_array_in).tolist(),f)
				#f.close()
				#f=open('cl_array_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(cross1_array_in).tolist(),f)
				#f.close()
				#f=open('cl_array_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(cross2_array_in).tolist(),f)
				#f.close()
				#f=open('cl_noise_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(noise1_array_in).tolist(),f)
				#f.close()
				#json.dump(np.array(noise2_array_in).tolist(),f)
				#f.close()
				#f=open('cl_Nau_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Nau_array_in).tolist(),f)
				#f.close()
				#f=open('cl_Ndq_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Ndq_array_in).tolist(),f)
				#f.close()
				#f=open('cl_Naq_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Naq_array_in).tolist(),f)
				#f.close()
				#f=open('cl_Ndu_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Ndu_array_in).tolist(),f)
				#f.close()

				#fsky= 1. - np.sin(cut*np.pi/180.)
				#L=np.sqrt(fsky*4*np.pi)
				#dl_eff=2*np.pi/L

		# read the mask column, reorder NESTED->RING, degrade to nside 128
		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()

		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=128)

		mask_bool=~mask.astype(bool)

		# unmasked sky fraction and effective multipole spacing of the cut sky
		fsky= 1. - np.sum(mask)/float(len(mask))
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L

		theory1_array_in=np.array(theory1_array_in)/fsky
		theory2_array_in=np.array(theory2_array_in)/fsky
		cross1_array_in=np.array(cross1_array_in)/fsky
		cross2_array_in=np.array(cross2_array_in)/fsky
		Ndq_array_in=np.array(Ndq_array_in)/fsky
		Ndu_array_in=np.array(Ndu_array_in)/fsky
		Nau_array_in=np.array(Nau_array_in)/fsky
		Naq_array_in=np.array(Naq_array_in)/fsky
		noise1_array_in=np.array(noise1_array_in)/fsky
		noise2_array_in=np.array(noise2_array_in)/fsky


		for b in bins:
			# Knox-style error bars per band pair from the mean noise spectra
			N_dq=np.mean(Ndq_array_in,axis=1)
			N_au=np.mean(Nau_array_in,axis=1)
			delta1_in=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)**2+(np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)/2.*(N_dq+N_au)+N_dq*N_au/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))

			cosmic1_in=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=1)**2)

			N_du=np.mean(Ndu_array_in,axis=1)
			N_aq=np.mean(Naq_array_in,axis=1)
			# NOTE(review): delta2_in reuses N_dq/N_au; the N_du/N_aq computed
			# just above are never used -- looks like a copy-paste slip, confirm.
			delta2_in=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)**2+(np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)/2.*(N_dq+N_au)+N_dq*N_au/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
			cosmic2_in=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=1)**2)

			# per-band-pair accumulators for the binned spectra
			cross1_array=[[],[],[]]
			cross2_array=[[],[],[]]
			Ndq_array=[[],[],[]]
			Ndu_array=[[],[],[]]
			Nau_array=[[],[],[]]
			Naq_array=[[],[],[]]
			noise1_array=[[],[],[]]
			noise2_array=[[],[],[]]
			theory1_array=[[],[],[]]
			theory2_array=[[],[],[]]
			cosmic1=[[],[],[]]
			cosmic2=[[],[],[]]
			delta1=[[],[],[]]
			delta2=[[],[],[]]

			plot_l=[]
			if( b != 1):
				# bin every realization of every spectrum at this bin width
				for m in xrange(len(cross1_array_in)):
		        		for n in xrange(len(cross1_array_in[0])):
		        		        tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in[m][n]/bls,b)
		        		        tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in[m][n]/bls,b)
						tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in[m][n]/bls,b)
		        		        tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in[m][n]/bls,b)
						tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in[m][n]/bls,b)
		        		        tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in[m][n]/bls,b)

						theory1_array[m].append(tmp_t1['llcl'])
						theory2_array[m].append(tmp_t2['llcl'])

						cross1_array[m].append(tmp_c1['llcl'])
						cross2_array[m].append(tmp_c2['llcl'])

						noise1_array[m].append(tmp_n1['llcl'])
						noise2_array[m].append(tmp_n2['llcl'])

						if n == len(cross1_array_in[0])-1:
		        		                plot_l=tmp_c1['l_out']
					tmp_c1=bin_llcl.bin_llcl(ll*cosmic1_in[m]/bls,b)
					tmp_d1=bin_llcl.bin_llcl(ll*delta1_in[m]/bls,b)
					cosmic1[m]=tmp_c1['llcl']
					delta1[m]=tmp_d1['llcl']

					tmp_c2=bin_llcl.bin_llcl(ll*cosmic2_in[m]/bls,b)
					tmp_d2=bin_llcl.bin_llcl(ll*delta2_in[m]/bls,b)
					cosmic2[m]=tmp_c2['llcl']
					delta2[m]=tmp_d2['llcl']

			else:
				# bin width 1: just scale the raw spectra, no binning
				plot_l=l
				theory1_array=np.multiply(ll/bls,theory1_array_in)
				cross1_array=np.multiply(ll/bls,cross1_array_in)
				noise1_array=np.multiply(ll/bls,noise1_array_in)
				theory2_array=np.multiply(ll/bls,theory2_array_in)
				cross2_array=np.multiply(ll/bls,cross2_array_in)
				noise2_array=np.multiply(ll/bls,noise2_array_in)
				cosmic1=cosmic1_in*ll/bls
				cosmic2=cosmic2_in*ll/bls
				delta1=delta1_in*ll/bls
				delta2=delta2_in*ll/bls
			#noise1=np.mean(noise1_array,axis=1)
			#noise2=np.mean(noise2_array,axis=1)
			# combine the QxaU and UxaQ estimators; stats over realizations
        		theory_array = np.add(theory1_array,theory2_array)
        		theory=np.mean(theory_array,axis=1)
        		dtheory=np.std(theory_array,axis=1,ddof=1)
        		cross_array = np.add(np.subtract(cross1_array,noise1_array),np.subtract(cross2_array,noise2_array))
        		cross=np.mean(cross_array,axis=1)
        		dcross=np.std(cross_array,axis=1,ddof=1)
        		cosmic=np.sqrt(np.array(cosmic1)**2+np.array(cosmic2)**2)
        		delta=np.sqrt(np.array(delta1)**2+np.array(delta2)**2)

			# inverse-variance weighted average over the band pairs
			cross=np.average(cross,weights=1./dcross**2,axis=0)
			theory=np.average(theory,weights=1./dcross**2,axis=0)
			dtheory=np.average(dtheory,weights=1./dcross**2,axis=0)
			cosmic=np.average(cosmic,weights=1./dcross**2,axis=0)
			delta=np.average(delta,weights=1./dcross**2,axis=0)
			dcross=np.sqrt(np.average(dcross**2,weights=1./dcross**2,axis=0))

			#theory1=np.mean(theory1_array,axis=0)
			#dtheory1=np.std(theory1_array,axis=0,ddof=1)
			#cross1=np.mean(cross1_array,axis=0)
			#dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
			#ipdb.set_trace()
			plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'prism_FR_simulation',title='PRISM FR Correlator',theory=theory*1e12,dtheory=dtheory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)

			#theory2=np.mean(theory2_array,axis=0)
			#dtheory2=np.std(theory2_array,axis=0,ddof=1)
			#cross2=np.mean(cross2_array,axis=0)
			##delta2=np.mean(delta2_array,axis=0)
			#dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
			##ipdb.set_trace()
			#plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
			#ipdb.set_trace()

			if b == 25 :
				# scan an amplitude scale factor and build its chi^2 likelihood
				a_scales=np.linspace(-2,4,121)
				chi_array=[]
				for a in a_scales:
					chi_array.append(np.sum( (cross - a*theory)**2/(dcross)**2))
				ind = np.argmin(chi_array)
			#likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sqrt(2*np.pi)
				likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sum(np.exp(np.multiply(-1./2.,chi_array))*.05)

				# detection levels: flat and theory-weighted signal estimates
				Sig=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise=np.std(np.sum(cross_array/dcross**2,axis=1)/np.sum(1./dcross**2))
				Sig1=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise1=np.std(np.sum(cross_array*(theory/dcross)**2,axis=1)/np.sum((theory/dcross)**2))
				SNR=Sig/Noise
				SNR1=Sig1/Noise1

				Sig2=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise2=np.sqrt(1./np.sum(1./dcross**2))
				Sig3=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise3=np.sqrt(np.sum(theory**2)/np.sum(theory**2/dcross**2))
				SNR2=Sig2/Noise2
				SNR3=Sig3/Noise3

				#ipdb.set_trace()
				fig,ax1=plt.subplots(1,1)

				ax1.plot(a_scales,likelihood,'k.')
				ax1.set_title('Faraday Rotation Correlator')
				ax1.set_xlabel('Likelihood scalar')
				ax1.set_ylabel('Likelihood of Correlation')
				fig.savefig('FR_Correlation_Likelihood.png',format='png')
				fig.savefig('FR_Correlation_Likelihood.eps',format='eps')
				#ipdb.set_trace()
				f=open('Maximum_likelihood.txt','w')
				f.write('Maximum Likelihood: {0:2.5f}%  for scale factor {1:.2f} \n'.format(float(likelihood[ind]*100),float(a_scales[ind])))
				f.write('Probability of scale factor =1: {0:2.5f}% \n \n'.format(float(likelihood[np.where(a_scales ==1)])*100))
				f.write('Detection Levels using Standard Deviation \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR,Sig, Noise))
				# NOTE(review): the line below formats Noise, not Noise1 --
				# probably should report Noise1 with SNR1/Sig1; confirm.
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n \n'.format(SNR1,Sig1,Noise))
				f.write('Detection using Theoretical Noise \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR2,Sig2, Noise2))
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR3,Sig3,Noise3))
				f.close()

			#if b == 1 :
			#	xbar= np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
			#	vector=np.matrix(ll[1:]*cross[1:]).T
			#	mu=np.matrix(ll[1:]*theory[1:]).T
			#	fact=len(xbar)-1
			#	cov=(np.dot(xbar,xbar.T)/fact).squeeze()
			#	ipdb.set_trace()
			#	likelihood=np.exp(-np.dot(np.dot((vector-mu).T,lin.inv(cov)),(vector-mu))/2. )/(np.sqrt(2*np.pi*lin.det(cov)))
			#	print('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f=open('FR_likelihood.txt','w')
			#	f.write('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f.close()

				# move this run's plots into the per-bin-width directories
				#subprocess.call('mv Maximum_likelihood.txt  gal_cut_{0:0>2d}/'.format(cut), shell=True)
				subprocess.call('mv *01*.png bin_01/', shell=True)
				subprocess.call('mv *05*.png bin_05/', shell=True)
				subprocess.call('mv *10*.png bin_10/', shell=True)
				subprocess.call('mv *20*.png bin_20/', shell=True)
				subprocess.call('mv *25*.png bin_25/', shell=True)
				subprocess.call('mv *50*.png bin_50/', shell=True)
				subprocess.call('mv *.eps eps/', shell=True)
def main():
	##Parameters for Binning, Number of Runs
	##	Beam correction
	use_beam=0
#	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),383)**2
	#bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	bls=(hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)*hp.pixwin(nside_out)[:3*nside_out])**2
	N_runs=500
	bins=[1,5,10,20,50]

	map_prefix='/home/matt/quiet/quiet_maps/'
	i_file=map_prefix+'quiet_simulated_43.1'
	j_file=map_prefix+'quiet_simulated_94.5'
	alpha_file='/data/wmap/faraday_MW_realdata.fits'
	bands=[43.1,94.5]
	names=['43','95']
	wl=np.array([299792458./(band*1e9) for band in bands])
	cross1_array_in=[]
	cross2_array_in=[]
	Ndq_array_in=[]
	Ndu_array_in=[]
	Nau_array_in=[]
	Naq_array_in=[]
	noise1_array_in=[]
	noise2_array_in=[]
	theory1_array_in=[]
	theory2_array_in=[]
	

	#simulate_fields.main()
	ttmp1,ttmp2=faraday_theory_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam)
	theory1_array_in.append(ttmp1)
	theory2_array_in.append(ttmp2)
	#for n in xrange(N_runs):
	for i in xrange(N_runs):	
		print(Fore.WHITE+Back.GREEN+Style.BRIGHT+'Correlation #{:03d}'.format(i+1)+Back.RESET+Fore.RESET+Style.RESET_ALL)
		tmp1,tmp2,n1,n2,n3,n4=faraday_correlate_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam)
	#	ntmp1,ntmp2=faraday_noise_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam)
		cross1_array_in.append(tmp1)
		cross2_array_in.append(tmp2)
		Ndq_array_in.append(n1)
		Ndu_array_in.append(n2)
		Nau_array_in.append(n3)
		Naq_array_in.append(n4)
	#	noise1_array_in.append(ntmp1)
	#	noise2_array_in.append(ntmp2)


	f=open('cl_theory_FR_QxaU.json','w')
	json.dump(np.array(theory1_array_in).tolist(),f)
	f.close()	
	f=open('cl_theory_FR_UxaQ.json','w')
	json.dump(np.array(theory2_array_in).tolist(),f)
	f.close()	
	theory1=np.mean(theory1_array_in,axis=0)
	theory2=np.mean(theory2_array_in,axis=0)
	hp.write_cl('cl_theory_FR_QxaU.fits',theory1)
	hp.write_cl('cl_theory_FR_UxaQ.fits',theory2)
	#f=open('cl_theory_FR_QxaU.json','r')
	#theory1_array=json.load(f)
	#f.close()	
	#f=open('cl_theory_FR_UxaQ.json','r')
	#theory2_array=json.load(f)
	#f.close()	
	f=open('cl_array_FR_QxaU.json','w')
	json.dump(np.array(cross1_array_in).tolist(),f)
	f.close()	
	f=open('cl_array_FR_UxaQ.json','w')
	json.dump(np.array(cross2_array_in).tolist(),f)
	f.close()	
	f=open('cl_Ndq_FR_QxaU.json','w')
	json.dump(np.array(Ndq_array_in).tolist(),f)
	f.close()	
	f=open('cl_Ndu_FR_UxaQ.json','w')
	json.dump(np.array(Ndu_array_in).tolist(),f)
	f.close()	
	f=open('cl_Nau_FR_QxaU.json','w')
	json.dump(np.array(Nau_array_in).tolist(),f)
	f.close()	
	f=open('cl_Naq_FR_UxaQ.json','w')
	json.dump(np.array(Naq_array_in).tolist(),f)
	f.close()	
	#f=open('cl_noise_FR_QxaU.json','w')
	#json.dump(np.array(noise1_array_in).tolist(),f)
	#f.close()	
	#f=open('cl_noise_FR_UxaQ.json','w')
	#json.dump(np.array(noise2_array_in).tolist(),f)
	#f.close()	
	bins=[1,5,10,20,25,50]
	fsky=225.*(np.pi/180.)**2/(4*np.pi)
	l=np.arange(len(cross1_array_in[0]))
	ll=l*(l+1)/(2*np.pi)
	L=np.sqrt(fsky*4*np.pi)
	dl_eff=2*np.pi/L
	
        theory1_array_in=np.array(theory1_array_in)/(fsky*bls)
	theory2_array_in=np.array(theory2_array_in)/(fsky*bls)
	cross1_array_in=np.array(cross1_array_in)/(fsky*bls)
	cross2_array_in=np.array(cross2_array_in)/(fsky*bls)
	Ndq_array_in=np.array(Ndq_array_in)/(fsky)
	Ndu_array_in=np.array(Ndu_array_in)/(fsky)
	Nau_array_in=np.array(Nau_array_in)/(fsky)
	Naq_array_in=np.array(Naq_array_in)/(fsky)
	#noise1_array_in=np.array(noise1_array_in)/(fsky*bls)
	#noise2_array_in=np.array(noise2_array_in)/(fsky*bls)

	Ndq_array_in.shape += (1,)
	Ndu_array_in.shape += (1,)
	Nau_array_in.shape += (1,)
	Naq_array_in.shape += (1,)


	for b in bins:
		theory_cls=hp.read_cl('/home/matt/Planck/data/faraday/correlation/fr_theory_cl.fits')
	#	N_dq=np.mean(Ndq_array_in)
	#	N_au=np.mean(Nau_array_in)
	#	#delta1=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))**2+(np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#	delta1=np.sqrt(2.*((np.mean(theory1_array_in,axis=0))**2+(np.mean(theory1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#
		cosmic1=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=0)**2)

	#	N_du=np.mean(Ndu_array_in)
	#	N_aq=np.mean(Naq_array_in)
	#	#delta2=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))**2+(np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#	delta2=np.sqrt(2.*((np.mean(theory2_array_in,axis=0))**2+(np.mean(theory2_array_in,axis=0))/2.*(N_du+N_aq)+N_du*N_aq/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
		cosmic2=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=0)**2)

        	theory1_array=[]
        	theory2_array=[]
        	cross1_array=[]
        	cross2_array=[]
        #	noise1_array=[]
        #	noise2_array=[]
                    	
            	Ndq_array=[]
        	Ndu_array=[]
        	Nau_array=[]
        	Naq_array=[]
        	
		plot_l=[]
		if( b != 1):
	        	tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in,b)
	        	tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in,b)
			tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in,b)
	        	tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in,b)
		#	tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in,b)
	        #	tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in,b)
	        	
			theory1_array=tmp_t1['llcl']
			theory2_array=tmp_t2['llcl']
                        theory1_array.shape += (1,)
                        theory2_array.shape += (1,)
                        theory1_array=theory1_array.T
                        theory2_array=theory2_array.T
			plot_l= tmp_t1['l_out']
			cross1_array=tmp_c1['llcl']
			cross2_array=tmp_c2['llcl']
			
		#	noise1_array=tmp_n1['llcl']
		#	noise2_array=tmp_n2['llcl']
	        	
			Ndq_array=bin_llcl.bin_llcl(ll*Ndq_array_in,b)['llcl']
			Ndu_array=bin_llcl.bin_llcl(ll*Ndu_array_in,b)['llcl']
			Naq_array=bin_llcl.bin_llcl(ll*Naq_array_in,b)['llcl']
			Nau_array=bin_llcl.bin_llcl(ll*Nau_array_in,b)['llcl']
			tmp_c1=bin_llcl.bin_llcl((ll*cosmic1)**2,b)
			#tmp_d1=bin_llcl.bin_llcl((ll*delta1)**2,b)
		
			cosmic1=np.sqrt(tmp_c1['llcl'])
			#delta1=np.sqrt(tmp_d1['llcl'])

			tmp_c2=bin_llcl.bin_llcl((ll*cosmic2)**2,b)
			#tmp_d2=bin_llcl.bin_llcl((ll*delta2)**2,b)
			cosmic2=np.sqrt(tmp_c2['llcl'])
			#delta2=np.sqrt(tmp_d2['llcl'])
			t_tmp=bin_llcl.bin_llcl(ll*theory_cls,b)
			theory_cls=t_tmp['llcl']
		else:
			plot_l=l
			theory1_array=np.multiply(ll,theory1_array_in)
			cross1_array=np.multiply(ll,cross1_array_in)
		#	noise1_array=np.multiply(ll,noise1_array_in)
			theory2_array=np.multiply(ll,theory2_array_in)
			cross2_array=np.multiply(ll,cross2_array_in)
		#	noise2_array=np.multiply(ll,noise2_array_in)
			cosmic1*=ll
			cosmic2*=ll
			#delta1*=ll
			#delta2*=ll
			Ndq_array=np.multiply(ll,Ndq_array_in)
			Ndu_array=np.multiply(ll,Ndu_array_in)
			Naq_array=np.multiply(ll,Naq_array_in)
			Nau_array=np.multiply(ll,Nau_array_in)
			theory_cls*=ll
		#ipdb.set_trace()
		bad=np.where(plot_l < 24)
		N_dq=np.mean(Ndq_array,axis=0)
		N_du=np.mean(Ndu_array,axis=0)
		N_aq=np.mean(Naq_array,axis=0)
		N_au=np.mean(Nau_array,axis=0)
		#noise1=np.mean(noise1_array,axis=0)
		#noise2=np.mean(noise2_array,axis=0)
		theory1=np.mean(theory1_array,axis=0)
		theory2=np.mean(theory1_array,axis=0)
        	theory_array = np.add(theory1_array,theory2_array)
        	theory=np.mean(theory_array,axis=0)
        	#dtheory=np.sqrt(np.var(theory1_array,ddof=1) + np.var(theory2_array,ddof=1))
        	#cross_array = np.add(np.subtract(cross1_array,noise1),np.subtract(cross2_array,noise2))
        	cross_array = np.add(cross1_array,cross2_array)
        	cross=np.mean(cross_array,axis=0)
        	#dcross=np.std(cross_array,axis=0,ddof=1)
        	dcross=np.sqrt( ( np.var(cross1_array,axis=0,ddof=1) + np.var(cross2_array,axis=0,ddof=1)))
        	cosmic=np.sqrt(cosmic1**2+cosmic2**2)
	
		delta1=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory1**2 + theory1*(N_dq+N_au)/2. + N_dq*N_au/2.))
		delta2=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory2**2 + theory2*(N_du+N_aq)/2. + N_du*N_aq/2.))
        	delta=np.sqrt(delta1**2+delta2**2)
		#cosmic=np.abs(theory_cls)*np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(dl_eff**2+b**2)))
		#theory1=np.mean(theory1_array,axis=0)
		#dtheory1=np.std(theory1_array,axis=0,ddof=1)
		#cross1=np.mean(cross1_array,axis=0)
		#dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
		#ipdb.set_trace()
		plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'Cross_43x95_FR', title='QUIET FR Correlator',theory=theory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)

		#theory2=np.mean(theory2_array,axis=0)
		#dtheory2=np.std(theory2_array,axis=0,ddof=1)
		#cross2=np.mean(cross2_array,axis=0)
		##delta2=np.mean(delta2_array,axis=0)
		#dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
		##ipdb.set_trace()
		#plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
		#ipdb.set_trace()
    
		if b == 25 :
                        good_l=np.logical_and(plot_l <= 200,plot_l >25)
			likelihood(cross[good_l],delta[good_l],theory[good_l],'field1','c2bfr')

		#if b == 1 :
		#	xbar= np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
		#	vector=np.matrix(ll[1:]*cross[1:]).T
		#	mu=np.matrix(ll[1:]*theory[1:]).T
		#	fact=len(xbar)-1
		#	cov=(np.dot(xbar,xbar.T)/fact).squeeze()
		##	ipdb.set_trace()
		#	U,S,V =np.linalg.svd(cov)
		#	_cov= np.einsum('ij,j,jk', V.T,1./S,U.T)
		#	likelhd=np.exp(-np.dot(np.dot((vector-mu).T,_cov),(vector-mu))/2. )/(np.sqrt(2*np.pi*np.prod(S)))
		##	print('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
		#	f=open('FR_likelihood.txt','w')
		#	f.write('Likelihood of fit is #{0:.5f}'.format(likelhd[0,0]))
		#	f.close()

	subprocess.call('mv *01*.png bin_01/', shell=True)
	subprocess.call('mv *05*.png bin_05/', shell=True)
	subprocess.call('mv *10*.png bin_10/', shell=True)
	subprocess.call('mv *20*.png bin_20/', shell=True)
	subprocess.call('mv *25*.png bin_25/', shell=True)
	subprocess.call('mv *50*.png bin_50/', shell=True)
	subprocess.call('mv *.eps eps/', shell=True)
def plot_mc():
	"""Plot Monte-Carlo Faraday-rotation cross spectra for the QUIET fields.

	Loads the simulated theory and cross spectra of the two correlator
	combinations (QxaU and UxaQ) plus their noise spectra from JSON files,
	corrects them for sky fraction and beam/pixel window, and for each bin
	width in ``bins`` plots the combined spectrum with Knox-formula error
	bars and the cosmic-variance limit.  For the 25-ell binning the band
	powers with 25 < l <= 250 are handed to the ``likelihood`` routine.
	Finished figures are sorted into per-bin directories via shell ``mv``.

	Relies on module globals ``smoothing_scale`` and ``nside_out`` and on
	the project helpers ``bin_llcl``, ``plot_binned`` and ``likelihood``.
	"""
	with open('cl_theory_FR_QxaU.json','r') as f:
		theory1_array_in=json.load(f)
	with open('cl_theory_FR_UxaQ.json','r') as f:
		theory2_array_in=json.load(f)
	with open('cl_array_FR_QxaU.json','r') as f:
		cross1_array_in=json.load(f)
	with open('cl_array_FR_UxaQ.json','r') as f:
		cross2_array_in=json.load(f)
	with open('cl_Nau_FR_QxaU.json','r') as f:
		Nau_array_in=json.load(f)
	with open('cl_Ndq_FR_QxaU.json','r') as f:
		Ndq_array_in=json.load(f)
	with open('cl_Naq_FR_UxaQ.json','r') as f:
		Naq_array_in=json.load(f)
	with open('cl_Ndu_FR_UxaQ.json','r') as f:
		Ndu_array_in=json.load(f)

	bins=[1,5,10,20,25,50]
	# squared transfer function: gaussian beam times pixel window
	bls=(hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)*hp.pixwin(nside_out)[:3*nside_out])**2
	fsky=225.*(np.pi/180.)**2/(4*np.pi)	# 225 deg^2 QUIET patch
	l=np.arange(len(cross1_array_in[0]))
	ll=l*(l+1)/(2*np.pi)
	L=np.sqrt(fsky*4*np.pi)
	dl_eff=2*np.pi/L	# effective multipole resolution set by the patch size

	# correct all spectra for sky fraction; signal spectra also for the beam
	theory1_array_in=np.array(theory1_array_in)/(fsky*bls)
	theory2_array_in=np.array(theory2_array_in)/(fsky*bls)
	cross1_array_in=np.array(cross1_array_in)/(fsky*bls)
	cross2_array_in=np.array(cross2_array_in)/(fsky*bls)
	Ndq_array_in=np.array(Ndq_array_in)/(fsky)
	Ndu_array_in=np.array(Ndu_array_in)/(fsky)
	Nau_array_in=np.array(Nau_array_in)/(fsky)
	Naq_array_in=np.array(Naq_array_in)/(fsky)

	# add a trailing axis so the noise spectra broadcast against ll
	Ndq_array_in.shape += (1,)
	Ndu_array_in.shape += (1,)
	Nau_array_in.shape += (1,)
	Naq_array_in.shape += (1,)

	for b in bins:
		theory_cls=hp.read_cl('/home/matt/Planck/data/faraday/correlation/fr_theory_cl.fits')
		# cosmic-variance limit of each correlator combination
		cosmic1=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=0)**2)
		cosmic2=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=0)**2)

		if b != 1:
			tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in,b)
			tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in,b)
			tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in,b)
			tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in,b)

			theory1_array=tmp_t1['llcl']
			theory2_array=tmp_t2['llcl']
			# binned theory comes back 1-d; promote it to a (1, nbin) row
			theory1_array.shape += (1,)
			theory2_array.shape += (1,)
			theory1_array=theory1_array.T
			theory2_array=theory2_array.T
			plot_l=tmp_t1['l_out']
			cross1_array=tmp_c1['llcl']
			cross2_array=tmp_c2['llcl']

			Ndq_array=bin_llcl.bin_llcl(ll*Ndq_array_in,b)['llcl']
			Ndu_array=bin_llcl.bin_llcl(ll*Ndu_array_in,b)['llcl']
			Naq_array=bin_llcl.bin_llcl(ll*Naq_array_in,b)['llcl']
			Nau_array=bin_llcl.bin_llcl(ll*Nau_array_in,b)['llcl']
			# bin the variances (ll*sigma)^2, then return to sigma
			cosmic1=np.sqrt(bin_llcl.bin_llcl((ll*cosmic1)**2,b)['llcl'])
			cosmic2=np.sqrt(bin_llcl.bin_llcl((ll*cosmic2)**2,b)['llcl'])
			theory_cls=bin_llcl.bin_llcl(ll*theory_cls,b)['llcl']
		else:
			# bin width of one: only apply the l(l+1)/2pi scaling
			plot_l=l
			theory1_array=np.multiply(ll,theory1_array_in)
			cross1_array=np.multiply(ll,cross1_array_in)
			theory2_array=np.multiply(ll,theory2_array_in)
			cross2_array=np.multiply(ll,cross2_array_in)
			cosmic1*=ll
			cosmic2*=ll
			Ndq_array=np.multiply(ll,Ndq_array_in)
			Ndu_array=np.multiply(ll,Ndu_array_in)
			Naq_array=np.multiply(ll,Naq_array_in)
			Nau_array=np.multiply(ll,Nau_array_in)
			theory_cls*=ll

		N_dq=np.mean(Ndq_array,axis=0)
		N_du=np.mean(Ndu_array,axis=0)
		N_aq=np.mean(Naq_array,axis=0)
		N_au=np.mean(Nau_array,axis=0)
		theory1=np.mean(theory1_array,axis=0)
		# BUG FIX: theory2 was averaged from theory1_array in the original,
		# which fed the wrong spectrum into delta2 below.
		theory2=np.mean(theory2_array,axis=0)
		theory_array=np.add(theory1_array,theory2_array)
		theory=np.mean(theory_array,axis=0)
		cross_array=np.add(cross1_array,cross2_array)
		cross=np.mean(cross_array,axis=0)
		dcross=np.sqrt(np.var(cross1_array,axis=0,ddof=1)+np.var(cross2_array,axis=0,ddof=1))
		cosmic=np.sqrt(cosmic1**2+cosmic2**2)

		# Knox-formula uncertainties including the noise bias terms
		delta1=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory1**2 + theory1*(N_dq+N_au)/2. + N_dq*N_au/2.))
		delta2=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory2**2 + theory2*(N_du+N_aq)/2. + N_du*N_aq/2.))
		delta=np.sqrt(delta1**2+delta2**2)
		plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'Cross_43x95_FR', title='QUIET FR Correlator',theory=theory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)

		if b == 25 :
			# likelihood analysis on the well-measured multipole range
			good_l=np.logical_and(plot_l <= 250,plot_l >25)
			likelihood(cross[good_l],delta[good_l],theory[good_l],'field1','c2bfr')

	# sort finished figures into per-bin-width directories
	subprocess.call('mv *01*.png bin_01/', shell=True)
	subprocess.call('mv *05*.png bin_05/', shell=True)
	subprocess.call('mv *10*.png bin_10/', shell=True)
	subprocess.call('mv *20*.png bin_20/', shell=True)
	subprocess.call('mv *25*.png bin_25/', shell=True)
	subprocess.call('mv *50*.png bin_50/', shell=True)
	subprocess.call('mv *.eps eps/', shell=True)
예제 #9
0
파일: Spicer.py 프로젝트: giuspugl/HADES
            
        ispice(FitsDir+infile+'.fits',outCl,maskfile1=FitsDir+'MaskHard%s.fits' %GAL,\
               weightfile1=FitsDir+'Mask%s.fits' %GAL,apodizesigma=sig_apodize,\
               thetamax=sig_apodize)

        # Now check for file to be created
        start_time=time.time()
        while not os.path.exists(outCl):
            time.sleep(1)

            if time.time()-start_time>10:
                raise Exception('Failed to compute')

      # Bin Cl files
        Cl=hp.read_cl(outCl)
        X,Y,_,E=bin_llcl(Cl[4:],5,flatten=True)
        idx=np.where(Y>0.)
        X=X[idx]
        Y=Y[idx]
        E=E[idx]
        if infile=='A':
            ClA.append([X,Y,E,Cl])
        elif infile=='H2':
            ClH2.append([X,Y,E,Cl])
        else:
            ClAng.append([X,Y,E,Cl])
            
        # E is errors assuming full-sky coverage
        sky_frac = np.sum(mask)/len(mask) # this is sky fraction used
        E/=np.sqrt(sky_frac)
def main():
	"""Correlate Planck LFI band pairs through the Faraday-rotation estimator.

	For every galactic cut: cross-correlate each pair of LFI maps, average
	``N_runs`` noise-only realizations, archive all spectra to a per-cut
	``.npz`` file, then bin, plot, and (for the 25-ell binning) write a
	likelihood scan and detection statistics.

	Relies on module globals ``smoothing_scale``, ``nside_out``, ``bands``
	and the project helpers ``correlate_signal``, ``correlate_noise``,
	``bin_llcl`` and ``plot_binned``.
	"""
	## Parameters for binning, number of noise runs, beam correction
	use_beam=0
	N_runs=10
	bins=[1,5,10,20,25,50]
	gal_cut=[-20,-10,-5,0,5,10,20]	# galactic-plane cuts in degrees
	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	l=np.arange(3*nside_out)
	ll=l*(l+1)/(2*np.pi)

	alpha_file='/data/wmap/faraday_MW_realdata.fits'
	# NOTE(review): `wl` and `bands` are used below but not defined in this
	# function (the local `wl` definition was commented out in the original);
	# they must exist at module scope -- verify before running.

	dsets=glob.glob('/data/Planck/LFI*1024*R2.00*.fits')
	dsets.sort()
	for cut in gal_cut:
		cross1_array_in=[]
		cross2_array_in=[]
		Ndq_array_in=[]
		Ndu_array_in=[]
		Nau_array_in=[]
		Naq_array_in=[]
		noise1_array_in=[]
		noise2_array_in=[]

		print('Galactic cut: {:2d}'.format(cut))
		for i in xrange(len(bands)-1):
			for j in xrange(i+1,len(bands)):
				print('    Bands: {0:0>3.0f} x {1:0>3.0f}'.format(bands[i],bands[j]))
				## accumulators used to average over noise realizations
				tmp_noise1_array=[]
				tmp_noise2_array=[]
				tmp_Ndq_array=[]
				tmp_Ndu_array=[]
				tmp_Nau_array=[]
				tmp_Naq_array=[]

				tmp_c1,tmp_c2=correlate_signal(dsets[i],dsets[j],wl[i],wl[j],alpha_file,'{0:0>3.0f}x{1:0>3.0f}'.format(bands[i],bands[j]),beam=use_beam,gal_cut=cut)
				cross1_array_in.append(tmp_c1)
				cross2_array_in.append(tmp_c2)

				for n in xrange(N_runs):
					print('\tNoise Correlation #{:0>3d}'.format(n+1))
					tmp_n1,tmp_n2,tmp_dq,tmp_du,tmp_au,tmp_aq=correlate_noise(dsets[i],dsets[j],wl[i],wl[j],alpha_file,'{0:0>3.0f}x{1:0>3.0f}'.format(bands[i],bands[j]),beam=use_beam,gal_cut=cut)
					tmp_noise1_array.append(tmp_n1)
					tmp_noise2_array.append(tmp_n2)
					tmp_Ndq_array.append(tmp_dq)
					tmp_Ndu_array.append(tmp_du)
					tmp_Naq_array.append(tmp_aq)
					tmp_Nau_array.append(tmp_au)

				noise1_array_in.append(np.mean(tmp_noise1_array,axis=0))
				noise2_array_in.append(np.mean(tmp_noise2_array,axis=0))
				Ndq_array_in.append(np.mean(tmp_Ndq_array))
				Ndu_array_in.append(np.mean(tmp_Ndu_array))
				Nau_array_in.append(np.mean(tmp_Nau_array))
				Naq_array_in.append(np.mean(tmp_Naq_array))

		# read in the theoretical FR power spectrum
		theory_in=hp.read_cl('/home/matt/Planck/data/faraday/correlation/fr_theory_cl.fits')

		# Archive everything so plot_mc() can re-plot without re-correlating.
		# NOTE(review): the '{0:2>02d}' spec uses '2' as the fill character
		# (e.g. cut=5 -> '25'); kept as-is because plot_mc() loads the same
		# pattern -- changing one side alone would break the pairing.
		np.savez('FR_planck_cut_{0:2>02d}.npz'.format(cut),theory=theory_in,c1=cross1_array_in,c2=cross2_array_in,n1=noise1_array_in,n2=noise2_array_in,ndq=Ndq_array_in,nau=Nau_array_in,ndu=Ndu_array_in,naq=Naq_array_in)

		# sky fraction left visible by the galactic cut
		if cut >= 0:
			fsky= 1. - np.sin(cut*np.pi/180.)
		else:
			fsky= np.abs(np.sin(cut*np.pi/180.))
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L	# effective multipole resolution of the cut sky

		cross1_array_in=np.array(cross1_array_in)/fsky
		cross2_array_in=np.array(cross2_array_in)/fsky
		noise1_array_in=np.array(noise1_array_in)/fsky
		noise2_array_in=np.array(noise2_array_in)/fsky
		Nau_array_in=np.array(Nau_array_in)/fsky
		Naq_array_in=np.array(Naq_array_in)/fsky
		Ndu_array_in=np.array(Ndu_array_in)/fsky
		Ndq_array_in=np.array(Ndq_array_in)/fsky

		for b in bins:
			# Knox-formula errors for each noise-debiased correlator combination
			delta1=np.sqrt( np.divide(2.*abs( (cross1_array_in - noise1_array_in).T**2 + (cross1_array_in - noise1_array_in).T * (Ndq_array_in+Nau_array_in)/2. + Ndq_array_in*Nau_array_in/2.).T,((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky)))
			delta2=np.sqrt( np.divide(2.*abs( (cross2_array_in - noise2_array_in).T**2 + (cross2_array_in - noise2_array_in).T * (Ndu_array_in+Naq_array_in)/2. + Ndu_array_in*Naq_array_in/2.).T,((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky)))

			# cosmic-variance limit
			cosmic=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*theory_in**2)

			delta_array=np.sqrt(delta1**2+delta2**2)
			# noise-debiased, inverse-variance-weighted combination of band pairs
			cross_array = np.add(np.subtract(cross1_array_in,noise1_array_in),np.subtract(cross2_array_in,noise2_array_in))
			cross=np.average(cross_array,weights=1./delta_array**2,axis=0)
			dcross=np.average(delta_array,weights=1./delta_array**2,axis=0)
			if b != 1:
				tmp_c1=bin_llcl.bin_llcl(ll*cross/bls,b)
				cross=tmp_c1['llcl']
				plot_l=tmp_c1['l_out']
				dcross=bin_llcl.bin_llcl(ll*dcross/bls,b)['llcl']
				theory=bin_llcl.bin_llcl(ll*theory_in/bls,b)['llcl']
				cosmic=bin_llcl.bin_llcl(ll*cosmic/bls,b)['llcl']
			else:
				# bin width of one: only apply scaling and beam correction
				plot_l=l
				theory=np.multiply(ll/bls,theory_in)
				cross*=ll/bls
				cosmic*=ll/bls
				dcross*=ll/bls
			plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'Cross_FR_cut_{0:0>2d}'.format(cut), title='Faraday Rotation Correlator',theory=theory*1e12,dtheory=cosmic*1e12)

			if b == 25 :
				# chi^2 scan over an amplitude scale factor a
				a_scales=np.linspace(-10,10,1001)
				chi_array=[]
				for a in a_scales:
					chi_array.append(np.sum( (cross - a*theory)**2/(dcross)**2))
				ind = np.argmin(chi_array)
				# normalize so the likelihood integrates to one (da = 0.05)
				likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sum(np.exp(np.multiply(-1./2.,chi_array))*.05)

				Sig=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise=np.sqrt(1./np.sum(1./dcross**2))
				Sig1=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise1=np.sum(dcross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				SNR=Sig/Noise
				SNR1=Sig1/Noise1

				fig,ax1=plt.subplots(1,1)
				ax1.plot(a_scales,likelihood,'k.')
				ax1.set_title('Faraday Rotation Correlator')
				ax1.set_xlabel('Likelihood scalar')
				ax1.set_ylabel('Likelihood of Correlation')
				fig.savefig('FR_Correlation_Likelihood.png',format='png')
				fig.savefig('FR_Correlation_Likelihood.eps',format='eps')
				f=open('Maximum_likelihood_{0:0>2d}.txt'.format(cut),'w')
				f.write('Maximum Likelihood: {0:2.5f}%  for scale factor {1:.2f} \n'.format(float(likelihood[ind]*100),float(a_scales[ind])))
				f.write('Probability of scale factor =1: {0:2.5f}% \n \n'.format(float(likelihood[np.where(a_scales ==1)])*100))
				f.write('Detection Levels using Standard Deviation \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR,Sig, Noise))
				# BUG FIX: the weighted line previously reported Noise, not Noise1
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n \n'.format(SNR1,Sig1,Noise1))
				f.close()

	# Sort finished figures into per-bin-width directories.  The original
	# also issued `mv Maximum_likelihood_cut_XX.txt` with no destination
	# (and a filename never written), which always failed -- removed.
	subprocess.call('mv *01.png bin_01/', shell=True)
	subprocess.call('mv *05.png bin_05/', shell=True)
	subprocess.call('mv *10.png bin_10/', shell=True)
	subprocess.call('mv *20.png bin_20/', shell=True)
	subprocess.call('mv *25.png bin_25/', shell=True)
	subprocess.call('mv *50.png bin_50/', shell=True)
	subprocess.call('mv *.eps eps/', shell=True)
def plot_mc():
	"""Re-plot the saved Faraday-rotation spectra for every galactic cut.

	Loads the per-cut ``.npz`` archives written by ``main``, rebuilds the
	noise-debiased, inverse-variance-weighted cross spectrum and its
	errors, plots every bin width, and for the 25-ell binning writes a
	likelihood scan and detection statistics.

	Relies on module globals ``smoothing_scale`` and ``nside_out`` and on
	the project helpers ``bin_llcl`` and ``plot_binned``.
	"""
	bins=[1,5,10,20,25,50]
	gal_cut=[-20,-10,-5,0,5,10,20]	# galactic-plane cuts in degrees
	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	l=np.arange(3*nside_out)
	ll=l*(l+1)/(2*np.pi)
	for cut in gal_cut:
		# filename format mirrors the (odd, '2'-filled) spec used by main()'s
		# np.savez so the archive written there is found here
		data=np.load('FR_planck_cut_{0:2>02d}.npz'.format(cut))

		theory_in=data['theory']
		cross1_array_in=data['c1']
		cross2_array_in=data['c2']
		noise1_array_in=data['n1']
		noise2_array_in=data['n2']
		Ndq_array_in=data['ndq']
		Ndu_array_in=data['ndu']
		Naq_array_in=data['naq']
		Nau_array_in=data['nau']

		# sky fraction left visible by the galactic cut
		if cut >= 0:
			fsky= 1. - np.sin(cut*np.pi/180.)
		else:
			fsky= np.abs(np.sin(cut*np.pi/180.))
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L	# effective multipole resolution of the cut sky

		cross1_array_in=np.array(cross1_array_in)/fsky
		cross2_array_in=np.array(cross2_array_in)/fsky
		noise1_array_in=np.array(noise1_array_in)/fsky
		noise2_array_in=np.array(noise2_array_in)/fsky
		Nau_array_in=np.array(Nau_array_in)/fsky
		Naq_array_in=np.array(Naq_array_in)/fsky
		Ndu_array_in=np.array(Ndu_array_in)/fsky
		Ndq_array_in=np.array(Ndq_array_in)/fsky

		for b in bins:
			# Knox-formula errors for each noise-debiased correlator combination
			delta1=np.sqrt( np.divide(2.*abs( (cross1_array_in - noise1_array_in).T**2 + (cross1_array_in - noise1_array_in).T * (Ndq_array_in+Nau_array_in)/2. + Ndq_array_in*Nau_array_in/2.).T,((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky)))
			delta2=np.sqrt( np.divide(2.*abs( (cross2_array_in - noise2_array_in).T**2 + (cross2_array_in - noise2_array_in).T * (Ndu_array_in+Naq_array_in)/2. + Ndu_array_in*Naq_array_in/2.).T,((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky)))

			# cosmic-variance limit
			cosmic=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*theory_in**2)

			delta_array=np.sqrt(delta1**2+delta2**2)
			# noise-debiased, inverse-variance-weighted combination of band pairs
			cross_array = np.add(np.subtract(cross1_array_in,noise1_array_in),np.subtract(cross2_array_in,noise2_array_in))
			cross=np.average(cross_array,weights=1./delta_array**2,axis=0)
			dcross=np.average(delta_array,weights=1./delta_array**2,axis=0)
			if b != 1:
				tmp_c1=bin_llcl.bin_llcl(ll*cross/bls,b)
				cross=tmp_c1['llcl']
				plot_l=tmp_c1['l_out']
				dcross=bin_llcl.bin_llcl(ll*dcross/bls,b)['llcl']
				theory=bin_llcl.bin_llcl(ll*theory_in/bls,b)['llcl']
				cosmic=bin_llcl.bin_llcl(ll*cosmic/bls,b)['llcl']
			else:
				# bin width of one: only apply scaling and beam correction
				plot_l=l
				theory=np.multiply(ll/bls,theory_in)
				cross*=ll/bls
				cosmic*=ll/bls
				dcross*=ll/bls
			plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'Cross_FR_cut_{0:0>2d}'.format(cut), title='Faraday Rotation Correlator',theory=theory*1e12,dtheory=cosmic*1e12)

			if b == 25 :
				# chi^2 scan over an amplitude scale factor a
				a_scales=np.linspace(-10,10,1001)
				chi_array=[]
				for a in a_scales:
					chi_array.append(np.sum( (cross - a*theory)**2/(dcross)**2))
				ind = np.argmin(chi_array)
				# normalize so the likelihood integrates to one (da = 0.05)
				likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sum(np.exp(np.multiply(-1./2.,chi_array))*.05)

				Sig=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise=np.sqrt(1./np.sum(1./dcross**2))
				Sig1=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise1=np.sum(dcross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				SNR=Sig/Noise
				SNR1=Sig1/Noise1

				fig,ax1=plt.subplots(1,1)
				ax1.plot(a_scales,likelihood,'k.')
				ax1.set_title('Faraday Rotation Correlator')
				ax1.set_xlabel('Likelihood scalar')
				ax1.set_ylabel('Likelihood of Correlation')
				fig.savefig('FR_Correlation_Likelihood.png',format='png')
				fig.savefig('FR_Correlation_Likelihood.eps',format='eps')
				# NOTE(review): this file carries no cut suffix, so each
				# galactic cut overwrites the previous one -- confirm intent.
				f=open('Maximum_likelihood.txt','w')
				f.write('Maximum Likelihood: {0:2.5f}%  for scale factor {1:.2f} \n'.format(float(likelihood[ind]*100),float(a_scales[ind])))
				f.write('Probability of scale factor =1: {0:2.5f}% \n \n'.format(float(likelihood[np.where(a_scales ==1)])*100))
				f.write('Detection Levels using Standard Deviation \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR,Sig, Noise))
				# BUG FIX: the weighted line previously reported Noise, not Noise1
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n \n'.format(SNR1,Sig1,Noise1))
				f.close()

	# Sort finished figures into per-bin-width directories.  The original
	# also issued `mv Maximum_likelihood_cut_XX.txt` with no destination
	# (and a filename never written), which always failed -- removed.
	subprocess.call('mv *01*.png bin_01/', shell=True)
	subprocess.call('mv *05*.png bin_05/', shell=True)
	subprocess.call('mv *10*.png bin_10/', shell=True)
	subprocess.call('mv *20*.png bin_20/', shell=True)
	subprocess.call('mv *25*.png bin_25/', shell=True)
	subprocess.call('mv *50*.png bin_50/', shell=True)
	subprocess.call('mv *.eps eps/', shell=True)