Example #1
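# NB (assumed context): these snippets come from the hades B-mode dust-anisotropy
# package and rely on module-level names not shown here: `a` (the global parameter
# object supplying the defaults below), `np` (numpy), `fftTools` and `liteMap`
# (flipper), and `fp` (flipperPol). The exact import lines are assumptions, e.g.:
#   import numpy as np
#   from flipper import fftTools, liteMap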
def padded_wrap(map_id,map_size=a.map_size,\
	sep=a.sep,N_sims=a.N_sims,N_bias=a.N_bias,noise_power=a.noise_power,FWHM=a.FWHM,\
	slope=a.slope,l_step=a.l_step,lMin=a.lMin,lMax=a.lMax,rot=a.rot,freq=a.freq,\
	delensing_fraction=a.delensing_fraction,useTensors=a.useTensors,f_dust=a.f_dust,\
	rot_average=a.rot_average,useBias=a.useBias,padding_ratio=a.padding_ratio,unPadded=a.unPadded,flipU=a.flipU,root_dir=a.root_dir,\
	KKdebiasH2=a.KKdebiasH2,cutFactor=1.25,ffp10_spectrum=a.ffp10_spectrum):
	""" Compute the estimated angle, amplitude and polarisation fraction with noise, correcting for bias.
	Noise model is from Hu & Okamoto 2002 and errors are estimated using MC simulations, which are all saved.
	
	Input: map_id (tile number)
	map_size (tile width in degrees)
	sep (separation of tile centres in degrees)
	N_sims (number of MC simulations)
	N_bias (no. sims used for bias computation)
	noise_power (noise power in microK-arcmin)
	FWHM (noise FWHM in arcmin)
	slope (fiducial slope of C_l isotropic dust dependance)
	lMin / lMax (range of ell values to apply the estimators over)
	l_step (step size for binning of 2D spectra)
	rot (angle to rotate by before applying estimators)
	freq (desired map frequency; 150 GHz for BICEP, 353 GHz for Vansyngel)
	delensing_fraction (efficiency of delensing; i.e. 0.1=90% removed)
	useTensors (Boolean, whether to include tensor noise from IGWs with r = 0.1)
	f_dust (factor to reduce mean dust amplitude by - for null testing - default = 1 - no reduction)
	rot_average (Boolean, whether to correct for pixellation error by performing (corrected) map rotations)
	useBias (Boolean, whether to use bias)
	padding_ratio (ratio of padded to unpadded map width >=1)
	flipU (Boolean, whether to flip sign of U-map if in Planck COSMO convention)
	root_dir (home directory)
	
	Output: First 6 values: [estimate,isotropic mean, isotropic stdev] for {A,Afs,Afc,fs,fc,str,ang}
	7th: full data for N_sims as a sequence of 7 lists for each estimate (each of length N_sims)
	8th: full data for N_sims for the monopole bias term
	9th: [estimate,isotropic mean, isotropic stdev] for Hexadecapole power
	10: true monopole (from noiseless simulation) - for testing
	11th: bias (isotropic estimate of <H^2>)
	"""
	lCut=int(cutFactor*lMax) # maximum ell for Fourier space maps
	
	# First compute the B-mode Fourier map from the real-space map, padded with the desired ratio; also return the padded and unpadded window functions for later use
	from .PaddedPower import MakePowerAndFourierMaps,DegradeMap,DegradeFourier
	fBdust,padded_window,unpadded_window=MakePowerAndFourierMaps(map_id,padding_ratio=padding_ratio,map_size=map_size,sep=sep,freq=freq,fourier=True,power=False,returnMasks=True,flipU=flipU,root_dir=root_dir)
	
			
	# Also compute unpadded map to give binning values without bias
	unpadded_fBdust=MakePowerAndFourierMaps(map_id,padding_ratio=1.,map_size=map_size,sep=sep,freq=freq,fourier=True,power=False,returnMasks=False,flipU=flipU,root_dir=root_dir)
	
	if a.hexTest:
		# TESTING - replace fourier B-mode from dust with random isotropic realisation of self
		powDust=fftTools.powerFromFFT(fBdust) # compute power
		from .PowerMap import oneD_binning
		ll,pp=oneD_binning(powDust.copy(),10,lCut,l_step,binErr=False,exactCen=False) # compute one-D binned spectrum
		from .PaddedPower import fourier_noise_test
		fBdust,unpadded_fBdust=fourier_noise_test(padded_window,unpadded_window,ll,pp,padding_ratio=padding_ratio,unpadded=False,log=True)
	# Scale the dust amplitude by f_dust (reduced for null tests)
	unpadded_fBdust.kMap*=f_dust
	fBdust.kMap*=f_dust
	
	# Compute <W^2>^2 / <W^4> - a necessary correction for the H^2 quantities (which are four-field)
	wCorrection = np.mean(padded_window.data**2.)**2./np.mean(padded_window.data**4.)
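	# (each of Afs, Afc is quadratic in the masked field, so H^2 = Afs^2 + Afc^2
	# carries four powers of the window; multiplying by <W^2>^2/<W^4> corrects this)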
	
	# Input directory:
	inDir=root_dir+'%sdeg%s/' %(map_size,sep)
	
	# First compute the total noise (instrument+lensing+tensors)
	from .NoisePower import noise_model,lensed_Cl,r_Cl
	Cl_lens_func=lensed_Cl(delensing_fraction=delensing_fraction,ffp10_spectrum=ffp10_spectrum) # function for lensed Cl
	
	if useTensors: # include r = 0.1 estimate
		Cl_r_func=r_Cl()
		def total_Cl_noise(l):
			return Cl_lens_func(l)+noise_model(l,FWHM=FWHM,noise_power=noise_power)+Cl_r_func(l)
	else:
		def total_Cl_noise(l):
			return Cl_lens_func(l)+noise_model(l,FWHM=FWHM,noise_power=noise_power)
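	# (total noise C_ell = delensed lensing B-mode power + instrumental noise [+ r=0.1 tensor power])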
	
	# Now create a fourier space noise map	
	from .PaddedPower import fourier_noise_map
	ellNoise=np.arange(5,lCut+200) # ell range for noise spectrum
	
	
	from .PaddedPower import fourier_noise_test
	fourierNoise,unpadded_noise=fourier_noise_test(padded_window.copy(),unpadded_window.copy(),ellNoise,total_Cl_noise(ellNoise),padding_ratio=padding_ratio,unpadded=False,log=a.log_noise)
	
	unpadded_noise=DegradeFourier(unpadded_noise.copy(),lCut)
	fourierNoise=DegradeFourier(fourierNoise.copy(),lCut)
	padded_window=DegradeMap(padded_window.copy(),lCut)
	unpadded_window=DegradeMap(unpadded_window.copy(),lCut)
	
	
	# Compute total map
	totFmap=DegradeFourier(fBdust.copy(),lCut)
	totFmap.kMap+=fourierNoise.kMap # add noise to the dust map for total B-modes
	unpadded_totFmap=DegradeFourier(unpadded_fBdust.copy(),lCut)
	unpadded_totFmap.kMap+=unpadded_noise.kMap
	
	
	# Now convert to power-space
	totPow=fftTools.powerFromFFT(totFmap) # total power map
	Bpow=fftTools.powerFromFFT(fBdust) # dust only map
	unpadded_totPow=fftTools.powerFromFFT(unpadded_totFmap)
	
	
	del fourierNoise,unpadded_noise
	
	if unPadded: # only use unpadded maps here
		totFmap=unpadded_totFmap
		totPow=unpadded_totPow
		padded_window=unpadded_window
			
			
		
	# Compute true amplitude using ONLY dust map
	from .KKdebiased import derotated_estimator
	p=derotated_estimator(Bpow.copy(),map_id,lMin=lMin,lMax=lMax,slope=slope,factor=None,FWHM=0.,\
			noise_power=1.e-400,rot=rot,delensing_fraction=0.,useTensors=False,debiasAmplitude=False,rot_average=rot_average,KKdebiasH2=False) # noise_power=1.e-400 underflows to zero, i.e. a noiseless estimate
	trueA=p[0]
	del Bpow		
	
	# Compute rough semi-analytic C_ell spectrum
	def analytic_model(ell,A_est,slope):
		"""Use the estimate for A to construct analytic model.
		NB: This is just used for finding the centres of the actual binned data.
		"""
		return total_Cl_noise(ell)+A_est*ell**(-slope)
	
	# Compute anisotropy parameters
	A_est,fs_est,fc_est,Afs_est,Afc_est,finalFactor=derotated_estimator(totPow.copy(),map_id,lMin=lMin,\
		lMax=lMax,slope=slope,factor=None,FWHM=FWHM,noise_power=noise_power,rot=rot,\
		delensing_fraction=delensing_fraction,useTensors=useTensors,debiasAmplitude=True,rot_average=rot_average,KKdebiasH2=KKdebiasH2)
	# (factor is the expected monopole amplitude, used to speed convergence)
	
	## Run MC Simulations	
	
	# Compute 1D power spectrum by binning in annuli
	from .PowerMap import oneD_binning
	l_cen,mean_pow = oneD_binning(unpadded_totPow.copy(),lMin*padding_ratio,lCut,l_step*padding_ratio,binErr=False,exactCen=a.exactCen,\
					C_ell_model=analytic_model,params=[A_est,slope]) 
	# gives central binning l and mean power in annulus using window function corrections 
	
	# Create spline fit
	from scipy.interpolate import UnivariateSpline
	spl=UnivariateSpline(l_cen,np.log(mean_pow),k=5)
	def spline(ell):
		return np.exp(spl(ell))
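	# (the spline is fit to log-power with k=5, so the interpolated C_ell stays positive)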
	
	# Precompute useful data:
	from hades.RandomField import precompute
	precomp=precompute(padded_window.copy(),spline,lMin=lMin,lMax=lMax)
	
		
	# First compute the bias factor
	from .RandomField import padded_fill_from_Cell
	if useBias:
		bias_data=np.zeros(N_bias)
		for n in range(N_bias):
			if n%100==0:
				print('Computing bias sim %s of %s' %(n+1,N_bias))
			fBias=padded_fill_from_Cell(padded_window.copy(),l_cen,mean_pow,lMin=lMin,unPadded=unPadded,precomp=precomp)#,padding_ratio=padding_ratio)
			bias_cross=fftTools.powerFromFFT(fBias.copy(),totFmap.copy()) # cross map
			bias_self=fftTools.powerFromFFT(fBias.copy()) # self map
			# First compute estimators on cross-spectrum
			cross_ests=derotated_estimator(bias_cross.copy(),map_id,lMin=lMin,lMax=lMax,slope=slope,\
							factor=finalFactor,FWHM=FWHM,noise_power=noise_power,\
							rot=rot,delensing_fraction=delensing_fraction,useTensors=useTensors,\
							debiasAmplitude=False,rot_average=rot_average,KKdebiasH2=False) # NB: amplitude debiasing is deliberately off for the cross-spectrum
			self_ests=derotated_estimator(bias_self.copy(),map_id,lMin=lMin,lMax=lMax,slope=slope,\
							factor=finalFactor,FWHM=FWHM,noise_power=noise_power,\
							rot=rot,delensing_fraction=delensing_fraction,useTensors=useTensors,\
							debiasAmplitude=True,rot_average=rot_average,KKdebiasH2=KKdebiasH2)
			# Per-sim H^2 bias term: 4*H_cross^2 - H_self^2, window-corrected
			bias_data[n]=(-1.*(self_ests[3]**2.+self_ests[4]**2.)+4.*(cross_ests[3]**2.+cross_ests[4]**2.))*wCorrection
		# Now compute the mean bias - this debiases the DATA only
		bias=np.mean(bias_data)
		del bias_self,bias_cross
	else:
		print('No bias subtraction')
		bias=0.
			
	## Now run the MC sims proper:
	# Initialise arrays
	A_MC,fs_MC,fc_MC,Afs_MC,Afc_MC,epsilon_MC,ang_MC,HexPow2_MC=[np.zeros(N_sims) for _ in range(8)]
	
	for n in range(N_sims): # for each MC map
		if n%100==0:
			print('MapID %s: Starting simulation %s of %s' %(map_id,n+1,N_sims))
		# Create the map with a random implementation of Cell
		fourier_MC_map=padded_fill_from_Cell(padded_window.copy(),l_cen,mean_pow,lMin=lMin,unPadded=unPadded,precomp=precomp)
		MC_map=fftTools.powerFromFFT(fourier_MC_map.copy()) # create power domain map
		
		# Now use the estimators on the MC sims
		output=derotated_estimator(MC_map.copy(),map_id,lMin=lMin,lMax=lMax,\
			slope=slope,factor=finalFactor,FWHM=FWHM,noise_power=noise_power,\
			rot=rot, delensing_fraction=delensing_fraction,useTensors=useTensors,\
			debiasAmplitude=True,rot_average=rot_average,KKdebiasH2=KKdebiasH2) 
		
		# Compute MC anisotropy parameters  
		A_MC[n]=output[0]
		fs_MC[n]=output[3]/output[0]
		fc_MC[n]=output[4]/output[0]
		Afs_MC[n]=output[3] # these are fundamental quantities here
		Afc_MC[n]=output[4]
		epsilon_MC[n]=np.sqrt((output[3]**2.+output[4]**2.)*wCorrection)/output[0] # NOT corrected for bias in <H^2>
		ang_MC[n]=0.25*180./np.pi*np.arctan2(output[3],output[4]) # NB: this is not corrected for bias
		HexPow2_MC[n]=(output[3]**2.+output[4]**2.)*wCorrection 
	if useBias:	
		isoBias=np.mean(HexPow2_MC)
		HexPow2_MC-=isoBias*np.ones_like(HexPow2_MC) # remove the bias (i.e. mean of H^2 from all sims)
	else:
		isoBias=0.
		
	print('MC sims complete')
	
	del fourier_MC_map,MC_map,totFmap,unpadded_totFmap,totPow,unpadded_totPow,padded_window,unpadded_window # delete unneeded variables
	
	# Regroup data
	allMC=[A_MC,fs_MC,fc_MC,Afs_MC,Afc_MC,epsilon_MC,ang_MC,HexPow2_MC]
	
	# Compute anisotropy fraction and angle from data
	ang_est=0.25*180./np.pi*(np.arctan2(Afs_est,Afc_est)) # in degrees (already corrected for rotation) - NB: not debiased
	frac_est=np.sqrt((Afs_est**2.+Afc_est**2.)*wCorrection)/A_est # BIASED sqrt(<H^2>)/A
	HexPow2_est=(Afs_est**2.+Afc_est**2.)*wCorrection-bias # estimated hexadecapolar power - debiased + corrected for <W^4>
	
	# Compute means and standard deviations
	A_mean=np.mean(A_MC)
	A_std=np.std(A_MC)
	fc_mean=np.mean(fc_MC)
	fs_mean=np.mean(fs_MC)
	fc_std=np.std(fc_MC)
	fs_std=np.std(fs_MC)
	frac_mean=np.mean(epsilon_MC)
	frac_std=np.std(epsilon_MC)
	ang_mean=np.mean(ang_MC)
	ang_std=np.std(ang_MC)
	HexPow2_mean=np.mean(HexPow2_MC)
	HexPow2_std=np.std(HexPow2_MC)
	Afs_mean=np.mean(Afs_MC)
	Afc_mean=np.mean(Afc_MC)
	Afs_std=np.std(Afs_MC)
	Afc_std=np.std(Afc_MC)
	
	# Regroup data
	Adat=[A_est,A_mean,A_std]
	fsdat=[fs_est,fs_mean,fs_std]
	fcdat=[fc_est,fc_mean,fc_std]
	Afsdat=[Afs_est,Afs_mean,Afs_std]
	Afcdat=[Afc_est,Afc_mean,Afc_std]
	fracdat=[frac_est,frac_mean,frac_std] # hexadecapolar anisotropy fraction (epsilon)
	angdat=[ang_est,ang_mean,ang_std] # anisotropy angle
	HexPow2dat=[HexPow2_est,HexPow2_mean,HexPow2_std] # hexadecapole amplitude
	
	# Return all output
	return Adat,fsdat,fcdat,Afsdat,Afcdat,fracdat,angdat,allMC,[],HexPow2dat,trueA,bias,wCorrection,isoBias # (empty list placeholder to avoid reordering later code)
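A minimal usage sketch for the function above; the `map_id=0` and `N_sims`/`N_bias` values are illustrative, the unpacking simply follows the return statement, and running it requires the full hades environment and tile data:

if __name__=='__main__':
	out=padded_wrap(0,N_sims=100,N_bias=100) # all other arguments default to the parameter object `a`
	Adat,fsdat,fcdat,Afsdat,Afcdat,fracdat,angdat=out[:7]
	allMC,_,HexPow2dat,trueA,bias,wCorrection,isoBias=out[7:]
	print('A = %.3e (MC mean %.3e, MC stdev %.3e)' %(Adat[0],Adat[1],Adat[2]))
	print('angle = %.2f deg, debiased H^2 = %.3e' %(angdat[0],HexPow2dat[0]))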
Example #2
def compute_angle(map_id,padding_ratio=a.padding_ratio,map_size=a.map_size,sep=a.sep,freq=a.freq,\
                  f_dust=a.f_dust,lMax=a.lMax,lMin=a.lMin,l_step=a.l_step,FWHM=a.FWHM,noise_power=a.noise_power,\
                  slope=a.slope,delensing_fraction=a.delensing_fraction,useQU=a.useQU,N_bias=a.N_bias):
    """Compute the polarisation angle for a specific tile, creating a model B-power spectrum + cross-spectra
    in order to find the angle including the ambiguity in sin(2alpha), cos(2alpha) due to initial computation
    of sin(4alpha), cos(4alpha).
    
    Returns angle in degrees.
    """

    # Step 1, create actual B-mode map
    lCut=int(1.35*lMax) # maximum ell for Fourier space maps

    # First compute the B-mode Fourier map from the real-space map, padded with the desired ratio; also return the padded and unpadded window functions for later use
    from hades.PaddedPower import MakePowerAndFourierMaps,DegradeMap,DegradeFourier
    fBdust,padded_window,unpadded_window=MakePowerAndFourierMaps(map_id,padding_ratio=padding_ratio,map_size=map_size,sep=sep,freq=freq,fourier=True,power=False,returnMasks=True,flipU=a.flipU)

    # Also compute unpadded map to give binning values without bias
    unpadded_fBdust=MakePowerAndFourierMaps(map_id,padding_ratio=1.,map_size=map_size,sep=sep,freq=freq,fourier=True,power=False,returnMasks=False,flipU=a.flipU)
    unpadded_fBdust=DegradeFourier(unpadded_fBdust,lCut) # remove high ell pixels

    fBdust=DegradeFourier(fBdust,lCut) # discard high-ell pixels
    padded_window=DegradeMap(padded_window.copy(),lCut) # remove high-ell data
    unpadded_window=DegradeMap(unpadded_window.copy(),lCut)

    unpadded_fBdust.kMap*=f_dust
    fBdust.kMap*=f_dust

    wCorrection = np.mean(padded_window.data**2.)**2./np.mean(padded_window.data**4.)

    from hades.NoisePower import noise_model,lensed_Cl,r_Cl
    Cl_lens_func=lensed_Cl(delensing_fraction=delensing_fraction) # function for lensed Cl

    def total_Cl_noise(l):
        return Cl_lens_func(l)+noise_model(l,FWHM=FWHM,noise_power=noise_power)

    from hades.PaddedPower import fourier_noise_map
    ellNoise=np.arange(5,lCut) # ell range for noise spectrum


    from hades.PaddedPower import fourier_noise_test
    fourierNoise,unpadded_noise=fourier_noise_test(padded_window,unpadded_window,ellNoise,total_Cl_noise(ellNoise),padding_ratio=padding_ratio,unpadded=False,log=True)

    totFmap=fBdust.copy()
    totFmap.kMap+=fourierNoise.kMap # add noise for total B-modes
    unpadded_totFmap=unpadded_fBdust.copy()
    unpadded_totFmap.kMap+=unpadded_noise.kMap

    fBtrue=totFmap.copy()

    # Step 2: Compute the I map
    inDir=a.root_dir+'%sdeg%s/' %(map_size,sep)
    Tmap=liteMap.liteMapFromFits(inDir+'fvsmapT_'+str(map_id).zfill(5)+'.fits')
    Qmap=liteMap.liteMapFromFits(inDir+'fvsmapQ_'+str(map_id).zfill(5)+'.fits')
    Umap=liteMap.liteMapFromFits(inDir+'fvsmapU_'+str(map_id).zfill(5)+'.fits')
    Umap.data*=-1. # flip sign of the U-map (Planck COSMO convention)
    QUmap=Qmap.copy()
    QUmap.data=np.sqrt(Qmap.data**2.+Umap.data**2.) # polarisation amplitude P = sqrt(Q^2 + U^2)
    if useQU:
        scaling=np.mean(QUmap.data**4.)
    else:
        scaling=np.mean(Tmap.data**4.)
    
    maskMap=liteMap.liteMapFromFits(inDir+'fvsmapMaskSmoothed_'+str(map_id).zfill(5)+'.fits')
    from hades.PaddedPower import zero_padding
    zTmap=zero_padding(Tmap,padding_ratio)
    zQUmap=zero_padding(QUmap,padding_ratio)
    zWindow=zero_padding(maskMap,padding_ratio)
    # Compute window factor <W^2> for padded window (since this is only region with data)
    windowFactor=np.mean(zWindow.data**2.)

    # Define mod(l) and ang(l) maps needed for fourier transforms
    modL,angL=fp.fftPol.makeEllandAngCoordinate(zTmap) # choice of map is arbitrary
    # Apply the window to the T and QU maps before Fourier transforming
    zTmap.data*=zWindow.data
    zQUmap.data*=zWindow.data
    fT=fftTools.fftFromLiteMap(zTmap)
    fQU=fftTools.fftFromLiteMap(zQUmap)

    # Rescale to correct amplitude using dust SED
    from hades.PowerMap import dust_emission_ratio
    dust_intensity_ratio=dust_emission_ratio(freq)

    fT.kMap*=dust_intensity_ratio # rescale to the map frequency using the dust SED
    fT.kMap/=np.sqrt(windowFactor) # correct for the window factor <W^2>
    fQU.kMap*=dust_intensity_ratio
    fQU.kMap/=np.sqrt(windowFactor)
    fImap=DegradeFourier(fT,lCut)
    fQUmap=DegradeFourier(fQU,lCut)

    # Step 3: Compute angle estimate
    powBtrue=fftTools.powerFromFFT(fBtrue)
    unpadded_powBtrue=fftTools.powerFromFFT(unpadded_totFmap)
    from hades.KKdebiased import derotated_estimator
    output=derotated_estimator(powBtrue,map_id,lMin=lMin,lMax=lMax,FWHM=FWHM,noise_power=noise_power,delensing_fraction=delensing_fraction,slope=slope)
    A,fs,fc,Afs,Afc,_=output
    HexPow2=Afs**2.+Afc**2. # biased hexadecapole power H^2
    
    if a.debias_dedust:
        from .RandomField import padded_fill_from_Cell
        bias_data=np.zeros(N_bias)

        def analytic_model(ell,A_est,slope):
            """Use the estimate for A to construct an analytic model.
            NB: This is just used for finding the centres of the actual binned data.
            """
            return total_Cl_noise(ell)+A_est*ell**(-slope)

        from .PowerMap import oneD_binning
        l_cen,mean_pow = oneD_binning(unpadded_powBtrue.copy(),lMin*padding_ratio,lCut,l_step*padding_ratio,binErr=False,exactCen=a.exactCen,\
                        C_ell_model=analytic_model,params=[A,slope])
        # gives central binning l and mean power in annulus using window function corrections

        # Create spline fit (in log-power, so the interpolated spectrum stays positive)
        from scipy.interpolate import UnivariateSpline
        spl=UnivariateSpline(l_cen,np.log(mean_pow),k=5)
        def spline(ell):
            return np.exp(spl(ell))

        # Precompute useful data:
        from hades.RandomField import precompute
        precomp=precompute(padded_window.copy(),spline,lMin=lMin,lMax=lMax)

        for n in range(N_bias):
            if n%100==0:
                print('Computing bias sim %s of %s' %(n+1,N_bias))
            fBias=padded_fill_from_Cell(padded_window.copy(),l_cen,mean_pow,lMin=lMin,unPadded=a.unPadded,precomp=precomp)
            bias_cross=fftTools.powerFromFFT(fBias.copy(),totFmap.copy()) # cross map
            bias_self=fftTools.powerFromFFT(fBias.copy()) # self map
            # First compute estimators on the cross-spectrum (amplitude debiasing deliberately off here)
            cross_ests=derotated_estimator(bias_cross.copy(),map_id,lMin=lMin,lMax=lMax,slope=slope,\
                            factor=A,FWHM=FWHM,noise_power=noise_power,\
                            rot=a.rot,delensing_fraction=delensing_fraction,useTensors=a.useTensors,\
                            debiasAmplitude=False,rot_average=a.rot_average,KKdebiasH2=False)
            self_ests=derotated_estimator(bias_self.copy(),map_id,lMin=lMin,lMax=lMax,slope=slope,\
                            factor=A,FWHM=FWHM,noise_power=noise_power,\
                            rot=a.rot,delensing_fraction=delensing_fraction,useTensors=a.useTensors,\
                            debiasAmplitude=True,rot_average=a.rot_average,KKdebiasH2=a.KKdebiasH2)
            # Per-sim H^2 bias term: 4*H_cross^2 - H_self^2, window-corrected
            bias_data[n]=(-1.*(self_ests[3]**2.+self_ests[4]**2.)+4.*(cross_ests[3]**2.+cross_ests[4]**2.))*wCorrection
        # Now compute the mean bias - this debiases the DATA only
        bias=np.mean(bias_data)
        del bias_self,bias_cross
    else:
        bias=0. # no debiasing applied

    HexPow2-=bias
    
    norm=np.sqrt(Afs**2.+Afc**2.)
    fsbar,fcbar=Afs/norm,Afc/norm

    sin2a=fsbar/np.sqrt(2.*(fcbar+1.))
    cos2a=np.sqrt((1.+fcbar)/2.)
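    # Half-angle identities used above: cos(2a) = sqrt((1+cos 4a)/2) and
    # sin(2a) = sin(4a)/(2 cos 2a) = fsbar/sqrt(2(1+fcbar)); taking cos(2a) >= 0
    # leaves a residual pi/2 ambiguity in a, resolved by the sign test in Step 6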

    # Step 4: Compute B estimate
    angleMap=fImap.thetaMap*np.pi/180.
    fB_est=fImap.copy()
    if useQU:
    	baseMap=fQUmap.copy()
    else:
    	baseMap=fImap.copy()
    fB_est.kMap=baseMap.kMap*(sin2a*np.cos(2.*angleMap)-cos2a*np.sin(2.*angleMap))
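    # (this is baseMap * sin(2a - 2theta_l): the B-mode Fourier pattern of emission with uniform polarisation angle a)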

    # Step 5: Now compute cross coefficient
    crossPow=fftTools.powerFromFFT(fB_est,fBtrue)
    estPow=fftTools.powerFromFFT(fB_est,fB_est)

    from hades.PowerMap import oneD_binning
    lC,pC=oneD_binning(crossPow,lMin,lMax/2.,l_step,exactCen=False)
    lE,pE=oneD_binning(estPow,lMin,lMax/2.,l_step,exactCen=False)
    lB,pB=oneD_binning(powBtrue,lMin,lMax/2.,l_step,exactCen=False)
    ratio=np.array(pC)/np.array(pE)
    sign=np.sign(np.mean(ratio))

    # Step 6: Now compute the actual angle
    alpha0=0.25*np.arctan2(fsbar,fcbar) # range is [-pi/4,pi/4]
    if sign==-1.0:
        alpha0+=np.pi/2.
       
    # Step 7: Compute the rescaling ratio (|H^2|/scaling)^(1/4), where scaling is <I^4> (or <QU^4> if useQU)
    ratio=(np.abs(HexPow2)/scaling)**0.25
    
    alpha_deg=alpha0*180./np.pi
    print('MapID: %s Angle: %.2f Ratio: %.2e' %(map_id,alpha_deg,ratio))
    
    return alpha_deg,ratio
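As a sanity check on the angle-recovery logic (Steps 3 and 6 above), here is a small self-contained sketch in plain numpy, independent of hades: the `sign` variable below stands in for the cross-spectrum sign test, and polarisation angles are only defined modulo 180 degrees.

import numpy as np

def recover_angle(alpha_true_deg):
    """Recover alpha from (sin 4a, cos 4a) via half-angle formulas plus a sign test."""
    fsbar=np.sin(4.*np.radians(alpha_true_deg)) # what the estimator measures
    fcbar=np.cos(4.*np.radians(alpha_true_deg))
    sin2a=fsbar/np.sqrt(2.*(fcbar+1.)) # half-angle formulas, as in Step 3
    cos2a=np.sqrt((1.+fcbar)/2.)
    alpha0=0.25*np.arctan2(fsbar,fcbar) # range [-pi/4,pi/4], as in Step 6
    # Stand-in for the cross-spectrum test: +1 if the candidate 2a matches the
    # true 2a, -1 if it is off by pi
    sign=np.sign(np.cos(2.*np.radians(alpha_true_deg))*cos2a+np.sin(2.*np.radians(alpha_true_deg))*sin2a)
    if sign==-1.0:
        alpha0+=np.pi/2.
    return np.degrees(alpha0)

for alpha in [-60.,-20.,10.,40.,80.]:
    rec=recover_angle(alpha)
    d=abs(rec-alpha)%180.
    assert min(d,180.-d)<1e-6 # recovered angle agrees modulo 180 degrees
    print('true alpha = %6.1f deg -> recovered %6.1f deg' %(alpha,rec))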