Example 1
# NB: numpy (as np) and the run-parameter object `a` are assumed to be
# imported at module level elsewhere in the package.
def patch_hexadecapole(map_size=a.map_size, sep=a.sep, FWHM=a.FWHM, noise_power=a.noise_power,
            freq=a.freq, delensing_fraction=a.delensing_fraction, N_sims=a.N_sims, suffix='', folder=None,
            root_dir=a.root_dir, I2SNR=a.I2SNR, returnAll=False, plot=True, monopole=False, display=False):
    """Compute the global hexadecapole anisotropy over the patch, summing the epsilon values weighted by the S/N.
    The estimate is assumed Gaussian by Central Limit Theorem.
    Errors are obtained by computing estimate for many MC sims
    
    returnAll parameter returns mean,std,estimate H^2 values
    plot parameter controls whether to create H^2 histogram
    """
    # Load array of map ids
    import os

    goodDir = root_dir + '%sdeg%sGoodIDs.npy' % (map_size, sep)
    if not os.path.exists(goodDir):
        print('creating good IDs')
        from hades.batch_maps import create_good_map_ids
        create_good_map_ids(map_size=map_size, sep=sep, root_dir=root_dir)
    goodMaps = np.load(goodDir)

    if I2SNR:
        I2dir = root_dir + '%sdeg%s/meanI2.npy' % (map_size, sep)
        QUdir = root_dir + '%sdeg%s/meanQU.npy' % (map_size, sep)
        if not os.path.exists(I2dir):
            raise Exception('Must compute <I^2> data by running batchI2.py')
        I2data = np.load(I2dir)
        QUdata = np.load(QUdir)

    # Initialize variables
    Asum, trueAsum = 0., 0.
    count = 0
    hex2_patch_num = 0.
    A_patch_num = 0.  # for mean amplitude shift
    norm = 0.  # normalisation
    hex2_patch_MC_num = np.zeros(N_sims)
    A_patch_MC_num = np.zeros(N_sims)
    h2_est, h2_mean, h2_eps, bias_data = [], [], [], []
    for i in range(len(goodMaps)):
        # Load dataset
        if root_dir == '/data/ohep2/liteBIRD/':
            ij = goodMaps[i]
        else:
            ij = i  # for compatibility
        if folder is None:
            if a.true_lensing:
                folder = 'LensedPaddedBatchData'
            else:
                folder = 'PaddedBatchData'
        datPath = root_dir + folder + '/f%s_ms%s_s%s_fw%s_np%s_d%s/%s%s.npy' % (
            freq, map_size, sep, FWHM, noise_power, delensing_fraction, ij,
            suffix)
        if root_dir == '/data/ohep2/liteBIRD/':
            if not os.path.exists(datPath):
                continue
        data = np.load(datPath)
        A = data[0][1]
        A_est = data[0][0]
        A_MC = data[7][0]
        A_eps = data[0][2]
        hex2_est = data[9][0]
        hex2_MC = data[7][7]
        hex2_mean = data[9][1]
        hex2_eps = data[9][2]
        trueA = data[10]
        Asum += A_est
        trueAsum += trueA
        count += 1
        if returnAll:
            h2_est.append(hex2_est)
            h2_mean.append(hex2_mean)
            h2_eps.append(hex2_eps)
            bias_data.append(data[11])

        # Compute contribution to mean epsilon
        if I2SNR:
            SNR = 1.  # uniform weighting (alternative S/N weights, e.g. (trueA**2./hex2_eps)**2., disabled)
        else:
            SNR = 1.  # uniform weighting ((trueA/hex_eps)**2. weighting disabled)
        hex2_patch_num += SNR * hex2_est  # MC-mean (bias) subtraction is applied below, depending on a.useBias
        A_patch_num += SNR * A_est
        norm += SNR
        for j in range(N_sims):
            hex2_patch_MC_num[j] += SNR * hex2_MC[j]
            A_patch_MC_num[j] += SNR * A_MC[j]

    # Compute mean epsilon + MC values
    hex2_patch = hex2_patch_num / norm
    hex2_patch_MC = hex2_patch_MC_num / norm
    A_patch = A_patch_num / norm
    A_patch_MC = A_patch_MC_num / norm

    # Compute monopole mean
    trueA = trueAsum / count
    A = Asum / count
    # Compute mean and standard deviation
    MC_mean = np.mean(hex2_patch_MC)
    MC_std = np.std(hex2_patch_MC)
    A_MC_mean = np.mean(A_patch_MC)
    A_MC_std = np.std(A_patch_MC)

    # Compute significance of detection
    if a.useBias:
        sigmas = hex2_patch / MC_std  # estimates already debiased when a.useBias is set
    else:
        sigmas = (hex2_patch - MC_mean) / MC_std  # remove bias here
    sigmasA = (A_patch - A_MC_mean) / A_MC_std

    if plot:
        # Now plot
        import matplotlib.pyplot as plt
        y, x, _ = plt.hist(hex2_patch_MC, bins=30, density=True)
        plt.ylabel('PDF')
        plt.xlabel('Patch Averaged Bias-corrected H^2')
        plt.title(
            '%.2f Sigma // Patch Averaged Corrected H^2 // %s patches & %s sims'
            % (sigmas, len(goodMaps), N_sims))
        xpl = np.ones(100) * hex2_patch
        ypl = np.linspace(0, max(y), 100)
        plt.plot(xpl, ypl, ls='--', c='r')
        plt.ylim(0, max(y))
        outDir = root_dir + 'PatchHexCorrectedPow/'
        if not os.path.exists(outDir):
            os.makedirs(outDir)
        plt.savefig(
            outDir + 'hist_f%s_ms%s_s%s_fw%s_np%s_d%s.png' %
            (freq, map_size, sep, FWHM, noise_power, delensing_fraction),
            bbox_inches='tight')
        plt.clf()
        plt.close()

    if returnAll:
        return (np.mean(h2_mean), np.mean(h2_eps), np.mean(h2_est),
                sigmas, sigmasA, np.mean(bias_data))
    else:
        if monopole:
            return sigmas, sigmasA, A, trueA
        else:
            if display:
                print(
                    'The significance of a detection of dust anisotropy in this region is %.2f sigma'
                    % (sigmas))
                return None
            else:
                return sigmas, sigmasA
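
A minimal, self-contained sketch of the patch-averaging and significance step above, using synthetic stand-in numbers rather than the pipeline's per-tile .npy data (all values and shapes here are illustrative assumptions):

import numpy as np

rng = np.random.default_rng(0)
n_tiles, n_sims = 50, 200
hex2_tiles = rng.normal(1.0, 0.3, n_tiles)         # stand-in per-tile H^2 estimates
hex2_MC = rng.normal(0.0, 0.3, (n_tiles, n_sims))  # stand-in isotropic MC sims per tile
SNR = np.ones(n_tiles)                             # uniform weights, as in the code above

# Weighted patch average of the data and of each MC realisation
norm = SNR.sum()
hex2_patch = (SNR * hex2_tiles).sum() / norm
hex2_patch_MC = hex2_MC.T.dot(SNR) / norm          # one patch average per simulation

# Significance: debias by the MC mean, normalise by the MC scatter
sigmas = (hex2_patch - hex2_patch_MC.mean()) / hex2_patch_MC.std()
print('%.2f sigma' % sigmas)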
Example 2
	
	batch_id = int(sys.argv[1])  # index of this batch job, passed on the command line
	
	# First load good IDs:
	goodFile = a.root_dir + '%sdeg%sGoodIDs.npy' % (a.map_size, a.sep)
	
	outDir = a.root_dir + 'PaddedBatchData/f%s_ms%s_s%s_fw%s_np%s_d%s/' % (a.freq, a.map_size, a.sep, a.FWHM, a.noise_power, a.delensing_fraction)
	
	if a.remakeErrors:
		if os.path.exists(outDir + '%s.npy' % batch_id):
			print('output exists; exiting')
			sys.exit()
	
	if batch_id < 110:  # create the good-ID file on the first jobs only
		print('creating good IDs')
		from hades.batch_maps import create_good_map_ids
		create_good_map_ids()
	
	goodIDs = np.load(goodFile)
	
	
	if batch_id > len(goodIDs) - 1:
		print('Process %s terminating' % batch_id)
		sys.exit()  # stop here
	
	map_id = goodIDs[batch_id]  # this defines the tile used here
	
	print('%s starting for map_id %s' % (batch_id, map_id))

		
	# Now run the estimation
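
The fragment above assumes a preamble roughly like the following (a sketch only; the actual imports, and the parameter object `a`, live elsewhere in the hades package):

import os
import sys
import numpy as np
# `a` carries the run parameters referenced above (root_dir, map_size, sep,
# FWHM, noise_power, freq, delensing_fraction, remakeErrors); its import is
# package-specific and omitted here.

Each batch job is then launched with its index as the single command-line argument, e.g. `python <script>.py 17` from a job array.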
Example 3
def hex_fullsky_stats(map_size=a.map_size, sep=a.sep, FWHM=a.FWHM, noise_power=a.noise_power,
    freq=a.freq, delensing_fraction=a.delensing_fraction, makePlots=False):
    """Create per-tile plots, mostly using the hexadecapolar power.
    Plots are saved in the CorrectedMaps/ directory. This is designed to read
    masked full-sky data, such as that produced by cutoutAndSimulate.py."""

    raise Exception('Update to include H^2 stats')  # placeholder: disables this function pending the H^2 update
    import matplotlib.pyplot as plt
    from scipy.stats import percentileofscore
    import os

    # Import good map IDs
    goodDir = a.root_dir + '%sdeg%sGoodIDs.npy' % (map_size, sep)
    if not os.path.exists(goodDir):
        print('creating good IDs')
        from hades.batch_maps import create_good_map_ids
        create_good_map_ids()
    goodMaps = np.load(goodDir)

    # Define arrays
    A, Afs, Afc, fs, fc, ang, frac, probA, probP, logA, epsDeb, HPow, HPowDeb, HPowSig, HprobA, HprobP, logHPowMean = [
        np.zeros(len(goodMaps)) for _ in range(17)
    ]
    logHexPow, logHexPowDeb, A_err, Af_err, f_err, ang_err, frac_err, frac_mean = [
        np.zeros(len(goodMaps)) for _ in range(8)
    ]

    # Define output directories:
    outDir = a.root_dir + 'CorrectedMaps/f%s_ms%s_s%s_fw%s_np%s_d%s/' % (
        freq, map_size, sep, FWHM, noise_power, delensing_fraction)

    if not os.path.exists(outDir):
        os.makedirs(outDir)

    # Iterate over maps:
    for i in range(len(goodMaps)):
        map_id = goodMaps[i]  # map id number
        if i % 100 == 0:
            print('loading %s of %s' % (i + 1, len(goodMaps)))
        # Load in data from tile
        data = np.load(a.root_dir +
                       'PaddedBatchData/f%s_ms%s_s%s_fw%s_np%s_d%s/%s.npy' %
                       (freq, map_size, sep, FWHM, noise_power,
                        delensing_fraction, map_id))

        # Load in data
        A[i], fs[i], fc[i], Afs[i], Afc[i], frac[i], ang[i] = [
            d[0] for d in data[:7]
        ]  # NB: only A, Afs, Afc, ang are useful
        # NB: A is already debiased
        if A[i] > 0:
            logA[i] = np.log10(A[i])
        else:
            logA[i] = 1000.  # to avoid errors - treat these separately

        A_err[i], fs_err, fc_err, Afs_err, Afc_err, frac_err[i] = [
            d[2] for d in data[:6]
        ]
        frac_mean[i] = data[5][1]

        # Compute debiased H
        HPow[i] = data[9][0]
        HPow_MC = data[7][7]
        logHPowMean[i] = np.log10(np.mean(HPow_MC))
        HPowDeb[i] = data[9][0] - data[9][1]
        HPowSig[i] = (data[9][0] - data[9][1]) / data[9][2]  # not very useful
        logHexPow[i] = np.log10(HPow[i])
        if HPowDeb[i] > 0.:
            logHexPowDeb[i] = np.log10(HPowDeb[i])
        else:
            logHexPowDeb[i] = 1000.

        # Compute other errors
        f_err[i] = np.mean([fs_err, fc_err])  # not useful
        Af_err[i] = np.mean([Afs_err, Afc_err])
        ang_err[i] = Af_err[i] / (4 * HPow[i]) * 180. / np.pi  # error in angle

        # Compute statistics for Hexadecapolar power
        HprobP[i] = percentileofscore(
            HPow_MC, HPow[i],
            kind='mean')  # statistical percentile of estimated data

        def H_CDF(H, sigma_Af):
            """CDF of HexPow modified chi2 distribution"""
            return 1 - np.exp(-H**2. / (2. * sigma_Af**2.))

        #def H_PDF(H,sigma_Af):
        #	"""PDF of HexPow modified Chi2 distribution"""
        #	return H/(sigma_Af**2.)*np.exp(-H**2./(2.*sigma_Af**2.))
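        # Sanity check: H_CDF above is the Rayleigh CDF; differentiating
        # 1 - exp(-H^2/(2*sigma_Af^2)) with respect to H recovers the PDF
        # commented out above, H/sigma_Af^2 * exp(-H^2/(2*sigma_Af^2)).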

        # Compute analytic CDF percentile:
        HprobA[i] = 100. * H_CDF(HPow[i], Af_err[i])

        # Now repeat for epsilon - NB: this uses the debiased monopole amplitudes implicitly
        eps = data[7][5]  # all epsilon data
        eps_est = data[5][0]
        epsDeb[i] = eps_est - np.mean(eps)

        percentile = percentileofscore(
            eps, eps_est, kind='mean')  # compute percentile of estimated data
        probP[i] = percentile
        sigma_f = np.mean([data[1][2], data[2][2]])  # mean of fs,fc errors

        def eps_CDF(eps):
            """CDF of the epsilon modified chi-squared distribution"""
            return 1 - np.exp(-eps**2. / (2. * sigma_f**2.))

        def eps_PDF(eps):
            """PDF of the epsilon modified chi-squared distribution"""
            return eps / (sigma_f**2.) * np.exp(-eps**2. / (2. * sigma_f**2.))

        # Compute analytic CDF percentile:
        probA[i] = 100. * eps_CDF(eps_est)

    ## Now compute the whole patch maps
    # First define dataset:
    dat_set = [
        HPow, HPowDeb, HPowSig, logHPowMean, logHexPow, logHexPowDeb, A, frac,
        epsDeb, ang, ang_err, HprobA, HprobP, probA, probP, logA
    ]
    names = [r'Hexadecapole Amplitude', r'Hexadecapole Debiased Amplitude',
             r'Hexadecapole "Significance"', r'log(Hexadecapole MC Isotropic Mean)',
             r'log(Hexadecapole Amplitude)', r'log(Hexadecapole Debiased Amplitude)',
             r'Debiased Monopole Amplitude', r'Anisotropy Fraction', r'Debiased Epsilon',
             r'Anisotropy Angle', r'Anisotropy Angle Error',
             r'Hexadecapole Percentile (Analytic)', r'Hexadecapole Percentile (Statistical)',
             r'Epsilon Percentile (Analytic)', r'Epsilon Percentile (Statistical)',
             r'log(Monopole Amplitude)']
    file_str = ['hexPow', 'hexPowDeb', 'hexPowSig', 'HexPowMeanMC', 'logHexPow',
                'logHexPowDeb', 'Adebiased', 'eps', 'epsDeb', 'ang', 'angErr',
                'hexProbAna', 'hexProbStat', 'epsProbAna', 'epsProbStat', 'logA']

    if len(names) != len(file_str) or len(names) != len(dat_set):
        raise Exception('Wrong length of files')  # for testing

    # Load coordinates of map centres
    from .NoisePower import good_coords
    ra, dec = good_coords(map_size, sep, len(goodMaps))
    # Load in border of BICEP region if necessary:
    border = False
    if a.root_dir in ('/data/ohep2/WidePatch/', '/data/ohep2/CleanWidePatch/', '/data/ohep2/FFP8/'):
        border = True  # for convenience
        from hades.plotTools import BICEP_border
        temp = BICEP_border(map_size, sep)
        if temp is not None:  # only use cases where the border is available
            edge_ra, edge_dec = temp
        else:
            border = False
    if border:
        border_coords = [edge_ra, edge_dec]
    else:
        border_coords = None  # for compatibility

    # Now plot on grid:
    from hades.plotTools import mollweide_map
    import cmocean  # for angle colorbar
    for j in range(len(names)):
        print('Generating patch map %s of %s' % (j + 1, len(names)))
        cmap = 'jet'
        minMax = None
        if file_str[j] == 'ang':
            cmap = cmocean.cm.phase
        if file_str[j] == 'angErr':
            minMax = [0., 45.]
        if file_str[j] == 'epsDeb':
            minMax = [-5, 5]
        if file_str[j] == 'eps':
            vmin = min(dat_set[j])
            vmax = min([1., np.percentile(dat_set[j], 95)])
            minMax = [vmin, vmax]
        decA, raA = dec.copy(), ra.copy()
        if file_str[j] in ('logA', 'logHexPowDeb'):
            ids = np.where(dat_set[j] != 1000.)  # drop the sentinel values
            dat_set[j] = dat_set[j][ids]
            raA = raA[ids]
            decA = decA[ids]
        # Create plot
        mollweide_map(dat_set[j], raA, decA, cbar_label=names[j], cmap=cmap, minMax=minMax,
            border=border_coords, outFile=outDir + file_str[j] + '.png',
            decLims=[-90, 90, 10], raLims=[-180, 180, 10])
    print('Plotting complete')