Example #1
def get_beam_dic(freqs,
                 beam_noise_dic,
                 lmax,
                 opbeam=None,
                 make_2d=0,
                 mapparams=None):
    bl_dic = {}
    for freq in freqs:
        beamval, noiseval = beam_noise_dic[freq]
        bl_dic[freq] = H.gauss_beam(np.radians(beamval / 60.), lmax=lmax - 1)

        if make_2d:
            assert mapparams is not None
            el = np.arange(len(bl_dic[freq]))
            bl_dic[freq] = flatsky.cl_to_cl2d(el, bl_dic[freq], mapparams)

    if opbeam is not None:
        bl_dic['effective'] = H.gauss_beam(np.radians(opbeam / 60.),
                                           lmax=lmax - 1)

        if make_2d:
            assert mapparams is not None
            el = np.arange(len(bl_dic['effective']))
            bl_dic['effective'] = flatsky.cl_to_cl2d(el, bl_dic['effective'],
                                                     mapparams)

    return bl_dic
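
A minimal usage sketch for get_beam_dic (assuming `H` above is healpy and the
FWHM/noise values in the dictionary are illustrative, in arcmin):

import numpy as np
import healpy as H

beam_noise_dic = {95: (1.4, 5.0), 150: (1.2, 5.0)}  # freq: (FWHM [arcmin], noise)
bl_dic = get_beam_dic([95, 150], beam_noise_dic, lmax=5000, opbeam=1.0)
print(bl_dic[150].shape, bl_dic['effective'].shape)  # both (5000,)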
Example #2
    def __init__(self, mask_in, nside, bin_w, lmin, lmax, beams, wsp = True):
        
        '''
        Class for band-power estimation.

        Defines the apodized mask, beam weights, nside, binning scheme and ells.

        ------------------------
        beams : numpy array of FWHMs in arcmin, one per frequency. Beams are
            deconvolved up to lmax = 3*nside - 1.
        '''
        self.mask = nmt.mask_apodization(mask_in, 6, apotype='C2')
        
        self.nside = nside
        self.lmax = lmax
        self.Nf = len(beams)
        self.beams = beams
        
#         self.beam = hp.gauss_beam(beams/60/180*np.pi, lmax = 3*self.nside); 
        
#         self.b = nmt.NmtBin(self.nside, nlb=bin_w, lmax=self.lmax, is_Dell = True)
        self.b = self.bands(bin_w = bin_w, lmin = lmin, lmax = lmax);
        
        self.ell_n = self.b.get_effective_ells()
        self.lbin = len(self.ell_n)
#         self.w00 = [];
#         self.w02 = [];
        self.w22 = [];
                
        # Construct an empty template with a mask to calculate the coupling matrix
        
        if wsp is True:
            
            qu = np.ones((2, 12*self.nside**2))

            for i in range(self.Nf):
                
                beam_i = hp.gauss_beam(beams[i]/60/180*np.pi, lmax = 3*self.nside - 1);
                
#                 m0 = nmt.NmtField(self.mask,[qu[0]],purify_e=False, purify_b=True, beam = beam_i);
                
#                 # construct a workspace that calculate the coupling matrix first.
#                 _w00 = nmt.NmtWorkspace()
#                 _w00.compute_coupling_matrix(m0, m0, self.b)  ## spin-0 with spin-0
                
#                 self.w00.append(_w00);
                
                for j in range(self.Nf):
                    
                    beam_j = hp.gauss_beam(beams[j]/60/180*np.pi, lmax = 3*self.nside - 1);
                    
                    m20 = nmt.NmtField(self.mask, qu, purify_e=False, purify_b=True, beam = beam_i);
                    m21 = nmt.NmtField(self.mask, qu, purify_e=False, purify_b=True, beam = beam_j);
            
#                     _w02 = nmt.NmtWorkspace()
#                     _w02.compute_coupling_matrix(m0, m21, self.b)  ## spin-0 with spin-2

                    _w22 = nmt.NmtWorkspace()
                    _w22.compute_coupling_matrix(m20, m21, self.b)  ## spin-2 with spin-2

            
#                     self.w02.append(_w02); 
                    self.w22.append(_w22)
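
A standalone check of the beam convention used above: NaMaster expects the
beam sampled at every multipole from 0 to 3*nside - 1, which is what
hp.gauss_beam returns when called with lmax = 3*nside - 1 (values here are
illustrative):

import numpy as np
import healpy as hp

nside, fwhm_arcmin = 256, 30.0
beam = hp.gauss_beam(fwhm_arcmin / 60 / 180 * np.pi, lmax=3 * nside - 1)
assert beam.size == 3 * nside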
Example #3
    def get_fields(self, map, mask_apo=None, purify_e=False, purify_b=True, beam_correction=None):
        """

        Parameters
        ----------
        map: array
            IQU maps, shape (3, #pixels)
        mask_apo: array, optional (if not given, the one set at the object's instantiation is used)
            Apodized mask.
        purify_e: bool, optional
            False by default.
        purify_b: bool, optional
            True by default.
            Note that generally it's not a good idea to purify both,
            since you'll lose sensitivity on E
        beam_correction: bool or float, optional
            None by default.
            If True, a correction for the QUBIC beam at 150 GHz is applied.
            You can also give the beam FWHM (in degrees) you want to correct for.
        Returns
        -------
        f0, f2: spin-0 and spin-2 Namaster fields.

        """

        # The maps may contain hp.UNSEEN - They must be replaced with zeros
        undefpix = map == hp.UNSEEN
        map[undefpix] = 0
        mp_t, mp_q, mp_u = map
        nside = hp.npix2nside(len(mp_t))

        if mask_apo is None:
            mask_apo = self.mask_apo

        if beam_correction is not None:
            if beam_correction is True:
                # Default value for QUBIC at 150 GHz
                beam = hp.gauss_beam(np.deg2rad(0.39268176),
                                     lmax=3 * nside - 1)
            else:
                beam = hp.gauss_beam(np.deg2rad(beam_correction),
                                     lmax=3 * nside - 1)
        else:
            beam = None

        f0 = nmt.NmtField(mask_apo,
                          [mp_t],
                          beam=beam)

        f2 = nmt.NmtField(mask_apo,
                          [mp_q, mp_u],
                          purify_e=purify_e,
                          purify_b=purify_b,
                          beam=beam)

        self.f0 = f0
        self.f2 = f2

        return f0, f2
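
A hedged end-to-end sketch following get_fields: once f0 and f2 exist,
bandpowers follow from the standard NaMaster calls. The binning call mirrors
the commented-out NmtBin usage in Example #2; the trivial mask and toy maps
are illustrative assumptions, not from the source:

import numpy as np
import healpy as hp
import pymaster as nmt

nside = 64
mask_apo = np.ones(hp.nside2npix(nside))
iqu = 1e-6 * np.random.randn(3, hp.nside2npix(nside))
b = nmt.NmtBin(nside, nlb=16)
f0 = nmt.NmtField(mask_apo, [iqu[0]])
f2 = nmt.NmtField(mask_apo, [iqu[1], iqu[2]], purify_b=True)
cl22 = nmt.compute_full_master(f2, f2, b)  # binned EE, EB, BE, BB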
Example #4
 def crosWSP_TT(self, maps, beams=[None,None]):
     # assemble NaMaster fields
     if beams[0] is None:
         f01 = nmt.NmtField(self._apomask, [maps[0]])
     else:
         f01 = nmt.NmtField(self._apomask, [maps[0]], beam=hp.gauss_beam(beams[0], 3*self._nside-1))
     if beams[1] is None:
         f02 = nmt.NmtField(self._apomask, [maps[3]])
     else:
         f02 = nmt.NmtField(self._apomask, [maps[3]], beam=hp.gauss_beam(beams[1], 3*self._nside-1))
     # prepare workspace
     w = nmt.NmtWorkspace()
     w.compute_coupling_matrix(f01, f02, self._b)
     return w
Example #5
def get_alm_maps(pixel_maps,
                 fwhms,
                 resol_correction=False,
                 ref_fwhm=0,
                 pixwin_correction=False,
                 verbose=False):
    """
    Compute alm maps from pixel maps and format them for FgBuster.
    """
    sh = np.shape(pixel_maps)
    nside = hp.npix2nside(sh[2])
    n = sh[0]
    lmax = 2 * nside + 1
    ell = np.arange(start=0, stop=lmax + 1)

    ref_sigma_rad = np.deg2rad(ref_fwhm) / 2.355
    #ref_fl = np.exp(- 0.5 * np.square(ref_sigma_rad * ell))
    ref_fl = hp.gauss_beam(np.deg2rad(ref_fwhm), lmax=lmax)

    if verbose:
        print('In get_alm_maps: FWHM = ', fwhms)
    beam_sigmas_rad = np.deg2rad(fwhms) / (2 * np.sqrt(2 * np.log(2)))
    pixwin = hp.pixwin(nside,
                       lmax=lmax) if pixwin_correction else np.ones(lmax + 1)

    # compute maps
    #figure()
    alm_maps = None
    for f in range(n):
        alms = hp.map2alm(pixel_maps[f], lmax=lmax, pol=True)
        correction = None
        if f == 0:
            sh = np.shape(alms)
            alm_maps = np.empty((n, sh[0], 2 * sh[1]))
        if resol_correction:
            print('Applying Resol Correction')
            #gauss_fl = np.exp(- 0.5 * np.square(beam_sigmas_rad[f] * ell))
            gauss_fl = hp.gauss_beam(np.deg2rad(fwhms[f]), lmax=lmax)
            correction = ref_fl / gauss_fl / pixwin
            #plot(correction, label='freq {}'.format(f))
        else:
            print('No Resol Correction applied')
        for i, t in enumerate(alms):
            alm_maps[f, i] = format_alms(
                hp.almxfl(t, correction) if resol_correction else t)
    #legend()
    #title('Bl ratio in get_alm_maps')
    return alm_maps
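
The resolution correction above can be checked in isolation: re-convolving
alms from a sharper beam to a broader reference beam multiplies each alm by
B_ref(ell) / B_in(ell), which drops below 1 at high ell (FWHMs illustrative):

import numpy as np
import healpy as hp

lmax = 512
bl_in = hp.gauss_beam(np.deg2rad(0.2), lmax=lmax)   # input beam, 0.2 deg FWHM
bl_ref = hp.gauss_beam(np.deg2rad(0.5), lmax=lmax)  # reference beam, 0.5 deg
correction = bl_ref / bl_in  # apply to alms with hp.almxfl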
Example #6
def get_isocov(params, exp, LD_res, HD_res=14, pyFFTWthreads=4):
    """
    Set HD_res to 14 for full sky sampled at res LD.
    """
    #sN_uKamin, sN_uKaminP, Beam_FWHM_amin, ellmin, ellmax = get_config(exp)
    sN_uKamin = params.sN_uKamin
    sN_uKaminP = params.sN_uKaminP
    Beam_FWHM_amin = params.Beam_FWHM_amin
    ellmin = params.lmin
    ellmax = params.lmax
    cls_unl, cls_len = get_fidcls(ellmax_sky=ellmax_sky,path_to_inputs=path_to_inputs)

    cls_noise = {}
    cls_noise['t'] = (sN_uKamin * np.pi / 180. / 60.) ** 2 * np.ones(ellmax_sky + 1)  # simple flat noise Cls
    cls_noise['q'] = (sN_uKaminP * np.pi / 180. / 60.) ** 2 * np.ones(ellmax_sky + 1)  # simple flat noise Cls
    cls_noise['u'] = (sN_uKaminP * np.pi / 180. / 60.) ** 2 * np.ones(ellmax_sky + 1)  # simple flat noise Cls
    cl_transf = hp.gauss_beam(Beam_FWHM_amin / 60. * np.pi / 180., lmax=ellmax_sky)
    lib_alm = ffs_covs.ell_mat.ffs_alm_pyFFTW(get_ellmat(LD_res, HD_res=HD_res),
                                              filt_func=lambda ell: (ell >= ellmin) & (ell <= ellmax),
                                              num_threads=pyFFTWthreads)
    lib_skyalm = ffs_covs.ell_mat.ffs_alm_pyFFTW(get_ellmat(LD_res, HD_res=HD_res),
                                                 filt_func=lambda ell: (ell <= ellmax_sky), num_threads=pyFFTWthreads)
    if os.path.exists('/global'):
        lib_dir = '/global/cscratch1/sd/markm/lensit/temp/Covs/%s/LD%sHD%s' % (exp, LD_res, HD_res)
    else:
        lib_dir = '/media/sf_C_DRIVE/Users/DarkMatter42/OneDrive - University of Sussex/LensIt/temp/Covs/%s/LD%sHD%s' % (exp, LD_res, HD_res)
    return ffs_covs.ffs_cov.ffs_diagcov_alm(lib_dir, lib_alm, cls_unl, cls_len, cl_transf, cls_noise,
                                            lib_skyalm=lib_skyalm)
Example #7
 def test_gauss_beam(self):
     idl_gauss_beam = np.array(
         pf.open(
             os.path.join(self.path, 'data',
                          'gaussbeam_10arcmin_lmax512_pol.fits'))[0].data).T
     gauss_beam = hp.gauss_beam(np.radians(10. / 60.), lmax=512, pol=True)
     np.testing.assert_allclose(idl_gauss_beam, gauss_beam)
Example #8
 def test_gauss_beam(self):
     with pf.open(
             os.path.join(self.path, "data",
                          "gaussbeam_10arcmin_lmax512_pol.fits")) as f:
         idl_gauss_beam = np.array(f[0].data).T
     gauss_beam = hp.gauss_beam(np.radians(10.0 / 60.0), lmax=512, pol=True)
     np.testing.assert_allclose(idl_gauss_beam, gauss_beam)
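
Both tests rely on the pol=True behaviour of hp.gauss_beam: it returns an
(lmax+1, 4) array holding the T, E, B and TE window functions, matching the
transposed IDL reference array:

import numpy as np
import healpy as hp

bl = hp.gauss_beam(np.radians(10.0 / 60.0), lmax=512, pol=True)
print(bl.shape)  # (513, 4)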
Example #9
def get_isocov(exp, LD_res, HD_res=14, pyFFTWthreads=4):
    """
    Set HD_res to 14 for full sky sampled at res LD.
    """
    sN_uKamin, sN_uKaminP, Beam_FWHM_amin, ellmin, ellmax = get_config(exp)
    cls_unl, cls_len = get_fidcls(ellmax_sky=ellmax_sky)

    cls_noise = {}
    cls_noise['t'] = (sN_uKamin * np.pi / 180. / 60.)**2 * np.ones(
        ellmax_sky + 1)  # simple flat noise Cls
    cls_noise['q'] = (sN_uKaminP * np.pi / 180. / 60.)**2 * np.ones(
        ellmax_sky + 1)  # simple flat noise Cls
    cls_noise['u'] = (sN_uKaminP * np.pi / 180. / 60.)**2 * np.ones(
        ellmax_sky + 1)  # simple flat noise Cls
    cl_transf = hp.gauss_beam(Beam_FWHM_amin / 60. * np.pi / 180.,
                              lmax=ellmax_sky)
    lib_alm = ffs_covs.ell_mat.ffs_alm_pyFFTW(
        get_ellmat(LD_res, HD_res=HD_res),
        filt_func=lambda ell: (ell >= ellmin) & (ell <= ellmax),
        num_threads=pyFFTWthreads)
    lib_skyalm = ffs_covs.ell_mat.ffs_alm_pyFFTW(get_ellmat(LD_res,
                                                            HD_res=HD_res),
                                                 filt_func=lambda ell:
                                                 (ell <= ellmax_sky),
                                                 num_threads=pyFFTWthreads)

    lib_dir = LENSITDIR + '/temp/Covs/%s/LD%sHD%s' % (exp, LD_res, HD_res)
    return ffs_covs.ffs_cov.ffs_diagcov_alm(lib_dir,
                                            lib_alm,
                                            cls_unl,
                                            cls_len,
                                            cl_transf,
                                            cls_noise,
                                            lib_skyalm=lib_skyalm)
Example #10
def GetTauMap(tmap,
              cltt,
              nltt,
              nside=None,
              lmax=None,
              mmax=None,
              fwhm=0.,
              pixwin=False):
    if nside is None:
        nside = hp.npix2nside(tmap.size)
    if lmax is None:
        lmax = 2 * nside

    assert (len(cltt) >= lmax + 1)
    assert (len(nltt) >= lmax + 1)

    talm = hp.map2alm(tmap, lmax=lmax, mmax=mmax)

    if fwhm != 0.:
        bl = hp.gauss_beam(np.radians(fwhm / 60.), lmax=lmax)
        talm = hp.almxfl(talm, bl, mmax=mmax)

    fl1 = cltt / (cltt + nltt)  # probably nltt has to be divided by bl
    fl2 = 1. / (cltt + nltt)  # probably nltt has to be divided by bl
    fl1[:2] = 0.
    fl2[:2] = 0.

    alm1 = hp.almxfl(talm, fl1, mmax=mmax)
    alm2 = hp.almxfl(talm, fl2, mmax=mmax)

    return hp.alm2map(
        alm1, nside, lmax=lmax, mmax=mmax, pixwin=pixwin,
        fwhm=fwhm) * hp.alm2map(
            alm2, nside, lmax=lmax, mmax=mmax, pixwin=pixwin, fwhm=fwhm)
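
A hedged usage sketch for GetTauMap; the input spectra are toys chosen only
so that the filters fl1 and fl2 are well defined:

import numpy as np
import healpy as hp

nside, lmax = 128, 256
ell = np.arange(lmax + 1)
cltt = 1e-10 / (ell + 10.0)**2    # toy signal spectrum
nltt = np.full(lmax + 1, 1e-13)   # flat toy noise spectrum
tmap = hp.synfast(cltt, nside, lmax=lmax)
taumap = GetTauMap(tmap, cltt, nltt, nside=nside, lmax=lmax)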
Example #11
def _get_alms(data, beams=None, lmax=None, weights=None, iter=3):
    alms = []
    for f, fdata in enumerate(data):
        if weights is None:
            alms.append(hp.map2alm(fdata, lmax=lmax, iter=iter))
        else:
            alms.append(
                hp.map2alm(hp.ma(fdata) * weights, lmax=lmax, iter=iter))
        logging.info(f"{f+1} of {len(data)} complete")
    alms = np.array(alms)

    if beams is not None:
        logging.info('Correcting alms for the beams')
        for fwhm, alm in zip(beams, alms):
            bl = hp.gauss_beam(np.radians(fwhm / 60.0),
                               lmax,
                               pol=(alm.ndim == 2))
            if alm.ndim == 1:
                alm = [alm]
                bl = bl[:, np.newaxis]  # so that bl.T below yields one row

            for i_alm, i_bl in zip(alm, bl.T):
                hp.almxfl(i_alm, 1.0 / i_bl, inplace=True)

    return alms
Example #12
def alms_from_maps(maps, beams=None):
    """ Function to get alms from healpix maps.

    Correct for the beams (if any). Beams are assumed to be gaussian.
    Parameters
    ----------
    maps : list
        list containing the frequency maps that can have different nside
    beams : ndarray
        beams associated with each map (assumed gaussian)
    Returns
    -------
    alms : ndarray
        array storing the spherical harmonic transform of each freq map.
        Shape is ``(freqs, lm, ri)``

    """
    if not isinstance(maps, list):
        maps = [maps]
    lmax_maps_list = [3 * hp.get_nside(fmap) - 1 for fmap in maps]
    lmax = min(lmax_maps_list)
    alms = []
    for f, fmaps in enumerate(maps):
        alms.append(hp.map2alm(fmaps, lmax=lmax))
    if beams is not None:
        for fwhm, alm in zip(beams, alms):
            lmax = hp.Alm.getlmax(np.shape(alm)[-1])
            bl = hp.gauss_beam(np.radians(fwhm / 60.0), lmax,
                               pol=(np.ndim(alm) == 2))
            if np.ndim(alm) == 1:
                alm = [alm]
                bl = bl[:, np.newaxis]  # so that bl.T below yields one row
            for i_alm, i_bl in zip(alm, bl.T):
                hp.almxfl(i_alm, 1.0 / i_bl, inplace=True)
    return np.array(alms)
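
A minimal usage sketch: two toy scalar maps at different nside; the common
lmax is set by the coarser map and each alm is divided by its Gaussian beam
(FWHMs in arcmin, values illustrative):

import numpy as np
import healpy as hp

maps = [np.random.randn(hp.nside2npix(n)) for n in (64, 128)]
alms = alms_from_maps(maps, beams=np.array([30.0, 15.0]))
print(alms.shape)  # (2, n_alm) with lmax = 3*64 - 1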
Example #13
def deconv_planet(matrix, planet='', w_cutsky=None, masks_means=None):
    lmax = np.size(matrix, 0) - 1
    n1 = np.size(matrix, 1)
    n2 = np.size(matrix, 2)
    factor = 1.17741 / 60  # turns radius in arcsec into FWHM in arcmin
    am2rad = np.radians(1 / 60)  # turns arcmin into radians
    if planet == 'Saturn':
        #  9.50''
        #wl_planet = qb.spectra.blm_cl(qb.blm_gauss(9.50*factor, lmax))
        wl_planet = (hp.gauss_beam(9.50 * factor * am2rad, lmax=lmax))**2
    else:
        wl_planet = np.ones((lmax + 1), dtype=type(1.))
    print(prefix, planet + ' (2000) ', wl_planet[2000].flatten(), flush=True)
    for i in range(n1):
        for j in range(n2):
            matrix[:, i, j] /= wl_planet

    if w_cutsky is not None:
        print(prefix,
              'W  cut sky (2000) ',
              w_cutsky[:, :, 2000].flatten(),
              flush=True)
        for i in range(n1):
            for j in range(n2):
                matrix[:, i, j] *= w_cutsky[i, j, :]

    if masks_means is not None:
        print(prefix, 'masks means', masks_means.flatten(), flush=True)
        for i in range(n1):
            for j in range(n2):
                matrix[:, i, j] /= masks_means[i, j]

    return matrix
Example #14
def rspectrum(nu, r, sig, scaling=1.0):
    """
    Calculates the CMB amplitude given a value of r and the requested mode
    """
    import camb
    from camb import model, initialpower
    import healpy as hp
    #Set up a new set of parameters for CAMB
    pars = camb.CAMBparams()
    #This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
    pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
    pars.InitPower.set_params(As=2e-9, ns=0.965, r=r)
    lmax=6000
    pars.set_for_lmax(lmax,  lens_potential_accuracy=0)
    pars.WantTensors = True
    results = camb.get_results(pars)
    powers = results.get_cmb_power_spectra(params=pars, lmax=lmax, CMB_unit='muK', raw_cl=True,)


    l = np.arange(2,lmax+1)

    if sig == "TT":
        cl = powers['unlensed_scalar']
        signal = 0
    elif sig == "EE":
        cl = powers['unlensed_scalar']
        signal = 1
    elif sig == "BB":
        cl = powers['tensor']
        signal = 2

    bl = hp.gauss_beam(40/(180/np.pi*60), lmax,pol=True)
    A = np.sqrt(sum( 4*np.pi * cl[2:,signal]*bl[2:,signal]**2/(2*l+1) ))
    return cmb(nu, A*scaling)
Example #15
def get_noisefree_maps_lib_for_MCN1(params, exp, LDres, HDres=14, cache_lenalms=True, cache_maps=False, nsims=120, num_threads=4,path_to_inputs='.'):
    """
    Default simulation library of 120 full flat-sky sims with no instrument noise at resolution LDres.
    Different exps at the same resolution share the same random phases in both the CMB and the noise;
    all phases are built at the very first call if not already present.
    :param exp: 'Planck', 'S4' ... See get_config
    :param LDres: 14 : cell length is 0.745 amin, 13 : 1.49 etc.
    :return: sim library instance
    """
    #_, _, Beam_FWHM_amin, ellmin, ellmax = get_config(exp)
    Beam_FWHM_amin = params.Beam_FWHM_amin
    ellmin = params.lmin
    ellmax = params.lmax
    len_cmbs = get_lencmbs_lib_for_MCN1(res=HDres, cache_sims=cache_lenalms, nsims=nsims,path_to_inputs=path_to_inputs, Beam_FWHM_amin=Beam_FWHM_amin)
    lmax_sky = len_cmbs.lib_skyalm.ellmax
    cl_transf = hp.gauss_beam(Beam_FWHM_amin / 60. * np.pi / 180., lmax=lmax_sky)
    lib_datalm = ffs_covs.ell_mat.ffs_alm_pyFFTW(get_ellmat(LDres, HDres), filt_func=lambda ell: ell <= lmax_sky,
                                                 num_threads=num_threads)
    fsky = int(np.round(np.prod(len_cmbs.lib_skyalm.ell_mat.lsides) / 4. / np.pi * 1000.))
    vcell_amin2 = np.prod(lib_datalm.ell_mat.lsides) / np.prod(lib_datalm.ell_mat.shape) * (180 * 60. / np.pi) ** 2
    
    if os.path.exists('/global'):
        pixpha = sims.ffs_phas.pix_lib_phas('/global/cscratch1/sd/markm/lensit/temp/%s_sims/fsky%04d/beam%d/res%s/pixpha' % (nsims, fsky, Beam_FWHM_amin*10, LDres), 3, lib_datalm.ell_mat.shape, nsims_max=nsims)
    else:
        pixpha = sims.ffs_phas.pix_lib_phas('/media/sf_C_DRIVE/Users/DarkMatter42/OneDrive - University of Sussex/LensIt/temp/%s_sims/fsky%04d/beam%d/res%s/pixpha' % (nsims, fsky, Beam_FWHM_amin*10, LDres), 3, lib_datalm.ell_mat.shape, nsims_max=nsims)
    		
    if not pixpha.is_full() and pbs.rank == 0:
        for _i, idx in misc.misc_utils.enumerate_progress(np.arange(nsims), label='Generating Noise phases'):
            pixpha.get_sim(idx)
    pbs.barrier()
    if os.path.exists('/global'):
        lib_dir = '/global/cscratch1/sd/markm/lensit/temp/AN_sims_for_MCN1/%s_sims/fsky%04d/beam%d/res%s/%s/maps' % (nsims, fsky, Beam_FWHM_amin*10, LDres, exp)
    else:
        lib_dir = '/media/sf_C_DRIVE/Users/DarkMatter42/OneDrive - University of Sussex/LensIt/temp/AN_sims_for_MCN1/%s_sims/fsky%04d/beam%d/res%s/%s/maps' % (nsims, fsky, Beam_FWHM_amin*10, LDres, exp)
    return sims.ffs_maps.lib_noisefree(lib_dir, lib_datalm, len_cmbs, cl_transf, cache_sims=cache_maps)
Example #16
    def __init__(self, comm=None, signal_map="signal_map",
                 lmax=None, grid=None, fwhm_deg=None, beam=None,
                 out="smoothed_signal_map"):
        autotimer = timing.auto_timer(type(self).__name__)
        # We call the parent class constructor, which currently does nothing
        super().__init__()
        self.comm = comm
        self.signal_map = signal_map
        self.lmax = lmax
        self.out = out
        self.grid = grid

        # distribute alms
        local_m_indices = np.arange(self.comm.rank, lmax + 1, self.comm.size,
                                    dtype=np.int32)

        self.order = libsharp.packed_real_order(lmax, ms=local_m_indices)

        if (fwhm_deg is not None) and (beam is not None):
            raise Exception("OpSmooth error, specify either fwhm_deg or beam, "
                            "not both")

        if (fwhm_deg is None) and (beam is None):
            raise Exception("OpSmooth error, specify fwhm_deg or beam")

        if fwhm_deg is not None:
            self.beam = hp.gauss_beam(fwhm=np.radians(fwhm_deg), lmax=lmax,
                                      pol=True)
        else:
            self.beam = beam
Example #17
 def cross_t(self, maps, wsp=None, fwhms=[None,None]):
     """
      Cross power spectrum:
      apply the NaMaster estimator to T (scalar) maps with(out) masks.
     
     Parameters
     ----------
     
      maps : numpy.ndarray
          A two-row array of two T maps.
         
     wsp : (PS-estimator-defined) workspace
         A template of mask-induced mode coupling matrix.
         
     fwhms : list, tuple
         FWHM of gaussian beams
     
     Returns
     -------
     
     pseudo-PS results : tuple of numpy.ndarray
         (ell, TT, wsp(if input wsp is None))
     """
     assert isinstance(maps, np.ndarray)
     assert (maps.shape == (2,self._npix))
     assert (len(fwhms) == 2)
     # assemble NaMaster fields
     if fwhms[0] is None:
         _f01 = nmt.NmtField(self._mask[0], [maps[0]])
     else:
         _f01 = nmt.NmtField(self._mask[0], [maps[0]], beam=hp.gauss_beam(fwhms[0], 3*self._nside-1))
     if fwhms[1] is None:
         _f02 = nmt.NmtField(self._mask[0], [maps[1]])
     else:
         _f02 = nmt.NmtField(self._mask[0], [maps[1]], beam=hp.gauss_beam(fwhms[1], 3*self._nside-1))
     # estimate PS
     if wsp is None:
         _w = nmt.NmtWorkspace()
         _w.compute_coupling_matrix(_f01, _f02, self._b)
         _cl00c = nmt.compute_coupled_cell(_f01, _f02)
         _cl00 = _w.decouple_cell(_cl00c)
         return (self._modes, _cl00[0], _w)
     else:
         _cl00c = nmt.compute_coupled_cell(_f01, _f02)
         _cl00 = wsp.decouple_cell(_cl00c)
         return (self._modes, _cl00[0])
Example #18
 def beamfunc1(self):
     if hasattr(self.beam1, '__iter__'):
         self._beamfunc1 = self.beam1
     else:
         self._beamfunc1 = hp.gauss_beam(np.deg2rad(self.beam1),
                                         lmax=self.lmax - 1,
                                         pol=False)
     return self._beamfunc1
Example #19
def _get_Cl_noise(instrument, A, lmax):
    bl = [
        hp.gauss_beam(np.radians(b / 60.), lmax=lmax) for b in instrument.Beams
    ]
    nl = (np.array(bl) / np.radians(instrument.Sens_P / 60.)[:, np.newaxis])**2
    AtNA = np.einsum('fi, fl, fj -> lij', A, nl, A)
    inv_AtNA = np.linalg.inv(AtNA)
    return inv_AtNA.swapaxes(-3, -1)
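
The nl computed above is the inverse noise power spectrum per frequency,
1/N_ell = (B_ell / w)^2 with w the white-noise level in radians. A hedged
standalone version of the same quantity (numbers illustrative):

import numpy as np
import healpy as hp

lmax, fwhm_arcmin, sens_uk_arcmin = 512, 30.0, 5.0
bl = hp.gauss_beam(np.radians(fwhm_arcmin / 60.), lmax=lmax)
inv_nl = (bl / np.radians(sens_uk_arcmin / 60.))**2  # 1/N_ell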
Example #20
 def test_gauss_beam(self):
     idl_gauss_beam = np.array(
         pf.open(
             os.path.join(self.path, "data", "gaussbeam_10arcmin_lmax512_pol.fits")
         )[0].data
     ).T
     gauss_beam = hp.gauss_beam(np.radians(10. / 60.), lmax=512, pol=True)
     np.testing.assert_allclose(idl_gauss_beam, gauss_beam)
Example #21
 def optimal_lmax(self, fwhm_in, nside_in):
     lmax = 2 * min(nside_in, self.nside)
     if fwhm_in < self.fwhm and self.quickpolbeam is None:
         beam = hp.gauss_beam(self.fwhm * arcmin, lmax=lmax, pol=False)
         better_lmax = np.argmin(np.abs(beam - 1e-4)) + 1
         if better_lmax < lmax:
             lmax = better_lmax
     return lmax
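
The cutoff logic above in standalone form: find the first multipole where
the Gaussian beam transfer function has decayed to roughly 1e-4 (the arcmin
conversion factor is assumed to match the class's `arcmin` constant; a broad
toy beam is used so the cutoff falls inside the range):

import numpy as np
import healpy as hp

arcmin = np.pi / 180.0 / 60.0
beam = hp.gauss_beam(60.0 * arcmin, lmax=2048, pol=False)
better_lmax = int(np.argmin(np.abs(beam - 1e-4))) + 1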
Example #22
def get_maps_lib(exp, LDres, HDres=14, cache_lenalms=True, cache_maps=False, \
    nsims=120, num_threads=4, do_tensor_only=False, fn_tensCls=None):
# end CH
    """
    Default simulation library of 120 full flat-sky sims for experiment 'exp' at resolution LDres.
    Different exps at the same resolution share the same random phases in both the CMB and the noise;
    all phases are built at the very first call if not already present.
    :param exp: 'Planck', 'S4' ... See get_config
    :param LDres: 14 : cell length is 0.745 amin, 13 : 1.49 etc.
    :return: sim library instance
    """
    sN_uKamin, sN_uKaminP, Beam_FWHM_amin, ellmin, ellmax = get_config(exp)
    # Added by Chen Heinrich for Lensnet (2019/10)
    #len_cmbs = get_lencmbs_lib(
    #    #res=HDres, cache_sims=cache_lenalms, nsims=nsims)
    if not do_tensor_only:
        len_cmbs = get_lencmbs_lib(
            res=HDres, cache_sims=cache_lenalms, nsims=nsims) #CH19
    else:
        len_cmbs = get_lencmbs_lib_tensor(fn_tensCls,
            res=HDres, cache_sims=cache_lenalms, nsims=nsims, \
            num_threads=num_threads) #CH19    
    # end CH
    lmax_sky = len_cmbs.lib_skyalm.ellmax
    cl_transf = hp.gauss_beam(
        Beam_FWHM_amin / 60. * np.pi / 180., lmax=lmax_sky)
    lib_datalm = ffs_covs.ell_mat.ffs_alm_pyFFTW(get_ellmat(LDres, HDres), filt_func=lambda ell: ell <= lmax_sky,
                                                 num_threads=num_threads)
    fsky = int(
        np.round(np.prod(len_cmbs.lib_skyalm.ell_mat.lsides) / 4. / np.pi * 1000.))
    vcell_amin2 = np.prod(lib_datalm.ell_mat.lsides) / \
        np.prod(lib_datalm.ell_mat.shape) * (180 * 60. / np.pi) ** 2
    nTpix = sN_uKamin / np.sqrt(vcell_amin2)
    nPpix = sN_uKaminP / np.sqrt(vcell_amin2)

    pixpha = sims.ffs_phas.pix_lib_phas(LENSITDIR + '/temp/%s_sims/fsky%04d/res%s/pixpha' % (nsims, fsky, LDres), 3,
                                        lib_datalm.ell_mat.shape, nsims_max=nsims)
    if not pixpha.is_full() and pbs.rank == 0:
        for _i, idx in misc.misc_utils.enumerate_progress(np.arange(nsims), label='Generating Noise phases'):
            pixpha.get_sim(idx)
    pbs.barrier()
    
    # CH: takes care of different map location if maps are cached
    if fn_tensCls is None:
        cl_file_root = 'lensit_fiducial_tensCls'
    else:
        cl_file_root = os.path.splitext(os.path.basename(fn_tensCls))[0]
    
    if do_tensor_only:
        lib_dir = LENSITDIR + \
            '/temp/%s/%s_sims/fsky%04d/res%s/%s/maps_tens' % (cl_file_root, nsims, fsky, LDres, exp)
    else:
        lib_dir = LENSITDIR + \
            '/temp/%s_sims/fsky%04d/res%s/%s/maps' % (nsims, fsky, LDres, exp)
    print('get_maps_lib: maps lib_dir = %s'%lib_dir)  

    return sims.ffs_maps.lib_noisemap(lib_dir, lib_datalm, len_cmbs, cl_transf, nTpix, nPpix, nPpix,
            pix_pha=pixpha, cache_sims=cache_maps)
Example #23
def apodize(lib_datalm, mask, sigma_fwhm_armin=12., lmax=None, method='hybrid', mult_factor=3, min_factor=0.1):
    """ Flat-sky apodizer directly adapted from Anthony's curved-sky libaml.apodize """
    if sigma_fwhm_armin <= 0.: return mask
    lmax = lmax or lib_datalm.ell_mat.ellmax
    libalm = li.ffs_covs.ell_mat.ffs_alm_pyFFTW(lib_datalm.ell_mat, filt_func=lambda ell: ell <= lmax)
    print('fsky_unapodized = %.5f' % (np.sum(mask ** 2) / mask.size))
    bl = hp.gauss_beam(sigma_fwhm_armin / 60. / 180. * np.pi, lmax=lmax)
    apomask = libalm.alm2map(libalm.almxfl(libalm.map2alm(mask), bl))
    print('Min/max smoothed mask', np.min(apomask), np.max(apomask))
    print('fsky = %.5f' % (np.sum(apomask ** 2) / apomask.size))
    if method == 'gaussian': return apomask
    if method != 'hybrid': raise ValueError('Unknown apodization method')
    apomask = 1 - np.minimum(1., np.maximum(0., mult_factor * (1 - apomask) - min_factor))
    bl = hp.gauss_beam(sigma_fwhm_armin * 0.5 / 60. / 180. * np.pi, lmax=lmax)
    apomask = libalm.alm2map(libalm.almxfl(libalm.map2alm(apomask), bl))
    print('Min/max re-smoothed mask', np.min(apomask), np.max(apomask))
    print('fsky = %.5f' % (np.sum(apomask ** 2) / apomask.size))
    return apomask
Example #24
 def beamfunc1(self):
     if hasattr(self.beam1, '__iter__'):
         self._beamfunc1 = self.beam1
     else:
         self._beamfunc1 = hp.gauss_beam(
             np.deg2rad(self.beam1),
             lmax=self.lmax-1,
             pol=False)
     return self._beamfunc1
Example #25
def get_maps_lib(exp,
                 LDres,
                 HDres=14,
                 cache_lenalms=True,
                 cache_maps=False,
                 nsims=120,
                 num_threads=4):
    """
    Default simulation library of 120 full flat-sky sims for experiment 'exp' at resolution LDres.
    Different exps at the same resolution share the same random phases in both the CMB and the noise;
    all phases are built at the very first call if not already present.
    :param exp: 'Planck', 'S4' ... See get_config
    :param LDres: 14 : cell length is 0.745 amin, 13 : 1.49 etc.
    :return: sim library instance
    """
    sN_uKamin, sN_uKaminP, Beam_FWHM_amin, ellmin, ellmax = get_config(exp)
    len_cmbs = get_lencmbs_lib(res=HDres,
                               cache_sims=cache_lenalms,
                               nsims=nsims)
    lmax_sky = len_cmbs.lib_skyalm.ellmax
    cl_transf = hp.gauss_beam(Beam_FWHM_amin / 60. * np.pi / 180.,
                              lmax=lmax_sky)
    lib_datalm = ffs_covs.ell_mat.ffs_alm_pyFFTW(
        get_ellmat(LDres, HDres),
        filt_func=lambda ell: ell <= lmax_sky,
        num_threads=num_threads)
    fsky = int(
        np.round(
            np.prod(len_cmbs.lib_skyalm.ell_mat.lsides) / 4. / np.pi * 1000.))
    vcell_amin2 = np.prod(lib_datalm.ell_mat.lsides) / np.prod(
        lib_datalm.ell_mat.shape) * (180 * 60. / np.pi)**2
    nTpix = sN_uKamin / np.sqrt(vcell_amin2)
    nPpix = sN_uKaminP / np.sqrt(vcell_amin2)

    pixpha = sims.ffs_phas.pix_lib_phas(LENSITDIR +
                                        '/temp/%s_sims/fsky%04d/res%s/pixpha' %
                                        (nsims, fsky, LDres),
                                        3,
                                        lib_datalm.ell_mat.shape,
                                        nsims_max=nsims)
    if not pixpha.is_full() and pbs.rank == 0:
        for _i, idx in misc.misc_utils.enumerate_progress(
                np.arange(nsims), label='Generating Noise phases'):
            pixpha.get_sim(idx)
    pbs.barrier()
    lib_dir = LENSITDIR + '/temp/%s_sims/fsky%04d/res%s/%s/maps' % (
        nsims, fsky, LDres, exp)
    return sims.ffs_maps.lib_noisemap(lib_dir,
                                      lib_datalm,
                                      len_cmbs,
                                      cl_transf,
                                      nTpix,
                                      nPpix,
                                      nPpix,
                                      pix_pha=pixpha,
                                      cache_sims=cache_maps)
Example #26
    def __mk_spectral_matrix_from_cl(self, cl=None):
        """ Here in grid units """
        # TODO you may improve on that, in part. in respect to circulant embedding, zero mode etc.: Harmonize with others

        if cl is None:
            beam = gauss_beam(self.Beam_FWHM_amin * np.pi / 180. / 60., lmax=len(self.cl_unl) - 1)
            cl = self.cl_unl * beam ** 2

        return np.interp(self.lib_cub.sqd_freqmap().flatten(), np.arange(len(cl))**2,
                         cl * self.__nbar(), left=0., right=0.).reshape(self.shape)
Example #27
 def autoWSP_TT(self, maps, beams=None):
     # assemble NaMaster fields
     if beams is None:
         f0 = nmt.NmtField(self._apomask, [maps[0]])
     else:
         f0 = nmt.NmtField(self._apomask, [maps[0]], beam=hp.gauss_beam(beams, 3*self._nside-1))
     # prepare workspace
     w = nmt.NmtWorkspace()
     w.compute_coupling_matrix(f0, f0, self._b)
     return w
Example #28
 def crosBP_TT(self, maps, wsp=None, beams=[None,None]):
     dat1 = maps[0]
     dat2 = maps[3]
     # assemble NaMaster fields
     if beams[0] is None:
         f01 = nmt.NmtField(self._apomask, [dat1])
     else:
         f01 = nmt.NmtField(self._apomask, [dat1], beam=hp.gauss_beam(beams[0], 3*self._nside-1))
     if beams[1] is None:
         f02 = nmt.NmtField(self._apomask, [dat2])
     else:
         f02 = nmt.NmtField(self._apomask, [dat2], beam=hp.gauss_beam(beams[1], 3*self._nside-1))
     # estimate PS
     if wsp is None:
         cl00 = nmt.compute_full_master(f01, f02, self._b)
         return (self._modes, self.rebinning(cl00[0]))
     else:
         cl00c = nmt.compute_coupled_cell(f01, f02)
         cl00 = wsp.decouple_cell(cl00c)
         return (self._modes, self.rebinning(cl00[0]))
Example #29
def _get_Cl_noise(instrument, A, lmax):
    try:
        bl = np.array([hp.gauss_beam(np.radians(b/60.), lmax=lmax)
                       for b in instrument.fwhm])
    except AttributeError:
        bl = np.ones((len(instrument.frequency), lmax+1))

    nl = (bl / np.radians(instrument.depth_p/60.)[:, np.newaxis])**2
    AtNA = np.einsum('fi, fl, fj -> lij', A, nl, A)
    inv_AtNA = np.linalg.inv(AtNA)
    return inv_AtNA.swapaxes(-3, -1)
Example #30
def sensitivity_to_noise_mean_Cl(det_sens, beam_fwhm, lmax, t_mission=T_MISSION, n_det=N_DET_145, f_sky=1.0):
    """
    Given the sensitivity of the instrument, the observation time and the number of detectors, and assuming that the scan was uniform, this gives the theoretical mean of the noise spectra for TT, EE, and BB.
    """
    w_inv = sensitivity_to_noise_solid_angle(det_sens, t_mission, n_det)**2
    Bl_squared = hp.gauss_beam(fwhm=beam_fwhm, lmax=lmax, pol=True)**2
    noise_mean = np.empty((3, lmax+1))
    for i in range(3):
        noise_mean[i] = np.sqrt(1.0/f_sky)*w_inv[i]/Bl_squared[...,i]
        noise_mean[i][:2] = 0.0
    return noise_mean
Example #31
    def get_gaussian_beam(self, lmax=1024, pol=False, beam_eff=False):
        """Equivalent gaussian beam from RIMO FWHM

        Returns the transfer function of a gaussian beam until lmax,
        either polarized or not"""
        import healpy as hp

        beam = hp.gauss_beam(self.fwhm, lmax, pol)

        if beam_eff:
            beam *= self.beam_efficiency
        return beam
Example #32
 def cross_teb(self, maps, wsp=None, fwhms=[None,None]):
     assert isinstance(maps, np.ndarray)
     assert (maps.shape == (6,self._npix))
     # assemble NaMaster fields
     if fwhms[0] is None:
         _f01 = nmt.NmtField(self._mask[0], [maps[0]])
         _f21 = nmt.NmtField(self._mask[0], [maps[1], maps[2]], purify_e=False, purify_b=True)
     else:
         _f01 = nmt.NmtField(self._mask[0], [maps[0]], beam=hp.gauss_beam(fwhms[0], 3*self._nside-1))
         _f21 = nmt.NmtField(self._mask[0], [maps[1], maps[2]], purify_e=False, purify_b=True, beam=hp.gauss_beam(fwhms[0], 3*self._nside-1))
     if fwhms[1] is None:
         _f02 = nmt.NmtField(self._mask[0], [maps[3]])
         _f22 = nmt.NmtField(self._mask[0], [maps[4], maps[5]], purify_e=False, purify_b=True)
     else:
         _f02 = nmt.NmtField(self._mask[0], [maps[3]], beam=hp.gauss_beam(fwhms[1], 3*self._nside-1))
         _f22 = nmt.NmtField(self._mask[0], [maps[4], maps[5]], purify_e=False, purify_b=True, beam=hp.gauss_beam(fwhms[1], 3*self._nside-1))
     # estimate PS
     _cl00 = nmt.compute_full_master(_f01, _f02, self._b)
     _cl02 = nmt.compute_full_master(_f01, _f22, self._b)
     _cl22 = nmt.compute_full_master(_f21, _f22, self._b)
     return (self._modes, _cl00[0], _cl02[0], _cl02[1], _cl22[0], _cl22[1], _cl22[3])
Example #33
 def set_signal_cov(self, cl, fwhm=None):
     """Set signal cov from Cls and assignt quantities to .signal_cov
     atribute"""
     if fwhm is not None:
         bl = hp.gauss_beam(fwhm, lmax=len(cl) - 1)
         self.signal_cov = sc.SignalCov(bl**2 * cl, self.params.lmax)
     else:
         self.signal_cov = sc.SignalCov(cl, self.params.lmax)
     try:
         self.set_delta()
     except:
         pass
Example #34
def sensitivity_to_noise_variance_Cl(det_sens, beam_fwhm, lmax, t_mission=T_MISSION, n_det=N_DET_145, f_sky=1.0):
    """
    Given the sensitivity of the instrument, observation time and number of detectors, and assuming that the scan was uniform, this gives us the theoretical variance of the noise spectra for TT, EE, and BB.
    """
    w_inv = sensitivity_to_noise_solid_angle(det_sens, t_mission, n_det)**2
    ell = np.arange(lmax + 1)
    Bl_squared = hp.gauss_beam(fwhm=beam_fwhm, lmax=lmax, pol=True)**2
    variance = np.empty((3, lmax+1))
    for i in range(3):
        variance[i] = (2.0/(2.0*ell + 1)/f_sky)*(w_inv[i]/Bl_squared[...,i])**2
        variance[i][:2] = 0.0
    return variance
Example #35
def ini_field(mask, maps, fwhm, temp):
    '''Initialize pymaster field.'''
    if fwhm > 0:
        print('>> Computing Gaussian beam window function, FWHM = {0:f} [radians]'
              .format(fwhm))
        lmax = 3 * hp.get_nside(mask) - 1  # required by PyMaster
        bl = hp.gauss_beam(fwhm, lmax=lmax, pol=False)
    else:
        bl = None
    print('>> Initializing the field...')
    fld = nmt.NmtField(mask, [maps], beam=bl, templates=temp)
    return fld
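
Hedged usage for ini_field, with a toy full-sky mask and map; temp=None
disables template deprojection, matching the pass-through to NmtField above:

import numpy as np
import healpy as hp
import pymaster as nmt

nside = 64
mask = np.ones(hp.nside2npix(nside))
m = np.random.randn(hp.nside2npix(nside))
fld = ini_field(mask, m, fwhm=np.radians(0.5), temp=None)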
Example #36
def deconvolve_cl(spectra, fwhm, lmax):
    if spectra.ndim == 1:
        pol = False
    else:
        pol = True
    Bl = hp.gauss_beam(fwhm=fwhm, lmax=lmax, pol=pol)

    if pol:
        spectra /= Bl.T**2
    else:
        spectra /= Bl**2

    return spectra
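
A quick sanity check of deconvolve_cl: feeding it a spectrum equal to the
squared beam window should return an array of ones (FWHM value illustrative):

import numpy as np
import healpy as hp

lmax = 512
fwhm = np.radians(10. / 60.)  # 10 arcmin
cl = hp.gauss_beam(fwhm=fwhm, lmax=lmax)**2
cl_deconv = deconvolve_cl(cl.copy(), fwhm=fwhm, lmax=lmax)
assert np.allclose(cl_deconv, 1.0)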
Example #37
 def fwhm(self, fwhm):
     self._fwhm = fwhm
     # high-ell cosine taper between nside+1 and 3*nside, applied in both branches
     bl = np.ones(3 * self._qml_nside + 1)
     l = np.arange(self._qml_nside + 1, 3 * self._qml_nside + 1)
     bl[self._qml_nside + 1:] = 0.5 * (1 + np.sin(l * np.pi / 2 / self._qml_nside))
     if fwhm == 0.0:
         self._bell = bl
     else:
         self._bell = bl * hp.gauss_beam(np.deg2rad(self._fwhm), lmax=3 * self._qml_nside)
Example #38
    def beamfunc2(self):
        # if self.beam2 is None and self._map2 is None:
        #     self._beamfunc2 = self.beamfunc1
        #     return self._beamfunc2
        if self.beam2 is None:
            self._beamfunc2 = np.ones(self.lmax, dtype=np.float32)

        elif hasattr(self.beam2, '__iter__'):
            self._beamfunc2 = self.beam2
        else:
            self._beamfunc2 = hp.gauss_beam(
                np.deg2rad(self.beam2),
                lmax=self.lmax-1,
                pol=False)
        return self._beamfunc2
Example #39
def estimate_cl(sky_map, lmax, binary_mask=None, fwhm=0.0, pol=True):

    sky_map_masked, binary_mask = mask_map(sky_map, binary_mask=binary_mask, pol=pol, ret_mask=True, fill_zeros=True)

    f_sky = float(np.sum(binary_mask))/binary_mask.size

    Bl = hp.gauss_beam(fwhm=fwhm, lmax=lmax, pol=pol)

    if pol:
        spectra = hp.anafast((sky_map_masked[0].filled(), sky_map_masked[1].filled(), sky_map_masked[2].filled()), lmax=lmax)[:4]
        spectra = np.array(spectra)
        spectra /= f_sky*Bl.T**2
    else:
        spectra = hp.anafast(sky_map_masked.filled(), lmax=lmax)
        spectra /= f_sky*Bl**2

    spectra = np.array(spectra)
    return spectra
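
The f_sky and beam corrections applied inside estimate_cl, written out
standalone for a scalar map (mask, map and spectrum are toys; mask_map is
the repository's own helper and is not needed here):

import numpy as np
import healpy as hp

nside, lmax = 128, 256
m = hp.synfast(np.full(lmax + 1, 1e-12), nside, lmax=lmax)
mask = np.ones(hp.nside2npix(nside))
mask[: mask.size // 2] = 0.0
f_sky = mask.sum() / mask.size
Bl = hp.gauss_beam(fwhm=0.0, lmax=lmax, pol=False)
cl = hp.anafast(m * mask, lmax=lmax) / (f_sky * Bl**2)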
Example #40
def wiener_filter_for_alm(alm, lmax=None, fwhm=0.0, f_sky=1.0, sky_prior=None):

    if lmax is None:
        lmax = hp.Alm.getlmax(len(alm), None)

    if sky_prior is None:
        spectra_th = np.load("/global/homes/b/banerji/simulation/spectra/r_001/lensedtot_cls.npy")[0,:lmax+1]
    else:
        spectra_th = estimate_cl(sky_prior, lmax, fwhm=fwhm, pol=False)

    Bl = hp.gauss_beam(fwhm=fwhm, lmax=lmax, pol=False)
    spectra_ob = hp.alm2cl(alm, lmax_out=lmax)
    spectra_ob /= f_sky*Bl**2

    filter_response = spectra_ob/spectra_th
    filter_response[:2] = 1.0
    filter_response = np.sqrt(filter_response)

    return filter_response
Example #41
    def __init__(self, cl_unl, cl_len, LD_res, HD_res, lside, f, f_inv,sN_uKamin,Beam_FWHM_amin,verbose=False):
        """
        f and finv are displacement field classes. Number of points on each side 2**LD_res,2**HD_res.
        f and f_inv must have a f.lens_map routine that does the lensing of map 2**HD_res by 2**HD_res.
        f_inv must also have a f.det_M routine which returns the determinant of the magnification matrix
        at all points of the map.
        """
        self.LD_cub = library_datacube(np.ones(2, dtype=int) * LD_res, np.ones(2) * lside, verbose=verbose)
        self.HD_cub = library_datacube(np.ones(2, dtype=int) * HD_res, np.ones(2) * lside, verbose=verbose)
        self.LD_shape = self.LD_cub.shape()
        self.HD_shape = self.HD_cub.shape()

        self.f_inv = f_inv  # inverse displacement
        self.f = f  # displacement

        self.cl_unl = cl_unl
        self.cl_len = cl_len

        self.sN_uKamin = sN_uKamin
        self.Beam_FWHM_amin = Beam_FWHM_amin
        self.verbose = verbose

        # FIXME : the value of the variance jumps by a factor 5-6 if the l = 0 is interpolated !
        # There is something fishy with the lowest mode.
        # Again circulant embedding is probably better, but how to get the fake spectra ?

        # Builds useful FFT maps.
        # All spectral maps in grid units ! (nbar * PhysicalSpectrum)
        # TODO Hack
        self.N_2p1 = 2**(HD_res -1) + 1
        kmax_grid = np.sqrt(np.sum(self.HD_cub.kmax()**2))
        self.HD_cl_unl_SpecMap = self._mk_HD_spectral_map_from_cl(cl_unl)[:,0:self.N_2p1]
        self.HD_Beam_SpecMap = self._mk_HD_spectral_map_from_cl(
            gauss_beam(Beam_FWHM_amin * np.pi / 180. / 60., lmax=kmax_grid+1))
        self.HD_Beam_SpecMap *= square_pixwin_map(self.HD_shape)

        assert (np.all(self.HD_cl_unl_SpecMap >= 0.)), "Something's wrong"
        assert (np.all(self.HD_Beam_SpecMap >= 0.)), "Something's wrong"
Example #42
def correlate_noise(i_file,j_file,wl_i,wl_j,alpha_file,bands,beam=False,gal_cut=0.,mask_file=None):
	print "Computing Noise Correlation for Bands "+str(bands)


	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	delta_alpha_radio=hp.read_map(alpha_file,hdu='uncertainty/phi')
	#iqu_band_i=hdu_i['stokes iqu'].data
	#iqu_band_j=hdu_j['stokes iqu'].data
	nside_i=hdu_i['stokes iqu'].header['nside']
	nside_j=hdu_j['stokes iqu'].header['nside']
	hdu_i.close()
	hdu_j.close()
	

	ind_i=np.argwhere( wl == wl_i)[0][0]
	ind_j=np.argwhere( wl == wl_j)[0][0]

	npix_i=hp.nside2npix(nside_i)
	npix_j=hp.nside2npix(nside_j)
	iqu_band_i=np.zeros((3,npix_i))
	iqu_band_j=np.zeros((3,npix_j))
	
	sigma_i=[noise_const_pol[ind_i]*np.random.normal(0,1,npix_i),noise_const_pol[ind_i]*np.random.normal(0,1,npix_i)]
	sigma_j=[noise_const_pol[ind_j]*np.random.normal(0,1,npix_j),noise_const_pol[ind_j]*np.random.normal(0,1,npix_j)]
	
	iqu_band_i[1]=np.copy(sigma_i[0])
	iqu_band_i[2]=np.copy(sigma_i[1])
	iqu_band_j[1]=np.copy(sigma_j[0])
	iqu_band_j[2]=np.copy(sigma_j[1])
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_i])**2)*np.pi/(180.*60.),verbose=False)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_j])**2)*np.pi/(180.*60.),verbose=False)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=nside_out,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=nside_out,order_in='ring')
	
	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	
	Bl_factor=np.repeat(1.,3*nside_out)
	#ipdb.set_trace()
	if beam:
		Bl_factor=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)
	pix_area=hp.nside2pixarea(nside_out)
	#ipdb.set_trace()
	mask_bool=np.repeat(False,npix_out)

	if gal_cut > 0:
		pix=np.arange(hp.nside2npix(nside_out))
		x,y,z=hp.pix2vec(nside_out,pix,nest=0)
		mask_bool= np.abs(z)<= np.sin(gal_cut*np.pi/180.)
	#mask_bool1[np.where(np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)<.2e-6)]=True
	if not (mask_file is None):
		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()
		
		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=128)
		
		mask_bool=~mask.astype(bool)
		
		fsky= 1. - np.sum(mask)/float(len(mask))	
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L
	
	DQm.mask=mask_bool
	DUm.mask=mask_bool
	aQm.mask=mask_bool
	aUm.mask=mask_bool
	#ipdb.set_trace()
	cross1=hp.anafast(DQm,map2=aUm)/Bl_factor**2
	cross2=hp.anafast(DUm,map2=aQm)/Bl_factor**2
	#cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	#cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	#hp.write_cl('cl_'+bands+'_FR_noise_QxaU.fits',cross1)
	#hp.write_cl('cl_'+bands+'_FR_noise_UxaQ.fits',cross2)
	return (cross1,cross2)
Example #43
def correlate_theory(i_file,j_file,wl_i,wl_j,alpha_file,bands_name,beam=False,gal_cut=0.,mask_file=None):
	print "Computing Cross Correlations for Bands "+str(bands_name)

	radio_file='/data/wmap/faraday_MW_realdata.fits'
	cl_file='/home/matt/wmap/simul_scalCls.fits.lens'
	
	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	#iqu_band_i=hdu_i['stokes iqu'].data
	#iqu_band_j=hdu_j['stokes iqu'].data
	nside_i=hdu_i['stokes iqu'].header['nside']
	nside_j=hdu_j['stokes iqu'].header['nside']
	hdu_i.close()
	hdu_j.close()
	ind_i=np.where( wl == wl_i)[0][0]
	ind_j=np.where( wl == wl_j)[0][0]
	
	cls=hp.read_cl(cl_file)
	simul_cmb=hp.sphtfunc.synfast(cls,max(nside_i,nside_j),fwhm=0.,new=1,pol=1);
	
	alpha_radio=hp.read_map(radio_file,hdu='maps/phi');

	##Generate CMB for file J
	
	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside_j,order_in='ring',order_out='ring')
	simul_cmb=hp.ud_grade(simul_cmb,nside_out=nside_j)
	tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl_j,alpha_radio);
	iqu_band_j=hp.smoothing(tmp_cmb,fwhm=np.sqrt((beam_fwhm[ind_j]*np.pi/(180.*60.))**2-hp.nside2pixarea(nside_i)),verbose=False)
	
	##Generate CMB for file I
	
	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside_i,order_in='ring',order_out='ring')
	simul_cmb=hp.ud_grade(simul_cmb,nside_out=nside_i)
	tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl_i,alpha_radio);
	#ipdb.set_trace()
	iqu_band_i=hp.smoothing(tmp_cmb,fwhm=np.sqrt((beam_fwhm[ind_i]*np.pi/(180.*60.))**2-hp.nside2pixarea(nside_i)),verbose=False)
	


	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_i])**2)*np.pi/(180.*60.),verbose=False)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_j])**2)*np.pi/(180.*60.),verbose=False)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=nside_out,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=nside_out,order_in='ring')
	
	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	
	Bl_factor=np.repeat(1.,3*nside_out)
	#ipdb.set_trace()
	if beam:
		Bl_factor=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)
	pix_area=hp.nside2pixarea(nside_out)
	#ipdb.set_trace()
	mask_bool=np.repeat(False,npix_out)

	if gal_cut > 0:
		pix=np.arange(hp.nside2npix(nside_out))
		x,y,z=hp.pix2vec(nside_out,pix,nest=0)
		mask_bool= np.abs(z)<= np.sin(gal_cut*np.pi/180.)
	#mask_bool1[np.where(np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)<.2e-6)]=True
	if not (mask_file is None):
		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()
		
		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=128)
		
		mask_bool=~mask.astype(bool)
		
		fsky= 1. - np.sum(mask)/float(len(mask))	
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L
	
	DQm.mask=mask_bool
	DUm.mask=mask_bool
	aQm.mask=mask_bool
	aUm.mask=mask_bool
	cross1=hp.anafast(DQm,map2=aUm)/Bl_factor**2
	cross2=hp.anafast(DUm,map2=aQm)/Bl_factor**2
	#cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	#cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	return (cross1,cross2)
Example #44
def plot_mc():
	bins=[1,5,10,20,25,50]
	l=np.arange(3*nside_out)
	ll=l*(l+1)/(2.*np.pi)
	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	for num, mask_file in enumerate(mask_array):
		f=np.load('prism_simul_'+mask_name[num]+'.npz')
		theory1_array_in=f['the1_in']
		theory2_array_in=f['the2_in']
		cross1_array_in=f['c1_in']
		cross2_array_in=f['c2_in']
		noise1_array_in=f['n1_in']
		noise2_array_in=f['n2_in']
		Ndq_array_in=f['ndq_in']
		Ndu_array_in=f['ndu_in']
		Nau_array_in=f['nau_in']
		Naq_array_in=f['naq_in']

		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()
		
		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=128)
		
		mask_bool=~mask.astype(bool)
		
		fsky= 1. - np.sum(mask)/float(len(mask))	
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L

		theory1_array_in=np.array(theory1_array_in)/fsky
		theory2_array_in=np.array(theory2_array_in)/fsky
		cross1_array_in=np.array(cross1_array_in)/fsky
		cross2_array_in=np.array(cross2_array_in)/fsky
		Ndq_array_in=np.array(Ndq_array_in)/fsky
		Ndu_array_in=np.array(Ndu_array_in)/fsky
		Nau_array_in=np.array(Nau_array_in)/fsky
		Naq_array_in=np.array(Naq_array_in)/fsky
		noise1_array_in=np.array(noise1_array_in)/fsky
		noise2_array_in=np.array(noise2_array_in)/fsky


		for b in bins:
			N_dq=np.mean(Ndq_array_in,axis=1)
			N_au=np.mean(Nau_array_in,axis=1)
			delta1_in=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)**2+(np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)/2.*(N_dq+N_au)+N_dq*N_au/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
		
			cosmic1_in=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=1)**2)

			N_du=np.mean(Ndu_array_in,axis=1)
			N_aq=np.mean(Naq_array_in,axis=1)
			delta2_in=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)**2+(np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)/2.*(N_dq+N_au)+N_dq*N_au/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
			cosmic2_in=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=1)**2)

			cross1_array=[[],[],[]]
			cross2_array=[[],[],[]]
			Ndq_array=[[],[],[]]
			Ndu_array=[[],[],[]]
			Nau_array=[[],[],[]]
			Naq_array=[[],[],[]]
			noise1_array=[[],[],[]]
			noise2_array=[[],[],[]]
			theory1_array=[[],[],[]]
			theory2_array=[[],[],[]]
			cosmic1=[[],[],[]]
			cosmic2=[[],[],[]]
			delta1=[[],[],[]]
			delta2=[[],[],[]]
        		
			plot_l=[]
			if( b != 1):
			for m in range(len(cross1_array_in)):
				for n in range(len(cross1_array_in[0])):
		        		        tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in[m][n]/bls,b)
		        		        tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in[m][n]/bls,b)
						tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in[m][n]/bls,b)
		        		        tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in[m][n]/bls,b)
						tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in[m][n]/bls,b)
		        		        tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in[m][n]/bls,b)
		        		        
						theory1_array[m].append(tmp_t1['llcl'])
						theory2_array[m].append(tmp_t2['llcl'])
						
						cross1_array[m].append(tmp_c1['llcl'])
						cross2_array[m].append(tmp_c2['llcl'])
						
						noise1_array[m].append(tmp_n1['llcl'])
						noise2_array[m].append(tmp_n2['llcl'])
		        		        
						if n == len(cross1_array_in[0])-1:
		        		                plot_l=tmp_c1['l_out']
					tmp_c1=bin_llcl.bin_llcl(ll*cosmic1_in[m]/bls,b)
					tmp_d1=bin_llcl.bin_llcl(ll*delta1_in[m]/bls,b)
					cosmic1[m]=tmp_c1['llcl']
					delta1[m]=tmp_d1['llcl']

					tmp_c2=bin_llcl.bin_llcl(ll*cosmic2_in[m]/bls,b)
					tmp_d2=bin_llcl.bin_llcl(ll*delta2_in[m]/bls,b)
					cosmic2[m]=tmp_c2['llcl']
					delta2[m]=tmp_d2['llcl']
					
			else:
				plot_l=l
				theory1_array=np.multiply(ll/bls,theory1_array_in)
				cross1_array=np.multiply(ll/bls,cross1_array_in)
				noise1_array=np.multiply(ll/bls,noise1_array_in)
				theory2_array=np.multiply(ll/bls,theory2_array_in)
				cross2_array=np.multiply(ll/bls,cross2_array_in)
				noise2_array=np.multiply(ll/bls,noise2_array_in)
				cosmic1=cosmic1_in*ll/bls
				cosmic2=cosmic2_in*ll/bls
				delta1=delta1_in*ll/bls
				delta2=delta2_in*ll/bls
			#noise1=np.mean(noise1_array,axis=1)
			#noise2=np.mean(noise2_array,axis=1)
        		theory_array = np.add(theory1_array,theory2_array)
        		theory=np.mean(theory_array,axis=1)
        		dtheory=np.std(theory_array,axis=1,ddof=1)
        		cross_array = np.add(np.subtract(cross1_array,noise1_array),np.subtract(cross2_array,noise2_array))
        		cross=np.mean(cross_array,axis=1)
        		dcross=np.std(cross_array,axis=1,ddof=1)
        		cosmic=np.sqrt(np.array(cosmic1)**2+np.array(cosmic2)**2)
        		delta=np.sqrt(np.array(delta1)**2+np.array(delta2)**2)

			cross=np.average(cross,weights=1./dcross**2,axis=0)
			theory=np.average(theory,weights=1./dcross**2,axis=0)
			dtheory=np.average(dtheory,weights=1./dcross**2,axis=0)
			cosmic=np.average(cosmic,weights=1./dcross**2,axis=0)
			delta=np.average(delta,weights=1./dcross**2,axis=0)
			dcross=np.sqrt(np.average(dcross**2,weights=1./dcross**2,axis=0))

			#theory1=np.mean(theory1_array,axis=0)
			#dtheory1=np.std(theory1_array,axis=0,ddof=1)
			#cross1=np.mean(cross1_array,axis=0)
			#dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
			#ipdb.set_trace()
			plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'prism_FR_simulation',title='PRISM FR Correlator',theory=theory*1e12,dtheory=dtheory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)

			#theory2=np.mean(theory2_array,axis=0)
			#dtheory2=np.std(theory2_array,axis=0,ddof=1)
			#cross2=np.mean(cross2_array,axis=0)
			##delta2=np.mean(delta2_array,axis=0)
			#dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
			##ipdb.set_trace()
			#plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
			#ipdb.set_trace()
    
			if b == 25 :
				a_scales=np.linspace(-2,4,121)
				chi_array=[]
				for a in a_scales:
					chi_array.append(np.sum( (cross - a*theory)**2/(dcross)**2))
				ind = np.argmin(chi_array)
			#likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sqrt(2*np.pi)
				likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sum(np.exp(np.multiply(-1./2.,chi_array))*.05)

				Sig=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise=np.std(np.sum(cross_array/dcross**2,axis=1)/np.sum(1./dcross**2))
				Sig1=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise1=np.std(np.sum(cross_array*(theory/dcross)**2,axis=1)/np.sum((theory/dcross)**2))
				SNR=Sig/Noise
				SNR1=Sig1/Noise1
				
				Sig2=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise2=np.sqrt(1./np.sum(1./dcross**2))
				Sig3=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise3=np.sqrt(np.sum(theory**2)/np.sum(theory**2/dcross**2))
				SNR2=Sig2/Noise2
				SNR3=Sig3/Noise3
				
				#ipdb.set_trace()
				fig,ax1=plt.subplots(1,1)

				ax1.plot(a_scales,likelihood,'k.')
				ax1.set_title('Faraday Rotation Correlator')
				ax1.set_xlabel('Likelihood scalar')
				ax1.set_ylabel('Likelihood of Correlation')
				fig.savefig('FR_Correlation_Likelihood.png',format='png')
				fig.savefig('FR_Correlation_Likelihood.eps',format='eps')
				#ipdb.set_trace()
				f=open('Maximum_likelihood.txt','w')
				f.write('Maximum Likelihood: {0:2.5f}%  for scale factor {1:.2f} \n'.format(float(likelihood[ind]*100),float(a_scales[ind])))
				f.write('Probability of scale factor =1: {0:2.5f}% \n \n'.format(float(likelihood[np.where(a_scales ==1)])*100))
				f.write('Detection Levels using Standard Deviation \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR,Sig, Noise))
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n \n'.format(SNR1,Sig1,Noise1))
				f.write('Detection using Theoretical Noise \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR2,Sig2, Noise2))
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR3,Sig3,Noise3))
				f.close()

			#if b == 1 :
			#	xbar= np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
			#	vector=np.matrix(ll[1:]*cross[1:]).T
			#	mu=np.matrix(ll[1:]*theory[1:]).T
			#	fact=len(xbar)-1
			#	cov=(np.dot(xbar,xbar.T)/fact).squeeze()
			#	ipdb.set_trace()
			#	likelihood=np.exp(-np.dot(np.dot((vector-mu).T,lin.inv(cov)),(vector-mu))/2. )/(np.sqrt(2*np.pi*lin.det(cov)))
			#	print('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f=open('FR_likelihood.txt','w')
			#	f.write('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f.close()

				#subprocess.call('mv Maximum_likelihood.txt  gal_cut_{0:0>2d}/'.format(cut), shell=True)
				subprocess.call('mv *01*.png bin_01/', shell=True)
				subprocess.call('mv *05*.png bin_05/', shell=True)
				subprocess.call('mv *10*.png bin_10/', shell=True)
				subprocess.call('mv *20*.png bin_20/', shell=True)
				subprocess.call('mv *25*.png bin_25/', shell=True)
				subprocess.call('mv *50*.png bin_50/', shell=True)
				subprocess.call('mv *.eps eps/', shell=True)
#ukdet=np.array([150.,150.,380.])
#ndet=np.array([288.,512.,512.])
#det_eff=.85
#ndays=15.*24*3600
#FPU=np.array([5,5,3])
#
#noise_const_array=ukdet*np.sqrt(4125.*60**2)/np.sqrt(FPU*ndet*det_eff*ndays)/pix_area_array*1e-6

#gamma_dust=6.626e-34/(1.38e-23*21)
#dust_factor=krj_to_kcmb*np.array([1e-6*(np.exp(gamma_dust*353e9)-1)/(np.exp(gamma_dust*x*1e9)-1)* (x/353.)**2.54 for x in bands])
#krj_to_kcmb=np.array([1.,1.,1.])
krj_to_kcmb=np.ones_like(bands)
#
sync_factor=krj_to_kcmb*np.array([1e-6*(30./x)**2 for x in bands])

beam=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)

bls=(hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)*hp.pixwin(nside_out)[:3*nside_out])**2


l=np.arange(3*nside_out)
ll=l*(l+1)/(2*np.pi)
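# ll converts angular power spectra to the flat convention
# D_l = l(l+1)C_l/(2*pi), and bls above is the squared beam-plus-pixel
# window. A minimal sketch of how they combine (hypothetical spectrum,
# not from this pipeline):
#   cl_example = np.ones(3*nside_out)
#   dl_example = ll*cl_example/bls   # beam/pixwin-corrected D_l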


def likelihood(cross,dcross,theory,name,title):

	Sig=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
	#Noise=np.std(np.sum(cross_array/dcross**2,axis=1)/np.sum(1./dcross**2))	\
	Noise=np.sqrt(1./np.sum(1./dcross**2))
	Sig1=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
	Noise1=np.sqrt(np.sum(dcross**2*(theory/dcross)**2)/np.sum((theory/dcross)**2))
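	# Sig is the inverse-variance (1/dcross**2) weighted mean of the band
	# powers and Noise its formal error sqrt(1/sum(1/dcross**2)); Sig1 and
	# Noise1 instead weight by (theory/dcross)**2, a template-weighted
	# estimate that emphasizes bins where the theory signal is large.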
Example #46
0
def main():	
	##Define Files used to make maps
	radio_file='/data/wmap/faraday_MW_realdata.fits'
	cl_file='/home/matt/wmap/simul_scalCls.fits.lens'
	output_prefix='/home/matt/Planck/data/faraday/simul_maps/'
	synchrotron_file='/data/Planck/COM_CompMap_SynchrotronPol-commander_0256_R2.00.fits'
	dust_file='/data/Planck/COM_CompMap_DustPol-commander_1024_R2.00.fits'
	gamma_dust=6.626e-34/(1.38e-23*21)
	
	##Define Parameters used to simulate Planck Fields
	bands=np.array([30.,44.,70.,100.,143.,217.,353.])
	#beam_fwhm=np.array([33.,24.,14.,10.,7.1,5.0,5.0])
	#noise_const_temp=np.array([2.0,2.7,4.7,2.5,2.2,4.8,14.7])*2.7255e-6
	#noise_const_pol=np.array([2.8,3.9,6.7,4.0,4.2,9.8,29.8])*2.7255e-6
	#beam_fwhm=np.array([33.,24.,14.,10.,7.1,5.0,5.0])
	beam_fwhm=np.array([32.29,27,13.21,9.67,7.26,4.96,4.93])
	

	pix_area_array=np.concatenate([np.repeat(hp.nside2pixarea(1024),3),np.repeat(hp.nside2pixarea(2048),4)])
	pix_area_array=np.sqrt(pix_area_array)*60*180./np.pi
	#beam_fwhm=np.array([33.,24.,14.,10.,7.1,5.0,5.0])
	#noise_const_temp=np.array([2.0,2.7,4.7,2.5,2.2,4.8,14.7])*2.7255e-6
	#noise_const_pol=np.array([2.8,3.9,6.7,4.0,4.2,9.8,29.8])*2.7255e-6
	noise_const_temp=np.array([2.5,2.7,3.5,1.29,.555,.78,2.56])/pix_area_array*60.e-6
	noise_const_pol=np.array([3.5,4.0,5.0,1.96,1.17,1.75,7.31])/pix_area_array*60.e-6
	krj_to_kcmb=np.array([1.0217,1.0517,np.mean([1.1360,1.1405,1.1348]), np.mean([1.3058,1.3057]),np.mean([1.6735,1.6727]),np.mean([3.2203,3.2336,3.2329,3.2161]),np.mean([14.261,14.106])])*1e-6
	sync_factor=krj_to_kcmb*np.array([20.*(.408/x)**2 for x in bands])
	dust_factor=krj_to_kcmb*np.array([163.e-6*(np.exp(gamma_dust*353e9)-1)/(np.exp(gamma_dust*x*1e9)-1)* (x/353)**2.54 for x in bands])
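	# gamma_dust = h/(k_B*21K): dust_factor rescales the 353 GHz template
	# to each band via a (nu/353)^2.54 power law times a ratio of Planck
	# factors, and sync_factor scales a 408 MHz-referenced synchrotron
	# amplitude as (0.408/nu)^2; krj_to_kcmb converts K_RJ to K_CMB.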
	nside=2048
	npix=hp.nside2npix(nside)
	pix_area=hp.nside2pixarea(nside)
	
	##Reverse order of arrays to simulate larger NSIDE maps first
	bands = bands[::-1]
	beam_fwhm= beam_fwhm[::-1]
	noise_const_temp = noise_const_temp[::-1]
	noise_const_pol = noise_const_pol[::-1]

	wl=np.array([299792458./(band*1e9) for band in bands])
	num_wl=len(wl)
	tqu_array=[]
	sigma_array=[]
	
	LFI=False
	LFI_IND=np.where(bands == 70)[0][0]

	cls=hp.read_cl(cl_file)
	print 'Generating Map'
	simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1);
	
	alpha_radio=hp.read_map(radio_file,hdu='maps/phi');
	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring')
	bl_40=hp.gauss_beam(40.*np.pi/(180.*60.),3*nside-1)
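	# The Commander synchrotron template comes pre-smoothed; dividing its
	# alm by bl_40 with hp.almxfl below deconvolves the assumed 40 arcmin
	# beam before the map is regridded to the target nside.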
	hdu_sync=fits.open(synchrotron_file)
	sync_q=hdu_sync[1].data.field(0)
	sync_u=hdu_sync[1].data.field(1)
	
	sync_q=hp.reorder(sync_q,n2r=1)
	tmp_alm=hp.map2alm(sync_q)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_40)
	sync_q=hp.alm2map(tmp_alm,nside)
	#sync_q=hp.smoothing(sync_q,fwhm=40.*np.pi/(180.*60.),verbose=False,invert=True)
	sync_q=hp.ud_grade(sync_q,nside_out=nside)
	
	sync_u=hp.reorder(sync_u,n2r=1)
	tmp_alm=hp.map2alm(sync_u)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_40)
	sync_u=hp.alm2map(tmp_alm,nside)
	#sync_u=hp.smoothing(sync_u,fwhm=40.*np.pi/(180.*60.),verbose=False,invert=True)
	sync_u=hp.ud_grade(sync_u,nside_out=nside)
	hdu_sync.close()
	

	bl_10=hp.gauss_beam(10*np.pi/(180.*60.),3*nside-1)
	hdu_dust=fits.open(dust_file)
	dust_q=hdu_dust[1].data.field(0)
	dust_u=hdu_dust[1].data.field(1)
	hdu_dust.close()
	
	dust_q=hp.reorder(dust_q,n2r=1)
	tmp_alm=hp.map2alm(dust_q)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_10)
	dust_q=hp.alm2map(tmp_alm,nside)
	#dust_q=hp.smoothing(dust_q,fwhm=10.0*np.pi/(180.*60.),verbose=False,invert=True)
	dust_q=hp.ud_grade(dust_q,nside)
	
	dust_u=hp.reorder(dust_u,n2r=1)
	tmp_alm=hp.map2alm(dust_u)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_10)
	dust_u=hp.alm2map(tmp_alm,nside)
	#dust_q=hp.smoothing(dust_q,fwhm=10.0*np.pi/(180.*60.),verbose=False,invert=True)
	dust_u=hp.ud_grade(dust_u,nside)

	nside=2048
	pix_area=hp.nside2pixarea(nside)
	
	prim=fits.PrimaryHDU()
	prim.header['COMMENT']="Simulated Planck Data with Polarization"
	prim.header['COMMENT']="Created using CAMB"
	#ipdb.set_trace()
	for i in range(num_wl):
		if LFI:
			nside=1024
			npix=hp.nside2npix(1024)
			simul_cmb=hp.ud_grade(simul_cmb,nside)
			alpha_radio=hp.ud_grade(alpha_radio,nside)
			sync_q=hp.ud_grade(sync_q,nside)
			sync_u=hp.ud_grade(sync_u,nside)
			dust_q=hp.ud_grade(dust_q,nside)
			dust_u=hp.ud_grade(dust_u,nside)
			pix_area=hp.nside2pixarea(nside)
		
		tmp_cmb=rotate_tqu(simul_cmb,wl[i],alpha_radio);
		tmp_didqdu=np.array([np.random.normal(0,1,npix)*noise_const_temp[i], np.random.normal(0,1,npix)*noise_const_pol[i] , np.random.normal(0,1,npix)*noise_const_pol[i]])
		tmp_tqu=np.copy(tmp_cmb)
		
		#Add Polarized Foreground emission
		tmp_tqu[1]+= np.copy( dust_factor[i]*dust_q+sync_factor[i]*sync_q    )
		tmp_tqu[2]+= np.copy( dust_factor[i]*dust_u+sync_factor[i]*sync_u    )
	#	tmp_tqu[1]+= np.copy(sync_factor[i]*sync_q)
	#	tmp_tqu[2]+= np.copy(sync_factor[i]*sync_u)
		tmp_tqu=hp.sphtfunc.smoothing(tmp_tqu,fwhm=beam_fwhm[i]*np.pi/(180.*60.),pol=1)
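		# fwhm is converted from arcmin to radians; pol=1 smooths (I,Q,U)
		# together, treating (Q,U) as a spin-2 field.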
	
		#Add Noise After smoothing
		#tmp_tqu+=tmp_didqdu 

		sig_hdu=fits.ImageHDU(tmp_tqu)
		sig_hdu.header['TFIELDS']=(len(tmp_tqu),'number of fields in each row')
		sig_hdu.header["TTYPE1"]=("STOKES I")
		sig_hdu.header["TTYPE2"]=("STOKES Q")
		sig_hdu.header["TTYPE3"]=("STOKES U")
		sig_hdu.header["TUNIT1"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		sig_hdu.header["TUNIT2"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		sig_hdu.header["TUNIT3"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		sig_hdu.header["TFORM1"]='E'
		sig_hdu.header["TFORM2"]='E'
		sig_hdu.header["TFORM3"]='E'
		
		sig_hdu.header["EXTNAME"]="STOKES IQU"
		sig_hdu.header['POLAR']= 'T'
		sig_hdu.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU')
		sig_hdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		sig_hdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		sig_hdu.header["NSIDE"]=(nside,'Healpix Resolution paramter')
		sig_hdu.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
		sig_hdu.header['OBS_NPIX']=(npix,'Number of pixels observed')
		sig_hdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT or EXPLICIT')
		sig_hdu.header["COORDSYS"]=('G','Pixelization coordinate system')
		


		err_hdu=fits.ImageHDU(tmp_didqdu)
		err_hdu.header['TFIELDS']=(len(tmp_didqdu),'number of fields in each row')
		err_hdu.header["TTYPE1"]=("UNCERTAINTY I")
		err_hdu.header["TTYPE2"]=("UNCERTAINTY Q")
		err_hdu.header["TTYPE3"]=("UNCERTAINTY U")
		err_hdu.header["TUNIT1"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		err_hdu.header["TUNIT2"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		err_hdu.header["TUNIT3"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		err_hdu.header["TFORM1"]='E'
		err_hdu.header["TFORM2"]='E'
		err_hdu.header["TFORM3"]='E'
		
		err_hdu.header["EXTNAME"]="UNCERTAINTIES"
		err_hdu.header['POLAR']= 'T'
		err_hdu.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU')
		err_hdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		err_hdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		err_hdu.header["NSIDE"]=(nside,'Healpix Resolution paramter')
		err_hdu.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
		err_hdu.header['OBS_NPIX']=(npix,'Number of pixels observed')
		err_hdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT or EXPLICIT')
		err_hdu.header["COORDSYS"]=('G','Pixelization coordinate system')


	#	ipdb.set_trace()
		tblist=fits.HDUList([prim,sig_hdu,err_hdu])
		tblist.writeto(output_prefix+'planck_simulated_{0:0>3.0f}.fits'.format(bands[i]),clobber=True)
		print "planck_simulated_{:0>3.0f}.fits".format(bands[i])
		print "Nside = {:0>4d}".format(nside)
		if i+1 == LFI_IND:
			LFI=True
l,m = hp.Alm.getlm(lmax)
n_lm = len(l)
ell = np.arange(0,lmax+1)

below_horizon = np.where(theta > np.pi/2.)[0]

nu = np.linspace(100,200,num=203)*u.MHz

# Define primary beam
#A = np.exp(-np.power(theta,2)/(2.*0.1))
#A = np.exp(-np.power(theta,2)/(2.*0.1))
A = np.ones_like(theta)
A[below_horizon] = 0.
a_lm = hp.map2alm(A,lmax=lmax)
Cl_A = hp.alm2cl(a_lm,lmax=lmax)
Cl_G = hp.gauss_beam(np.radians(90.),lmax)
A_nu = np.outer(A,np.ones_like(nu))

#apod = np.ones_like(theta)
#apod = np.exp(-np.power(theta,2)/(2.*0.5))
#disc = hp.query_disc(nside,[0,0,1],np.radians(80.))
#apod[disc] = 0.
#apod = 1-np.exp(-np.power(theta,2)/(2.*0.5))
#apod = 
#apod = np.power(np.sin(phi),2)

# Define fringe 
bmag = 30.
bvec = np.array([0,1,0])*bmag
b = np.outer(bvec,np.ones(npix))*u.m
s = np.array(hp.pix2vec(nside,ipix))
def faraday_correlate_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands,beam=False,polar_mask=False):
	print "Computing Cross Correlations for Bands "+str(bands)

	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	alpha_radio=hp.ud_grade(alpha_radio,1024)
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	sigma_i=hdu_i['Q/U UNCERTAINTIES'].data
	sigma_j=hdu_j['Q/U UNCERTAINTIES'].data
	field_pixels=hdu_i['SQUARE PIXELS'].data
	
	q_fwhm=[27.3,11.7]
	noise_const=np.array([36./f for f in q_fwhm])*1e-6
	npix=hp.nside2npix(1024)
	sigma_i=[noise_const[0]*np.random.normal(0,1,npix),noise_const[1]*np.random.normal(0,1,npix)]
	sigma_j=[noise_const[0]*np.random.normal(0,1,npix),noise_const[1]*np.random.normal(0,1,npix)]
	iqu_band_i[1]+=sigma_i[0]
	iqu_band_i[2]+=sigma_i[1]
	iqu_band_j[1]+=sigma_j[0]
	iqu_band_j[2]+=sigma_j[1]
	hdu_i.close()
	hdu_j.close()
	
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),lmax=383)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),lmax=383)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]
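	# Faraday rotation by alpha*lambda**2 mixes Q and U, so to first order
	# the band difference (Q_i-Q_j)/(2*(wl_i**2-wl_j**2)) traces alpha*U
	# (and Delta_U traces alpha*Q, up to sign convention); hence Delta_Q
	# is cross-correlated with alpha_u and Delta_U with alpha_q below.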
	
	if polar_mask:
		P=np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)
		bad_pix=np.where( P < .2e-6)
		Delta_Q[bad_pix]=0
		Delta_U[bad_pix]=0
		alpha_u[bad_pix]=0
		alpha_q[bad_pix]=0

	cross1_array=[]
	cross2_array=[]
	cross3_array=[]
	L=15*(np.pi/180.)
	k=np.arange(1,np.round(500*(L/(2*np.pi))))
	l=2*np.pi*k/L
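	# Flat-sky correspondence: on a patch of side L radians, Fourier mode
	# k maps to multipole l = 2*pi*k/L; here 2*pi/L = 24, so k = 1..20
	# spans l ~ 24..480.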
	Bl_factor=np.repeat(1,len(k))
	if beam:
		Bl_factor=hp.gauss_beam(np.pi/180.,383)
	for field1 in xrange(4):
		pix_cmb=field_pixels.field(field1)	
		nx=int(np.sqrt(pix_cmb.shape[0]))
		flat_dq=np.reshape(Delta_Q[pix_cmb],(nx,nx))	
		flat_du=np.reshape(Delta_U[pix_cmb],(nx,nx))
		flat_aq=np.reshape(alpha_q[pix_cmb],(nx,nx))	
		flat_au=np.reshape(alpha_u[pix_cmb],(nx,nx))	
		
		dq_alm=fft.fftshift(fft.fft2(flat_dq,shape=[450,450]))
		du_alm=fft.fftshift(fft.fft2(flat_du,shape=[450,450]))
		aq_alm=fft.fftshift(fft.fft2(flat_aq,shape=[450,450]))
		au_alm=fft.fftshift(fft.fft2(flat_au,shape=[450,450]))
		pw2d_qau=np.real(dq_alm*np.conjugate(au_alm))		
		pw2d_uaq=np.real(du_alm*np.conjugate(aq_alm))		
		pw1d_qau=radialProfile.azimuthalAverage(pw2d_qau)
		pw1d_uaq=radialProfile.azimuthalAverage(pw2d_uaq)
		tmp_cl1=pw1d_qau[k.astype(int)-1]*L**2
		tmp_cl2=pw1d_uaq[k.astype(int)-1]*L**2
		#	index=np.where( (np.sqrt(x**2+y**2) <= k[num_k] +1)  & ( np.sqrt(x**2 + y**2) >= k[num_k] -1) )
		#	tmp1= np.sum(pw2d_qau[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp2= np.sum(pw2d_uaq[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp_cl1[num_k]=L**2*tmp1
		#	tmp_cl2[num_k]=L**2*tmp2
		cross1_array.append(tmp_cl1/Bl_factor)
		cross2_array.append(tmp_cl2/Bl_factor)
	
	cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	hp.write_cl('cl_'+bands+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands+'_FR_UxaQ.fits',cross2)
	return (cross1,cross2)
def faraday_correlate_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands,beam=False):
	print "Computer Cross Correlations for Bands "+str(bands)

	temperature_file='/data/Planck/COM_CompMap_CMB-smica_2048.fits'
	planck_T=hp.read_map(temperature_file)
	planck_T*=1e-6

	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	sigma_i=hdu_i['Q/U UNCERTAINTIES'].data
	sigma_j=hdu_j['Q/U UNCERTAINTIES'].data
	#mask_hdu=fits.open('/data/wmap/wmap_polarization_analysis_mask_r9_9yr_v5.fits')
	#mask=mask_hdu[1].data.field(0)
	#mask_hdu.close()
	#mask=hp.reorder(mask,n2r=1)
	#mask=hdu_i['mask'].data
	#mask=hp.ud_grade(mask,nside_out=128)
	#pix=np.where(mask != 0)
	#pix=np.array(pix).reshape(len(pix[0]))
	#pix_bad=np.where(mask == 0)
	field_pixels=hdu_i['FIELD PIXELS'].data
	iqu_band_i[1]+=sigma_i[0]
	iqu_band_i[2]+=sigma_i[1]
	iqu_band_j[1]+=sigma_j[0]
	iqu_band_j[2]+=sigma_j[1]
	hdu_i.close()
	hdu_j.close()
	
	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=128,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=128,order_in='ring')
	planck_T=hp.ud_grade(planck_T,nside_out=128,order_in='ring')
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.pi/180.,lmax=383)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.pi/180.,lmax=383)
	planck_T=hp.smoothing(planck_T,fwhm=np.pi/180.,lmax=383)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	P=np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)
	weights=np.ones(len(P))
	num,bins,junk=plt.hist(P,bins=40)
	index=np.argmax(num)
	weights[np.where(P <= bins[index+1]/2.)]=.75
	weights[np.where(P <= bins[index+1]/4.)]=.5
	weights[np.where(P <= bins[index+1]/8.)]=.25
	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const*weights
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const*weights
	alpha_u=alpha_radio*iqu_band_j[2]*weights
	alpha_q=-alpha_radio*iqu_band_j[1]*weights

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	cross1_array=[]
	cross2_array=[]
	cross3_array=[]
	if beam:
		l=np.arange(3*128)
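		# Gaussian beam window: B_l = exp(-l(l+1)*sigma**2/2) with
		# sigma = FWHM/sqrt(8*ln2), equivalent to hp.gauss_beam.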
		Bl_60=np.exp(-l*(l+1)*((60.0*np.pi/(180.*60.)/(np.sqrt(8.0*np.log(2.))))**2)/2.)
		Bl_11=np.exp(-l*(l+1)*((11.7*np.pi/(180.*60.)/(np.sqrt(8.0*np.log(2.))))**2)/2.)
		Bl_27=np.exp(-l*(l+1)*((27.3*np.pi/(180.*60.)/(np.sqrt(8.0*np.log(2.))))**2)/2.)
		Bl_factor=Bl_60**2*Bl_11*Bl_27
	else:
		Bl_factor=hp.gauss_beam(11.7*np.pi/(180.*60),lmax=383)*hp.gauss_beam(27.3*np.pi/(180.*60.),lmax=383)

	for field1 in xrange(4):
		mask_bool1=np.repeat(True,len(Delta_Q))
		pix_cmb1=field_pixels.field(field1)	
		pix_cmb1=pix_cmb1[np.nonzero(pix_cmb1)]	##Take Pixels From Field 1
		tmp=np.zeros(hp.nside2npix(1024))
		tmp[pix_cmb1]=1
		tmp=hp.ud_grade(tmp,128)
		mask_bool1[np.nonzero(tmp)]=False
	#	mask_bool1[np.where(P<.7e-6)]=True
		DQm.mask=mask_bool1
		DUm.mask=mask_bool1
		aQm.mask=mask_bool1
		aUm.mask=mask_bool1

		TE_map=np.array([planck_T*alpha_radio,Delta_Q,Delta_U])
		TEm=hp.ma(TE_map)
		TEm[0].mask=mask_bool1
		TEm[1].mask=mask_bool1
		TEm[2].mask=mask_bool1
		
		cross1_array.append(hp.anafast(DQm,map2=aUm)/Bl_factor)
		cross2_array.append(hp.anafast(DUm,map2=aQm)/Bl_factor)
		cross_tmp=hp.anafast(TEm,pol=1,nspec=4)
		cross3_array.append(cross_tmp[-1]/Bl_factor)
	
	cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	cross3=np.mean(cross3_array,axis=0)	##Average over all Cross Spectra
	hp.write_cl('cl_'+bands+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands+'_FR_UxaQ.fits',cross2)
	hp.write_cl('cl_'+bands+'_FR_TE_cmb.fits',cross3)
	return (cross1,cross2,cross3)
Example #50
0
def get_bl(beam_FWHM_amin,ell_max = 20000):
    """ Transfer function """
    return hp.gauss_beam(beam_FWHM_amin * np.pi / 180. / 60., lmax= ell_max)
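# A minimal usage sketch (hypothetical values; assumes healpy as hp and
# numpy as np are imported):
#   bl = get_bl(5.0, ell_max=2000)           # 5 arcmin FWHM window
#   cl_deconv = cl_obs/bl[:len(cl_obs)]**2   # cl_obs is a hypothetical spectrum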
local_nl = order.local_size()
print("rank", rank, "local_nl", local_nl, "mval", order.mval())

mpi_comm = MPI.COMM_WORLD if mpi else None

# map2alm
# maps in libsharp are 3D, 2nd dimension is IQU, 3rd is pixel

alm_sharp_I = libsharp.analysis(grid, order,
                                np.ascontiguousarray(local_map[0].reshape((1, 1, -1))),
                                spin=0, comm=mpi_comm)
alm_sharp_P = libsharp.analysis(grid, order,
                                np.ascontiguousarray(local_map[1:].reshape((1, 2, -1))),
                                spin=2, comm=mpi_comm)

beam = hp.gauss_beam(fwhm=np.radians(fwhm_deg), lmax=lmax, pol=True)
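# With pol=True, hp.gauss_beam returns one window per column (temperature
# first, then polarization); column 0 is applied to the intensity alm and
# columns 1-2 to the spin-2 alm below.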

print("Smooth")
# smooth in place (zonca implemented this function)
order.almxfl(alm_sharp_I, np.ascontiguousarray(beam[:, 0:1]))
order.almxfl(alm_sharp_P, np.ascontiguousarray(beam[:, (1, 2)]))

# alm2map

new_local_map_I = libsharp.synthesis(grid, order, alm_sharp_I, spin=0, comm=mpi_comm)
new_local_map_P = libsharp.synthesis(grid, order, alm_sharp_P, spin=2, comm=mpi_comm)

# Transfer map to first process for writing

local_full_map = np.zeros(input_map.shape, dtype=np.float64)
local_full_map[0, local_pix] = new_local_map_I
Example #52
0
hp.mollview(radio_map, norm='hist', unit='$K_{CMB}$')
plt.savefig('mwa_1hr_raw.png', format='png')
plt.close()

cross_cls = hp.anafast(cmb_map,radio_fr)
radio_cls = hp.anafast(radio_fr)
cmb_cls = hp.anafast(cmb_map)



lmax = len(cross_cls)
beam_lmax = lmax
l = np.arange(2,beam_lmax)
ll = l*(l+1)/(2*np.pi)
beam_3 = hp.gauss_beam(3.*np.pi/(180.*60.),beam_lmax-1)[2:]
beam_5 = hp.gauss_beam(5.*np.pi/(180.*60.),beam_lmax-1)[2:]
pix = hp.pixwin(256)[2:beam_lmax]

theory_cls= hp.read_cl(theory_cl_file)
theory_cls=theory_cls[0][2:beam_lmax]
#theory_cls[:2]=1e-10

cross_cls = cross_cls[2:beam_lmax]
radio_cls = radio_cls[2:beam_lmax]
cmb_cls = cmb_cls[2:beam_lmax]

wls = hp.anafast((~radio_fr.mask).astype(float))[:beam_lmax]
fskyw2 = np.sum([(2*m+1)*wls[m] for m in xrange(1, len(wls))])/(4*np.pi)
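# fskyw2 sums (2l+1)W_l/(4*pi) over the mask pseudo-spectrum (monopole
# dropped here), an f_sky,w2-style normalization for cut-sky pseudo-C_l
# estimates.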

fsky = 1. - np.sum(mask_bool).astype(float)/len(mask_bool)
def main():
	##Parameters for Binning, Number of Runs
	##	Beam correction
	use_beam=0
#	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),383)**2
	#bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	bls=(hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)*hp.pixwin(nside_out)[:3*nside_out])**2
	N_runs=500
	bins=[1,5,10,20,50]

	map_prefix='/home/matt/quiet/quiet_maps/'
	i_file=map_prefix+'quiet_simulated_43.1'
	j_file=map_prefix+'quiet_simulated_94.5'
	alpha_file='/data/wmap/faraday_MW_realdata.fits'
	bands=[43.1,94.5]
	names=['43','95']
	wl=np.array([299792458./(band*1e9) for band in bands])
	cross1_array_in=[]
	cross2_array_in=[]
	Ndq_array_in=[]
	Ndu_array_in=[]
	Nau_array_in=[]
	Naq_array_in=[]
	noise1_array_in=[]
	noise2_array_in=[]
	theory1_array_in=[]
	theory2_array_in=[]
	

	#simulate_fields.main()
	ttmp1,ttmp2=faraday_theory_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam)
	theory1_array_in.append(ttmp1)
	theory2_array_in.append(ttmp2)
	#for n in xrange(N_runs):
	for i in xrange(N_runs):	
		print(Fore.WHITE+Back.GREEN+Style.BRIGHT+'Correlation #{:03d}'.format(i+1)+Back.RESET+Fore.RESET+Style.RESET_ALL)
		tmp1,tmp2,n1,n2,n3,n4=faraday_correlate_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam)
	#	ntmp1,ntmp2=faraday_noise_quiet(i_file+'.fits',j_file+'.fits',wl[0],wl[1],alpha_file,names[0]+'x'+names[1],beam=use_beam)
		cross1_array_in.append(tmp1)
		cross2_array_in.append(tmp2)
		Ndq_array_in.append(n1)
		Ndu_array_in.append(n2)
		Nau_array_in.append(n3)
		Naq_array_in.append(n4)
	#	noise1_array_in.append(ntmp1)
	#	noise2_array_in.append(ntmp2)


	f=open('cl_theory_FR_QxaU.json','w')
	json.dump(np.array(theory1_array_in).tolist(),f)
	f.close()	
	f=open('cl_theory_FR_UxaQ.json','w')
	json.dump(np.array(theory2_array_in).tolist(),f)
	f.close()	
	theory1=np.mean(theory1_array_in,axis=0)
	theory2=np.mean(theory2_array_in,axis=0)
	hp.write_cl('cl_theory_FR_QxaU.fits',theory1)
	hp.write_cl('cl_theory_FR_UxaQ.fits',theory2)
	#f=open('cl_theory_FR_QxaU.json','r')
	#theory1_array=json.load(f)
	#f.close()	
	#f=open('cl_theory_FR_UxaQ.json','r')
	#theory2_array=json.load(f)
	#f.close()	
	f=open('cl_array_FR_QxaU.json','w')
	json.dump(np.array(cross1_array_in).tolist(),f)
	f.close()	
	f=open('cl_array_FR_UxaQ.json','w')
	json.dump(np.array(cross2_array_in).tolist(),f)
	f.close()	
	f=open('cl_Ndq_FR_QxaU.json','w')
	json.dump(np.array(Ndq_array_in).tolist(),f)
	f.close()	
	f=open('cl_Ndu_FR_UxaQ.json','w')
	json.dump(np.array(Ndu_array_in).tolist(),f)
	f.close()	
	f=open('cl_Nau_FR_QxaU.json','w')
	json.dump(np.array(Nau_array_in).tolist(),f)
	f.close()	
	f=open('cl_Naq_FR_UxaQ.json','w')
	json.dump(np.array(Naq_array_in).tolist(),f)
	f.close()	
	#f=open('cl_noise_FR_QxaU.json','w')
	#json.dump(np.array(noise1_array_in).tolist(),f)
	#f.close()	
	#f=open('cl_noise_FR_UxaQ.json','w')
	#json.dump(np.array(noise2_array_in).tolist(),f)
	#f.close()	
	bins=[1,5,10,20,25,50]
	fsky=225.*(np.pi/180.)**2/(4*np.pi)
	l=np.arange(len(cross1_array_in[0]))
	ll=l*(l+1)/(2*np.pi)
	L=np.sqrt(fsky*4*np.pi)
	dl_eff=2*np.pi/L
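	# For a 225 deg^2 patch: fsky = 225*(pi/180)**2/(4*pi) ~ 0.0055,
	# L = sqrt(fsky*4*pi) ~ 0.262 rad (15 deg), and dl_eff = 2*pi/L ~ 24,
	# the effective multipole resolution of the field; this is why bins
	# with plot_l < 24 are flagged below.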
	
	theory1_array_in=np.array(theory1_array_in)/(fsky*bls)
	theory2_array_in=np.array(theory2_array_in)/(fsky*bls)
	cross1_array_in=np.array(cross1_array_in)/(fsky*bls)
	cross2_array_in=np.array(cross2_array_in)/(fsky*bls)
	Ndq_array_in=np.array(Ndq_array_in)/(fsky)
	Ndu_array_in=np.array(Ndu_array_in)/(fsky)
	Nau_array_in=np.array(Nau_array_in)/(fsky)
	Naq_array_in=np.array(Naq_array_in)/(fsky)
	#noise1_array_in=np.array(noise1_array_in)/(fsky*bls)
	#noise2_array_in=np.array(noise2_array_in)/(fsky*bls)

	Ndq_array_in.shape += (1,)
	Ndu_array_in.shape += (1,)
	Nau_array_in.shape += (1,)
	Naq_array_in.shape += (1,)


	for b in bins:
		theory_cls=hp.read_cl('/home/matt/Planck/data/faraday/correlation/fr_theory_cl.fits')
	#	N_dq=np.mean(Ndq_array_in)
	#	N_au=np.mean(Nau_array_in)
	#	#delta1=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))**2+(np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#	delta1=np.sqrt(2.*((np.mean(theory1_array_in,axis=0))**2+(np.mean(theory1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#
		cosmic1=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=0)**2)

	#	N_du=np.mean(Ndu_array_in)
	#	N_aq=np.mean(Naq_array_in)
	#	#delta2=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))**2+(np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#	delta2=np.sqrt(2.*((np.mean(theory2_array_in,axis=0))**2+(np.mean(theory2_array_in,axis=0))/2.*(N_du+N_aq)+N_du*N_aq/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
		cosmic2=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=0)**2)

		theory1_array=[]
		theory2_array=[]
		cross1_array=[]
		cross2_array=[]
		#	noise1_array=[]
		#	noise2_array=[]

		Ndq_array=[]
		Ndu_array=[]
		Nau_array=[]
		Naq_array=[]

		plot_l=[]
		if( b != 1):
			tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in,b)
			tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in,b)
			tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in,b)
			tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in,b)
		#	tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in,b)
		#	tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in,b)

			theory1_array=tmp_t1['llcl']
			theory2_array=tmp_t2['llcl']
			theory1_array.shape += (1,)
			theory2_array.shape += (1,)
			theory1_array=theory1_array.T
			theory2_array=theory2_array.T
			plot_l= tmp_t1['l_out']
			cross1_array=tmp_c1['llcl']
			cross2_array=tmp_c2['llcl']
			
		#	noise1_array=tmp_n1['llcl']
		#	noise2_array=tmp_n2['llcl']
	        	
			Ndq_array=bin_llcl.bin_llcl(ll*Ndq_array_in,b)['llcl']
			Ndu_array=bin_llcl.bin_llcl(ll*Ndu_array_in,b)['llcl']
			Naq_array=bin_llcl.bin_llcl(ll*Naq_array_in,b)['llcl']
			Nau_array=bin_llcl.bin_llcl(ll*Nau_array_in,b)['llcl']
			tmp_c1=bin_llcl.bin_llcl((ll*cosmic1)**2,b)
			#tmp_d1=bin_llcl.bin_llcl((ll*delta1)**2,b)
		
			cosmic1=np.sqrt(tmp_c1['llcl'])
			#delta1=np.sqrt(tmp_d1['llcl'])

			tmp_c2=bin_llcl.bin_llcl((ll*cosmic2)**2,b)
			#tmp_d2=bin_llcl.bin_llcl((ll*delta2)**2,b)
			cosmic2=np.sqrt(tmp_c2['llcl'])
			#delta2=np.sqrt(tmp_d2['llcl'])
			t_tmp=bin_llcl.bin_llcl(ll*theory_cls,b)
			theory_cls=t_tmp['llcl']
		else:
			plot_l=l
			theory1_array=np.multiply(ll,theory1_array_in)
			cross1_array=np.multiply(ll,cross1_array_in)
		#	noise1_array=np.multiply(ll,noise1_array_in)
			theory2_array=np.multiply(ll,theory2_array_in)
			cross2_array=np.multiply(ll,cross2_array_in)
		#	noise2_array=np.multiply(ll,noise2_array_in)
			cosmic1*=ll
			cosmic2*=ll
			#delta1*=ll
			#delta2*=ll
			Ndq_array=np.multiply(ll,Ndq_array_in)
			Ndu_array=np.multiply(ll,Ndu_array_in)
			Naq_array=np.multiply(ll,Naq_array_in)
			Nau_array=np.multiply(ll,Nau_array_in)
			theory_cls*=ll
		#ipdb.set_trace()
		bad=np.where(plot_l < 24)
		N_dq=np.mean(Ndq_array,axis=0)
		N_du=np.mean(Ndu_array,axis=0)
		N_aq=np.mean(Naq_array,axis=0)
		N_au=np.mean(Nau_array,axis=0)
		#noise1=np.mean(noise1_array,axis=0)
		#noise2=np.mean(noise2_array,axis=0)
		theory1=np.mean(theory1_array,axis=0)
		theory2=np.mean(theory2_array,axis=0)
		theory_array = np.add(theory1_array,theory2_array)
		theory=np.mean(theory_array,axis=0)
		#dtheory=np.sqrt(np.var(theory1_array,ddof=1) + np.var(theory2_array,ddof=1))
		#cross_array = np.add(np.subtract(cross1_array,noise1),np.subtract(cross2_array,noise2))
		cross_array = np.add(cross1_array,cross2_array)
		cross=np.mean(cross_array,axis=0)
		#dcross=np.std(cross_array,axis=0,ddof=1)
		dcross=np.sqrt( ( np.var(cross1_array,axis=0,ddof=1) + np.var(cross2_array,axis=0,ddof=1)))
		cosmic=np.sqrt(cosmic1**2+cosmic2**2)

		delta1=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory1**2 + theory1*(N_dq+N_au)/2. + N_dq*N_au/2.))
		delta2=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory2**2 + theory2*(N_du+N_aq)/2. + N_du*N_aq/2.))
		delta=np.sqrt(delta1**2+delta2**2)
		#cosmic=np.abs(theory_cls)*np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(dl_eff**2+b**2)))
		#theory1=np.mean(theory1_array,axis=0)
		#dtheory1=np.std(theory1_array,axis=0,ddof=1)
		#cross1=np.mean(cross1_array,axis=0)
		#dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
		#ipdb.set_trace()
		plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'Cross_43x95_FR', title='QUIET FR Correlator',theory=theory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)

		#theory2=np.mean(theory2_array,axis=0)
		#dtheory2=np.std(theory2_array,axis=0,ddof=1)
		#cross2=np.mean(cross2_array,axis=0)
		##delta2=np.mean(delta2_array,axis=0)
		#dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
		##ipdb.set_trace()
		#plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
		#ipdb.set_trace()
    
		if b == 25 :
			good_l=np.logical_and(plot_l <= 200,plot_l >25)
			likelihood(cross[good_l],delta[good_l],theory[good_l],'field1','c2bfr')

		#if b == 1 :
		#	xbar= np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
		#	vector=np.matrix(ll[1:]*cross[1:]).T
		#	mu=np.matrix(ll[1:]*theory[1:]).T
		#	fact=len(xbar)-1
		#	cov=(np.dot(xbar,xbar.T)/fact).squeeze()
		##	ipdb.set_trace()
		#	U,S,V =np.linalg.svd(cov)
		#	_cov= np.einsum('ij,j,jk', V.T,1./S,U.T)
		#	likelhd=np.exp(-np.dot(np.dot((vector-mu).T,_cov),(vector-mu))/2. )/(np.sqrt(2*np.pi*np.prod(S)))
		##	print('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
		#	f=open('FR_likelihood.txt','w')
		#	f.write('Likelihood of fit is #{0:.5f}'.format(likelhd[0,0]))
		#	f.close()

	subprocess.call('mv *01*.png bin_01/', shell=True)
	subprocess.call('mv *05*.png bin_05/', shell=True)
	subprocess.call('mv *10*.png bin_10/', shell=True)
	subprocess.call('mv *20*.png bin_20/', shell=True)
	subprocess.call('mv *25*.png bin_25/', shell=True)
	subprocess.call('mv *50*.png bin_50/', shell=True)
	subprocess.call('mv *.eps eps/', shell=True)
def faraday_theory_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands_name,beam=False,polar_mask=False):
	print "Computing Cross Correlations for Bands "+str(bands_name)

#	radio_file='/data/wmap/faraday_MW_realdata.fits'
#	cl_file='/home/matt/wmap/simul_scalCls.fits'
#	nside=1024
#	npix=hp.nside2npix(nside)
#	
#	cls=hp.read_cl(cl_file)
#	simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1);
#	
#	alpha_radio=hp.read_map(radio_file,hdu='maps/phi');
#	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring')
#	bands=[43.1,94.5]
	q_fwhm=[27.3,11.7]
#	wl=np.array([299792458./(band*1e9) for band in bands])
#	num_wl=len(wl)
#	t_array=np.zeros((num_wl,npix))	
#	q_array=np.zeros((num_wl,npix))
#	u_array=np.zeros((num_wl,npix))
#	for i in range(num_wl):
#		tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl[i],alpha_radio);
#		t_array[i],q_array[i],u_array[i]=tmp_cmb
#	iqu_band_i=[t_array[0],q_array[0],u_array[0]]	
#	iqu_band_j=[t_array[1],q_array[1],u_array[1]]	


	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	alpha_radio=hp.ud_grade(alpha_radio,1024)
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	field_pixels=hdu_i['SQUARE PIXELS'].data
	hdu_i.close()
	hdu_j.close()
	
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),lmax=383)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),lmax=383)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]
	
	if polar_mask:
		P=np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)
		bad_pix=np.where( P < .2e-6)
		Delta_Q[bad_pix]=0
		Delta_U[bad_pix]=0
		alpha_u[bad_pix]=0
		alpha_q[bad_pix]=0

	cross1_array=[]
	cross2_array=[]
	L=15*(np.pi/180.)
	k=np.arange(1,np.round(500*(L/(2*np.pi))))
	l=2*np.pi*k/L
	Bl_factor=np.repeat(1,len(k))
	if beam:
		Bl_factor=hp.gauss_beam(np.pi/180.,383)
	for field1 in xrange(4):
		pix_cmb=field_pixels.field(field1)	
		nx=int(np.sqrt(pix_cmb.shape[0]))
		flat_dq=np.reshape(Delta_Q[pix_cmb],(nx,nx))	
		flat_du=np.reshape(Delta_U[pix_cmb],(nx,nx))
		flat_aq=np.reshape(alpha_q[pix_cmb],(nx,nx))	
		flat_au=np.reshape(alpha_u[pix_cmb],(nx,nx))	
		
		dq_alm=fft.fftshift(fft.fft2(flat_dq,shape=[450,450]))
		du_alm=fft.fftshift(fft.fft2(flat_du,shape=[450,450]))
		aq_alm=fft.fftshift(fft.fft2(flat_aq,shape=[450,450]))
		au_alm=fft.fftshift(fft.fft2(flat_au,shape=[450,450]))
	
		pw2d_qau=np.real(dq_alm*np.conjugate(au_alm))		
		pw2d_uaq=np.real(du_alm*np.conjugate(aq_alm))		
		pw1d_qau=radialProfile.azimuthalAverage(pw2d_qau)
		pw1d_uaq=radialProfile.azimuthalAverage(pw2d_uaq)
		tmp_cl1=pw1d_qau[k.astype(int)-1]*L**2
		tmp_cl2=pw1d_uaq[k.astype(int)-1]*L**2
		#	index=np.where( (np.sqrt(x**2+y**2) <= k[num_k] +1)  & ( np.sqrt(x**2 + y**2) >= k[num_k] -1) )
		#	tmp1= np.sum(pw2d_qau[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp2= np.sum(pw2d_uaq[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp_cl1[num_k]=L**2*tmp1
		#	tmp_cl2[num_k]=L**2*tmp2
		cross1_array.append(tmp_cl1/Bl_factor)
		cross2_array.append(tmp_cl2/Bl_factor)

	cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	hp.write_cl('cl_'+bands_name+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands_name+'_FR_UxaQ.fits',cross2)
	return (cross1,cross2)
def plot_mc():

	f=open('cl_theory_FR_QxaU.json','r')
	theory1_array_in=json.load(f)
	f.close()	
	f=open('cl_theory_FR_UxaQ.json','r')
	theory2_array_in=json.load(f)
	f.close()	
	f=open('cl_array_FR_QxaU.json','r')
	cross1_array_in=json.load(f)
	f.close()	
	f=open('cl_array_FR_UxaQ.json','r')
	cross2_array_in=json.load(f)
	f.close()	
#	f=open('cl_noise_FR_QxaU.json','r')
#	noise1_array_in=json.load(f)
#	f.close()	
#	f=open('cl_noise_FR_UxaQ.json','r')
#	noise2_array_in=json.load(f)
#	f.close()	
	f=open('cl_Nau_FR_QxaU.json','r')
	Nau_array_in=json.load(f)
	f.close()	
	f=open('cl_Ndq_FR_QxaU.json','r')
	Ndq_array_in=json.load(f)
	f.close()	
	f=open('cl_Naq_FR_UxaQ.json','r')
	Naq_array_in=json.load(f)
	f.close()	
	f=open('cl_Ndu_FR_UxaQ.json','r')
	Ndu_array_in=json.load(f)
	f.close()	
	
	bins=[1,5,10,20,25,50]
	N_runs=500
#	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),383)**2
	#bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	bls=(hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)*hp.pixwin(nside_out)[:3*nside_out])**2
	#bls=np.repeat(1,3*nside_out)
	fsky=225.*(np.pi/180.)**2/(4*np.pi)
	l=np.arange(len(cross1_array_in[0]))
	ll=l*(l+1)/(2*np.pi)
	L=np.sqrt(fsky*4*np.pi)
	dl_eff=2*np.pi/L

	theory1_array_in=np.array(theory1_array_in)/(fsky*bls)
	theory2_array_in=np.array(theory2_array_in)/(fsky*bls)
	cross1_array_in=np.array(cross1_array_in)/(fsky*bls)
	cross2_array_in=np.array(cross2_array_in)/(fsky*bls)
	Ndq_array_in=np.array(Ndq_array_in)/(fsky)
	Ndu_array_in=np.array(Ndu_array_in)/(fsky)
	Nau_array_in=np.array(Nau_array_in)/(fsky)
	Naq_array_in=np.array(Naq_array_in)/(fsky)
	#noise1_array_in=np.array(noise1_array_in)/(fsky*bls)
	#noise2_array_in=np.array(noise2_array_in)/(fsky*bls)

	Ndq_array_in.shape += (1,)
	Ndu_array_in.shape += (1,)
	Nau_array_in.shape += (1,)
	Naq_array_in.shape += (1,)


	for b in bins:
		theory_cls=hp.read_cl('/home/matt/Planck/data/faraday/correlation/fr_theory_cl.fits')
	#	N_dq=np.mean(Ndq_array_in)
	#	N_au=np.mean(Nau_array_in)
	#	#delta1=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))**2+(np.mean(cross1_array_in,axis=0)-np.mean(noise1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#	delta1=np.sqrt(2.*((np.mean(theory1_array_in,axis=0))**2+(np.mean(theory1_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#
		cosmic1=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=0)**2)

	#	N_du=np.mean(Ndu_array_in)
	#	N_aq=np.mean(Naq_array_in)
	#	#delta2=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))**2+(np.mean(cross2_array_in,axis=0)-np.mean(noise2_array_in,axis=0))/2.*(N_dq+N_au)+N_dq*N_au/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
	#	delta2=np.sqrt(2.*((np.mean(theory2_array_in,axis=0))**2+(np.mean(theory2_array_in,axis=0))/2.*(N_du+N_aq)+N_du*N_aq/2.)/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
		cosmic2=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=0)**2)

		theory1_array=[]
		theory2_array=[]
		cross1_array=[]
		cross2_array=[]
		#	noise1_array=[]
		#	noise2_array=[]

		Ndq_array=[]
		Ndu_array=[]
		Nau_array=[]
		Naq_array=[]

		plot_l=[]
		if( b != 1):
			tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in,b)
			tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in,b)
			tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in,b)
			tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in,b)
		#	tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in,b)
		#	tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in,b)

			theory1_array=tmp_t1['llcl']
			theory2_array=tmp_t2['llcl']
			theory1_array.shape += (1,)
			theory2_array.shape += (1,)
			theory1_array=theory1_array.T
			theory2_array=theory2_array.T
			plot_l= tmp_t1['l_out']
			cross1_array=tmp_c1['llcl']
			cross2_array=tmp_c2['llcl']
			
		#	noise1_array=tmp_n1['llcl']
		#	noise2_array=tmp_n2['llcl']
	        	
			Ndq_array=bin_llcl.bin_llcl(ll*Ndq_array_in,b)['llcl']
			Ndu_array=bin_llcl.bin_llcl(ll*Ndu_array_in,b)['llcl']
			Naq_array=bin_llcl.bin_llcl(ll*Naq_array_in,b)['llcl']
			Nau_array=bin_llcl.bin_llcl(ll*Nau_array_in,b)['llcl']
			tmp_c1=bin_llcl.bin_llcl((ll*cosmic1)**2,b)
			#tmp_d1=bin_llcl.bin_llcl((ll*delta1)**2,b)
		
			cosmic1=np.sqrt(tmp_c1['llcl'])
			#delta1=np.sqrt(tmp_d1['llcl'])

			tmp_c2=bin_llcl.bin_llcl((ll*cosmic2)**2,b)
			#tmp_d2=bin_llcl.bin_llcl((ll*delta2)**2,b)
			cosmic2=np.sqrt(tmp_c2['llcl'])
			#delta2=np.sqrt(tmp_d2['llcl'])
			t_tmp=bin_llcl.bin_llcl(ll*theory_cls,b)
			theory_cls=t_tmp['llcl']
		else:
			plot_l=l
			theory1_array=np.multiply(ll,theory1_array_in)
			cross1_array=np.multiply(ll,cross1_array_in)
		#	noise1_array=np.multiply(ll,noise1_array_in)
			theory2_array=np.multiply(ll,theory2_array_in)
			cross2_array=np.multiply(ll,cross2_array_in)
		#	noise2_array=np.multiply(ll,noise2_array_in)
			cosmic1*=ll
			cosmic2*=ll
			#delta1*=ll
			#delta2*=ll
			Ndq_array=np.multiply(ll,Ndq_array_in)
			Ndu_array=np.multiply(ll,Ndu_array_in)
			Naq_array=np.multiply(ll,Naq_array_in)
			Nau_array=np.multiply(ll,Nau_array_in)
			theory_cls*=ll
		#ipdb.set_trace()
		bad=np.where(plot_l < 24)
		N_dq=np.mean(Ndq_array,axis=0)
		N_du=np.mean(Ndu_array,axis=0)
		N_aq=np.mean(Naq_array,axis=0)
		N_au=np.mean(Nau_array,axis=0)
		#noise1=np.mean(noise1_array,axis=0)
		#noise2=np.mean(noise2_array,axis=0)
		theory1=np.mean(theory1_array,axis=0)
		theory2=np.mean(theory2_array,axis=0)
		theory_array = np.add(theory1_array,theory2_array)
		theory=np.mean(theory_array,axis=0)
		#dtheory=np.sqrt(np.var(theory1_array,ddof=1) + np.var(theory2_array,ddof=1))
		#cross_array = np.add(np.subtract(cross1_array,noise1),np.subtract(cross2_array,noise2))
		cross_array = np.add(cross1_array,cross2_array)
		cross=np.mean(cross_array,axis=0)
		#dcross=np.std(cross_array,axis=0,ddof=1)
		dcross=np.sqrt( ( np.var(cross1_array,axis=0,ddof=1) + np.var(cross2_array,axis=0,ddof=1)))
		cosmic=np.sqrt(cosmic1**2+cosmic2**2)

		delta1=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory1**2 + theory1*(N_dq+N_au)/2. + N_dq*N_au/2.))
		delta2=np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(b**2+dl_eff**2))*(theory2**2 + theory2*(N_du+N_aq)/2. + N_du*N_aq/2.))
		delta=np.sqrt(delta1**2+delta2**2)
		#cosmic=np.abs(theory_cls)*np.sqrt(2./((2*plot_l+1)*fsky*np.sqrt(dl_eff**2+b**2)))
		#theory1=np.mean(theory1_array,axis=0)
		#dtheory1=np.std(theory1_array,axis=0,ddof=1)
		#cross1=np.mean(cross1_array,axis=0)
		#dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
		#ipdb.set_trace()
		good_l=np.logical_and(plot_l <= 250,plot_l >25)
		plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'Cross_43x95_FR', title='QUIET FR Correlator',theory=theory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)

		#theory2=np.mean(theory2_array,axis=0)
		#dtheory2=np.std(theory2_array,axis=0,ddof=1)
		#cross2=np.mean(cross2_array,axis=0)
		##delta2=np.mean(delta2_array,axis=0)
		#dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
		##ipdb.set_trace()
		#plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
		#ipdb.set_trace()
    
		if b == 25 :
			good_l=np.logical_and(plot_l <= 250,plot_l >25)
			likelihood(cross[good_l],delta[good_l],theory[good_l],'field1','c2bfr')

		#if b == 1 :
		#	xbar= np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
		#	vector=np.matrix(ll[1:]*cross[1:]).T
		#	mu=np.matrix(ll[1:]*theory[1:]).T
		#	fact=len(xbar)-1
		#	cov=(np.dot(xbar,xbar.T)/fact).squeeze()
		##	ipdb.set_trace()
		#	U,S,V =np.linalg.svd(cov)
		#	_cov= np.einsum('ij,j,jk', V.T,1./S,U.T)
		#	likelhd=np.exp(-np.dot(np.dot((vector-mu).T,_cov),(vector-mu))/2. )/(np.sqrt(2*np.pi*np.prod(S)))
		##	print('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
		#	f=open('FR_likelihood.txt','w')
		#	f.write('Likelihood of fit is #{0:.5f}'.format(likelhd[0,0]))
		#	f.close()

	subprocess.call('mv *01*.png bin_01/', shell=True)
	subprocess.call('mv *05*.png bin_05/', shell=True)
	subprocess.call('mv *10*.png bin_10/', shell=True)
	subprocess.call('mv *20*.png bin_20/', shell=True)
	subprocess.call('mv *25*.png bin_25/', shell=True)
	subprocess.call('mv *50*.png bin_50/', shell=True)
	subprocess.call('mv *.eps eps/', shell=True)
Example #56
0
hp.mollview(radio_map, norm='hist', unit='$K_{CMB}$')
plt.savefig('chipass_raw.png', format='png')
plt.close()

cross_cls = hp.anafast(cmb_map,radio_fr)
radio_cls = hp.anafast(radio_fr)
cmb_cls = hp.anafast(cmb_map)



lmax = len(cross_cls)
beam_lmax = lmax
l = np.arange(beam_lmax)
ll = l*(l+1)/(2*np.pi)
beam_14 = hp.gauss_beam(14.4*np.pi/(180.*60.),beam_lmax-1)
beam_5 = hp.gauss_beam(5.*np.pi/(180.*60.),beam_lmax-1)
b14_180 = hp.gauss_beam(np.sqrt((3*60.)**2 - (14.4)**2)*np.pi/(180.*60.),beam_lmax-1)
b5_180 = hp.gauss_beam(np.sqrt((3*60.)**2 - (5.)**2)*np.pi/(180.*60.),beam_lmax-1)

beam_14 *= (1. - b14_180)
beam_5 *= (1. - b5_180)

pix = hp.pixwin(256)[:beam_lmax]

theory_cls= hp.read_cl(theory_cl_file)
theory_cls=theory_cls[0][:beam_lmax]
#theory_cls[:2]=1e-10

cross_cls = cross_cls[:beam_lmax]
radio_cls = radio_cls[:beam_lmax]
Example #57
0
def main():
	##Parameters for Binning, Number of Runs
	##	Beam correction
	use_beam=0
	N_runs=100
	bins=[1,5,10,20,25,50]
	gal_cut=[0,5,10,20,30]
	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	l=np.arange(3*nside_out)
	ll=l*(l+1)/(2*np.pi)

	map_prefix='/home/matt/Planck/data/faraday/simul_maps/'
	file_prefix=map_prefix+'prism_simulated_'
	alpha_file='/data/wmap/faraday_MW_realdata.fits'
	#wl=np.array([299792458./(band*1e9) for band in bands])
	cross1_array_in=[[],[],[]]
	cross2_array_in=[[],[],[]]
	Ndq_array_in=[[],[],[]]
	Ndu_array_in=[[],[],[]]
	Nau_array_in=[[],[],[]]
	Naq_array_in=[[],[],[]]
	noise1_array_in=[[],[],[]]
	noise2_array_in=[[],[],[]]
	theory1_array_in=[[],[],[]]
	theory2_array_in=[[],[],[]]
	

	#simulate_fields.main()
	for num, mask_file in enumerate(mask_array):
		print(Fore.WHITE+Back.RED+Style.BRIGHT+'Mask: '+mask_name[num]+Back.RESET+Fore.RESET+Style.RESET_ALL)
		count=0
		for i in [0,1,2]:
			for j in [3,4,5]:
				#for n in xrange(N_runs):
				for run in xrange(N_runs):	
					print(Fore.WHITE+Back.GREEN+Style.BRIGHT+'Correlation #{:03d}'.format(run+1)+Back.RESET+Fore.RESET+Style.RESET_ALL)
					print('Bands: {0:0>3.0f} and {1:0>3.0f}'.format(bands[i],bands[j]))
					ttmp1,ttmp2=correlate_theory(file_prefix+'{0:0>3.0f}.fits'.format(bands[i]),file_prefix+'{0:0>3.0f}.fits'.format(bands[j]),wl[i],wl[j],alpha_file,'{0:0>3.0f}x{1:0>3.0f}'.format(bands[i],bands[j]),beam=use_beam,mask_file=mask_file)
				#f=open('cl_noise_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
					theory1_array_in[count].append(ttmp1)
					theory2_array_in[count].append(ttmp2)
					tmp1,tmp2,n1,n2,n3,n4=correlate_signal(file_prefix+'{0:0>3.0f}.fits'.format(bands[i]),file_prefix+'{0:0>3.0f}.fits'.format(bands[j]),wl[i],wl[j],alpha_file,'{0:0>3.0f}x{1:0>3.0f}'.format(bands[i],bands[j]),beam=use_beam,mask_file=mask_file)
					ntmp1,ntmp2=correlate_noise(file_prefix+'{0:0>3.0f}.fits'.format(bands[i]),file_prefix+'{0:0>3.0f}.fits'.format(bands[j]),wl[i],wl[j],alpha_file,'{0:0>3.0f}x{1:0>3.0f}'.format(bands[i],bands[j]),beam=use_beam,mask_file=mask_file)
					cross1_array_in[count].append(tmp1)
					cross2_array_in[count].append(tmp2)
					Ndq_array_in[count].append(n1)
					Ndu_array_in[count].append(n2)
					Nau_array_in[count].append(n3)
					Naq_array_in[count].append(n4)
					noise1_array_in[count].append(ntmp1)
					noise2_array_in[count].append(ntmp2)
				count+=1
		np.savez('prism_simul_'+mask_name[num]+'.npz',the1_in=theory1_array_in,the2_in=theory2_array_in,c1_in=cross1_array_in,c2_in=cross2_array_in,ndq_in=Ndq_array_in,ndu_in=Ndu_array_in,nau_in=Nau_array_in,naq_in=Naq_array_in,n1_in=noise1_array_in,n2_in=noise2_array_in)
				#f=open('cl_theory_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(theory1_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_theory_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(theory2_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_array_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(cross1_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_array_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(cross2_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_noise_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(noise1_array_in).tolist(),f)
				#f.close()	
				#json.dump(np.array(noise2_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_Nau_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Nau_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_Ndq_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Ndq_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_Naq_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Naq_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_Ndu_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Ndu_array_in).tolist(),f)
				#f.close()	
			
				#fsky= 1. - np.sin(cut*np.pi/180.)
				#L=np.sqrt(fsky*4*np.pi)
				#dl_eff=2*np.pi/L

		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()
		
		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=128)
		
		mask_bool=~mask.astype(bool)
		
		fsky= 1. - np.sum(mask)/float(len(mask))	
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L

		theory1_array_in=np.array(theory1_array_in)/fsky
		theory2_array_in=np.array(theory2_array_in)/fsky
		cross1_array_in=np.array(cross1_array_in)/fsky
		cross2_array_in=np.array(cross2_array_in)/fsky
		Ndq_array_in=np.array(Ndq_array_in)/fsky
		Ndu_array_in=np.array(Ndu_array_in)/fsky
		Nau_array_in=np.array(Nau_array_in)/fsky
		Naq_array_in=np.array(Naq_array_in)/fsky
		noise1_array_in=np.array(noise1_array_in)/fsky
		noise2_array_in=np.array(noise2_array_in)/fsky


		for b in bins:
			N_dq=np.mean(Ndq_array_in,axis=1)
			N_au=np.mean(Nau_array_in,axis=1)
			delta1_in=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)**2+(np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)/2.*(N_dq+N_au)+N_dq*N_au/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
		
			cosmic1_in=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=1)**2)

			N_du=np.mean(Ndu_array_in,axis=1)
			N_aq=np.mean(Naq_array_in,axis=1)
			delta2_in=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)**2+(np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)/2.*(N_du+N_aq)+N_du*N_aq/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
			cosmic2_in=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=1)**2)

			cross1_array=[[],[],[]]
			cross2_array=[[],[],[]]
			Ndq_array=[[],[],[]]
			Ndu_array=[[],[],[]]
			Nau_array=[[],[],[]]
			Naq_array=[[],[],[]]
			noise1_array=[[],[],[]]
			noise2_array=[[],[],[]]
			theory1_array=[[],[],[]]
			theory2_array=[[],[],[]]
			cosmic1=[[],[],[]]
			cosmic2=[[],[],[]]
			delta1=[[],[],[]]
			delta2=[[],[],[]]
        		
			plot_l=[]
			if( b != 1):
				for m in xrange(len(cross1_array_in)):
					for n in xrange(len(cross1_array_in[0])):
						tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in[m][n]/bls,b)
						tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in[m][n]/bls,b)
						tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in[m][n]/bls,b)
						tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in[m][n]/bls,b)
						tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in[m][n]/bls,b)
						tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in[m][n]/bls,b)

						theory1_array[m].append(tmp_t1['llcl'])
						theory2_array[m].append(tmp_t2['llcl'])

						cross1_array[m].append(tmp_c1['llcl'])
						cross2_array[m].append(tmp_c2['llcl'])

						noise1_array[m].append(tmp_n1['llcl'])
						noise2_array[m].append(tmp_n2['llcl'])

						if n == len(cross1_array_in[0])-1:
							plot_l=tmp_c1['l_out']
					tmp_c1=bin_llcl.bin_llcl(ll*cosmic1_in[m]/bls,b)
					tmp_d1=bin_llcl.bin_llcl(ll*delta1_in[m]/bls,b)
					cosmic1[m]=tmp_c1['llcl']
					delta1[m]=tmp_d1['llcl']

					tmp_c2=bin_llcl.bin_llcl(ll*cosmic2_in[m]/bls,b)
					tmp_d2=bin_llcl.bin_llcl(ll*delta2_in[m]/bls,b)
					cosmic2[m]=tmp_c2['llcl']
					delta2[m]=tmp_d2['llcl']
					
			else:
				plot_l=l
				theory1_array=np.multiply(ll/bls,theory1_array_in)
				cross1_array=np.multiply(ll/bls,cross1_array_in)
				noise1_array=np.multiply(ll/bls,noise1_array_in)
				theory2_array=np.multiply(ll/bls,theory2_array_in)
				cross2_array=np.multiply(ll/bls,cross2_array_in)
				noise2_array=np.multiply(ll/bls,noise2_array_in)
				cosmic1=cosmic1_in*ll/bls
				cosmic2=cosmic2_in*ll/bls
				delta1=delta1_in*ll/bls
				delta2=delta2_in*ll/bls
			#noise1=np.mean(noise1_array,axis=1)
			#noise2=np.mean(noise2_array,axis=1)
			theory_array = np.add(theory1_array,theory2_array)
			theory=np.mean(theory_array,axis=1)
			dtheory=np.std(theory_array,axis=1,ddof=1)
			cross_array = np.add(np.subtract(cross1_array,noise1_array),np.subtract(cross2_array,noise2_array))
			cross=np.mean(cross_array,axis=1)
			dcross=np.std(cross_array,axis=1,ddof=1)
			cosmic=np.sqrt(np.array(cosmic1)**2+np.array(cosmic2)**2)
			delta=np.sqrt(np.array(delta1)**2+np.array(delta2)**2)

			cross=np.average(cross,weights=1./dcross**2,axis=0)
			theory=np.average(theory,weights=1./dcross**2,axis=0)
			dtheory=np.average(dtheory,weights=1./dcross**2,axis=0)
			cosmic=np.average(cosmic,weights=1./dcross**2,axis=0)
			delta=np.average(delta,weights=1./dcross**2,axis=0)
			dcross=np.sqrt(np.average(dcross**2,weights=1./dcross**2,axis=0))

			#theory1=np.mean(theory1_array,axis=0)
			#dtheory1=np.std(theory1_array,axis=0,ddof=1)
			#cross1=np.mean(cross1_array,axis=0)
			#dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
			#ipdb.set_trace()
			plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'prism_FR_simulation',title='PRISM FR Correlator',theory=theory*1e12,dtheory=dtheory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)

			#theory2=np.mean(theory2_array,axis=0)
			#dtheory2=np.std(theory2_array,axis=0,ddof=1)
			#cross2=np.mean(cross2_array,axis=0)
			##delta2=np.mean(delta2_array,axis=0)
			#dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
			##ipdb.set_trace()
			#plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
			#ipdb.set_trace()
    
			if b == 25 :
				a_scales=np.linspace(-2,4,121)
				chi_array=[]
				for a in a_scales:
					chi_array.append(np.sum( (cross - a*theory)**2/(dcross)**2))
				ind = np.argmin(chi_array)
			#likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sqrt(2*np.pi)
				likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sum(np.exp(np.multiply(-1./2.,chi_array))*.05)

				Sig=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise=np.std(np.sum(cross_array/dcross**2,axis=1)/np.sum(1./dcross**2))
				Sig1=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise1=np.std(np.sum(cross_array*(theory/dcross)**2,axis=1)/np.sum((theory/dcross)**2))
				SNR=Sig/Noise
				SNR1=Sig1/Noise1
				
				Sig2=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise2=np.sqrt(1./np.sum(1./dcross**2))
				Sig3=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise3=np.sqrt(np.sum(theory**2)/np.sum(theory**2/dcross**2))
				SNR2=Sig2/Noise2
				SNR3=Sig3/Noise3
				
				#ipdb.set_trace()
				fig,ax1=plt.subplots(1,1)

				ax1.plot(a_scales,likelihood,'k.')
				ax1.set_title('Faraday Rotation Correlator')
				ax1.set_xlabel('Scale factor a')
				ax1.set_ylabel('Likelihood of Correlation')
				fig.savefig('FR_Correlation_Likelihood.png',format='png')
				fig.savefig('FR_Correlation_Likelihood.eps',format='eps')
				#ipdb.set_trace()
				f=open('Maximum_likelihood.txt','w')
				f.write('Maximum Likelihood: {0:2.5f}%  for scale factor {1:.2f} \n'.format(float(likelihood[ind]*100),float(a_scales[ind])))
				f.write('Probability of scale factor =1: {0:2.5f}% \n \n'.format(float(likelihood[np.where(a_scales ==1)])*100))
				f.write('Detection Levels using Standard Deviation \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR,Sig, Noise))
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n \n'.format(SNR1,Sig1,Noise1))
				f.write('Detection using Theoretical Noise \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR2,Sig2, Noise2))
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR3,Sig3,Noise3))
				f.close()

			#if b == 1 :
			#	xbar= np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
			#	vector=np.matrix(ll[1:]*cross[1:]).T
			#	mu=np.matrix(ll[1:]*theory[1:]).T
			#	fact=len(xbar)-1
			#	cov=(np.dot(xbar,xbar.T)/fact).squeeze()
			#	ipdb.set_trace()
			#	likelihood=np.exp(-np.dot(np.dot((vector-mu).T,lin.inv(cov)),(vector-mu))/2. )/(np.sqrt(2*np.pi*lin.det(cov)))
			#	print('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f=open('FR_likelihood.txt','w')
			#	f.write('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f.close()

				#subprocess.call('mv Maximum_likelihood.txt  gal_cut_{0:0>2d}/'.format(cut), shell=True)
				subprocess.call('mv *01*.png bin_01/', shell=True)
				subprocess.call('mv *05*.png bin_05/', shell=True)
				subprocess.call('mv *10*.png bin_10/', shell=True)
				subprocess.call('mv *20*.png bin_20/', shell=True)
				subprocess.call('mv *25*.png bin_25/', shell=True)
				subprocess.call('mv *50*.png bin_50/', shell=True)
				subprocess.call('mv *.eps eps/', shell=True)
Example #58
0
    def beam_cl(self):
        return gauss_beam(self.beam_FWHM_amin * np.pi / 180. / 60., lmax=self.lmax_grid())
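A minimal usage sketch for the pattern above: hp.gauss_beam returns the beam window
function B_ell for a Gaussian beam of the given FWHM (in radians), and a measured
spectrum is beam-corrected by dividing by B_ell**2. The names fwhm_amin and
cl_measured below are illustrative, not from the original class.

import numpy as np
import healpy as hp

fwhm_amin = 30.0                      # hypothetical beam FWHM in arcminutes
lmax = 767                            # e.g. 3*nside - 1 for nside=256
bl = hp.gauss_beam(np.radians(fwhm_amin / 60.), lmax=lmax)

cl_measured = np.ones(lmax + 1)       # stand-in for an anafast output
cl_corrected = cl_measured / bl**2    # deconvolve the beam from the spectrum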
Example #59
0
def correlate_signal(i_file,j_file,wl_i,wl_j,alpha_file,bands,beam=False,gal_cut=0.,mask_file=None):
	print "Computing Cross Correlations for Bands "+str(bands)


	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	delta_alpha_radio=hp.read_map(alpha_file,hdu='uncertainty/phi')
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	nside_i=hdu_i['stokes iqu'].header['nside']
	nside_j=hdu_j['stokes iqu'].header['nside']
	hdu_i.close()
	hdu_j.close()
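	# Note: this function relies on several module-level globals defined elsewhere in
	# the original script: wl (wavelength array), noise_const_pol, beam_fwhm,
	# smoothing_scale, nside_out and npix_out. They are not parameters of this function.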
	

	ind_i=np.argwhere( wl == wl_i)[0][0]
	ind_j=np.argwhere( wl == wl_j)[0][0]
	npix_i=hp.nside2npix(nside_i)	
	npix_j=hp.nside2npix(nside_j)	
	#ipdb.set_trace()
	if npix_i != iqu_band_i[1].shape[0]:
		print('NSIDE parameter not equal to size of map for file I')
		print('setting npix to larger parameter')
		npix_i=iqu_band_i[1].shape[0]
	
	if npix_j != iqu_band_j[1].shape[0]:
		print('NSIDE parameter not equal to size of map for file J')
		print('setting npix to larger parameter')
		npix_j=iqu_band_j[1].shape[0]

	sigma_i=[noise_const_pol[ind_i]*np.random.normal(0,1,npix_i),noise_const_pol[ind_i]*np.random.normal(0,1,npix_i)]
	sigma_j=[noise_const_pol[ind_j]*np.random.normal(0,1,npix_j),noise_const_pol[ind_j]*np.random.normal(0,1,npix_j)]
	
	iqu_band_i[1]+=sigma_i[0]
	iqu_band_i[2]+=sigma_i[1]
	iqu_band_j[1]+=sigma_j[0]
	iqu_band_j[2]+=sigma_j[1]
	
	sigma_q_i=hp.smoothing(sigma_i[0],fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_i])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_u_i=hp.smoothing(sigma_i[1],fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_i])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_q_j=hp.smoothing(sigma_j[0],fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_j])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_u_j=hp.smoothing(sigma_j[1],fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_j])**2)*np.pi/(180.*60.),verbose=False)	

	sigma_q_i=hp.ud_grade(sigma_q_i,nside_out)
	sigma_u_i=hp.ud_grade(sigma_u_i,nside_out)
	sigma_q_j=hp.ud_grade(sigma_q_j,nside_out)
	sigma_u_j=hp.ud_grade(sigma_u_j,nside_out)
		
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_i])**2)*np.pi/(180.*60.),verbose=False)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_j])**2)*np.pi/(180.*60.),verbose=False)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=nside_out,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=nside_out,order_in='ring')
	
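	# Note: Faraday rotation by an angle alpha*lambda^2 mixes Q and U, so to first order
	# the frequency difference of the Q (or U) maps isolates the rotation term:
	# (Q_i - Q_j) / (2*(wl_i^2 - wl_j^2)) ~ alpha * U (and similarly for U with -alpha*Q,
	# up to sign conventions), which is why Delta_Q is cross-correlated with alpha*U and
	# Delta_U with -alpha*Q below.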
	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	sqi=hp.ma(sigma_q_i)
	sui=hp.ma(sigma_u_i)
	sqj=hp.ma(sigma_q_j)
	suj=hp.ma(sigma_u_j)
	salpha=hp.ma(delta_alpha_radio)
	alpham=hp.ma(alpha_radio)
	um=hp.ma(iqu_band_j[2])
	qm=hp.ma(iqu_band_j[1])
	
	Bl_factor=np.repeat(1.,3*nside_out)
	#ipdb.set_trace()
	if beam:
		Bl_factor=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)
	pix_area=hp.nside2pixarea(nside_out)
	#ipdb.set_trace()
	mask_bool=np.repeat(False,npix_out)

	if gal_cut > 0:
		pix=np.arange(hp.nside2npix(nside_out))
		x,y,z=hp.pix2vec(nside_out,pix,nest=0)
		mask_bool= np.abs(z)<= np.sin(gal_cut*np.pi/180.)
	#mask_bool1[np.where(np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)<.2e-6)]=True
	if not (mask_file is None):
		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()
		
		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=nside_out)
		
		mask_bool=~mask.astype(bool)
		
		fsky= 1. - np.sum(mask)/float(len(mask))	
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L
	
	DQm.mask=mask_bool
	DUm.mask=mask_bool
	aQm.mask=mask_bool
	aUm.mask=mask_bool
	sqi.mask=mask_bool
	sui.mask=mask_bool
	sqj.mask=mask_bool
	suj.mask=mask_bool
	salpha.mask=mask_bool
	alpham.mask=mask_bool
	um.mask=mask_bool
	qm.mask=mask_bool
	#ipdb.set_trace()
	cross1=hp.anafast(DQm,map2=aUm)/Bl_factor**2
	cross2=hp.anafast(DUm,map2=aQm)/Bl_factor**2
	

	##calculate theoretical variance for correlations
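	# Note: these are white-noise pseudo-C_ell levels of the form
	# N_ell ~ sum(sigma_pix^2) * Omega_pix^2 / (4*pi), one per map entering the
	# correlators; the alpha terms propagate both the map noise and the uncertainty
	# on alpha itself.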
	N_dq=abs((sqi-sqj)**2).sum()*(pix_area/const)**2/(4.*np.pi)
	N_du=abs((sui-suj)**2).sum()*(pix_area/const)**2/(4.*np.pi)
	N_au=abs((salpha*um+alpham*suj+salpha*suj)**2).sum()*pix_area**2/(4.*np.pi)
	N_aq=abs((salpha*qm+alpham*sqj+salpha*sqj)**2).sum()*pix_area**2/(4.*np.pi)
	#ipdb.set_trace()

	return (cross1,cross2,N_dq,N_du,N_au,N_aq)
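A condensed, self-contained sketch of the masked cross-spectrum pattern at the heart
of correlate_signal (all inputs synthetic, nside chosen arbitrarily; not the original
pipeline):

import numpy as np
import healpy as hp

nside = 64
npix = hp.nside2npix(nside)
map1 = np.random.normal(0., 1., npix)        # stand-in for Delta_Q
map2 = np.random.normal(0., 1., npix)        # stand-in for alpha_radio * U

# mask a +/-10 degree band around the galactic plane, as the gal_cut branch does
z = hp.pix2vec(nside, np.arange(npix))[2]
mask_bool = np.abs(z) <= np.sin(np.radians(10.))

m1, m2 = hp.ma(map1), hp.ma(map2)
m1.mask = mask_bool
m2.mask = mask_bool
cl_cross = hp.anafast(m1, map2=m2)           # pseudo-C_ell on the cut sky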
Example #60
0
def main():	
	t1=time()
	radio_file='/data/wmap/faraday_MW_realdata.fits'
	cl_file='/home/matt/wmap/simul_scalCls.fits'
	output_prefix='/home/matt/quiet/quiet_maps/'
	nside=1024
	nside_in=1024
	npix=hp.nside2npix(nside)
	bands=[43.1,94.5]
	q_fwhm=[27.3,11.7]
	pix_area= np.sqrt(hp.nside2pixarea(1024))*60*180./np.pi
	noise_const_q=np.array([36./pix_area for f in q_fwhm])*1e-6
#	noise_const_q=np.array([36./fwhm for fwhm in q_fwhm])*1e-6
	centers=np.array([convertcenter([12,4],-39),convertcenter([5,12],-39),convertcenter([0,48],-48),convertcenter([22,44],-36)])
	wl=np.array([299792458./(band*1e9) for band in bands])
	
	synchrotron_file='/data/Planck/COM_CompMap_SynchrotronPol-commander_0256_R2.00.fits'
	dust_file='/data/Planck/COM_CompMap_DustPol-commander_1024_R2.00.fits'
	dust_t_file='/data/Planck/COM_CompMap_dust-commander_0256_R2.00.fits'
	dust_b_file='/data/Planck/COM_CompMap_ThermalDust-commander_2048_R2.00.fits'
	
	##Dust intensity scaling factor
	hdu_dust_t=fits.open(dust_t_file)
	dust_t=hdu_dust_t[1].data.field('TEMP_ML')
	hdu_dust_t.close()
	
	dust_t=hp.reorder(dust_t,n2r=1)
	dust_t=hp.ud_grade(dust_t,nside_in)
	
	hdu_dust_b=fits.open(dust_b_file)
	dust_beta=hdu_dust_b[1].data.field('BETA_ML_FULL')
	hdu_dust_b.close()
	
	dust_beta=hp.reorder(dust_beta,n2r=1)	
	dust_beta=hp.ud_grade(dust_beta,nside_in)
	
	gamma_dust=6.626e-34/(1.38e-23*dust_t)

	krj_to_kcmb=np.array([1.,1.])
	sync_factor=krj_to_kcmb*np.array([1e-6*(30./x)**2 for x in bands])
	dust_factor=np.array([krj_to_kcmb[i]*1e-6*(np.exp(gamma_dust*353e9)-1)/(np.exp(gamma_dust*x*1e9)-1)* (x/353.)**(1+dust_beta) for i,x in enumerate(bands)])

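	# Note: sync_factor scales the 30 GHz synchrotron template with a nu^-2 brightness
	# power law, and dust_factor rescales the 353 GHz dust template with a modified
	# blackbody, gamma_dust = h/(k_B * T_dust), times (nu/353)^(1+beta_dust).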
	print('Preparing Foregrounds')	
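	# Note: the Planck synchrotron and dust Q/U templates are released smoothed with
	# 40' and 10' beams respectively; dividing their alm by the matching hp.gauss_beam
	# window (hp.almxfl with 1/bl) deconvolves that smoothing before the maps are
	# rescaled and re-smoothed to each simulated band.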
	bl_40=hp.gauss_beam(40.*np.pi/(180.*60.),3*1024-1)
	bl_10=hp.gauss_beam(10.*np.pi/(180.*60.),3*1024-1)
	
	hdu_sync=fits.open(synchrotron_file)
	sync_q=hdu_sync[1].data.field(0)
	sync_u=hdu_sync[1].data.field(1)
	
	sync_q=hp.reorder(sync_q,n2r=1)
	tmp_alm=hp.map2alm(sync_q)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_40)
	sync_q=hp.alm2map(tmp_alm,nside_in,verbose=False)
	
	sync_u=hp.reorder(sync_u,n2r=1)
	tmp_alm=hp.map2alm(sync_u)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_40)
	sync_u=hp.alm2map(tmp_alm,nside_in,verbose=False)
	hdu_sync.close()
	
	hdu_dust=fits.open(dust_file)
	dust_q=hdu_dust[1].data.field(0)
	dust_u=hdu_dust[1].data.field(1)
	hdu_dust.close()
	
	dust_q=hp.reorder(dust_q,n2r=1)
	tmp_alm=hp.map2alm(dust_q)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_10)
	dust_q=hp.alm2map(tmp_alm,nside_in,verbose=False)
	dust_q_back=np.copy(dust_q)
	
	dust_u=hp.reorder(dust_u,n2r=1)
	tmp_alm=hp.map2alm(dust_u)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_10)
	dust_u=hp.alm2map(tmp_alm,nside_in,verbose=False)
	dust_u_back=np.copy(dust_u)
	
	print('Generating Map')
	cls=hp.read_cl(cl_file)
	simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1)
	alpha_radio=hp.read_map(radio_file,hdu='maps/phi')
	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring')
	

	num_wl=len(wl)
	no_noise=[]
	t_array=np.zeros((num_wl,npix))	
	q_array=np.zeros((num_wl,npix))
	sigma_q=np.zeros((num_wl,npix))
	u_array=np.zeros((num_wl,npix))
	sigma_u=np.zeros((num_wl,npix))
	for i in range(num_wl):
		print('\tFrequency: {0:2.1f}'.format(bands[i]))
		tmp_cmb=rotate_tqu(simul_cmb,wl[i],alpha_radio)
		no_noise.append(hp.smoothing(np.copy(tmp_cmb), fwhm=q_fwhm[i]*np.pi/(180*60.),pol=1,verbose=False))
		sigma_q[i]=np.random.normal(0,1,npix)*noise_const_q[i]
		sigma_u[i]=np.random.normal(0,1,npix)*noise_const_q[i]
		tmp_cmb[1]+= np.copy( dust_factor[i]*dust_q+sync_factor[i]*sync_q    )
		tmp_cmb[2]+= np.copy( dust_factor[i]*dust_u+sync_factor[i]*sync_u    )
		tmp_out=hp.sphtfunc.smoothing(tmp_cmb,fwhm=q_fwhm[i]*np.pi/(180.*60.),pol=1,verbose=False)
		t_array[i],q_array[i],u_array[i]=tmp_out
		#sigma_q[i]=hp.sphtfunc.smoothing(tmp_q,fwhm=np.pi/180.)
		#sigma_u[i]=hp.sphtfunc.smoothing(tmp_u,fwhm=np.pi/180.)
	
	print "Time to Write Fields"
	dx=1./(60.)*3
	nx=np.int(15/dx)
	ny=nx
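	# Note: each QUIET patch is a 15 deg x 15 deg grid of 3-arcmin squares
	# (dx = 3/60 deg, nx = ny = 300), whose centers are mapped to HEALPix pixels below.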
	all_pix=[]
	field_pix=[]
	square_pix=[]
	quiet_mask=np.zeros(npix)
	prim=fits.PrimaryHDU()
	prim.header['COMMENT']="Simulated Quiet Data"
	prim.header['COMMENT']="Created using CAMB"
	for p in range(len(centers)):
		coords=regioncoords(centers[p,0],centers[p,1],dx,nx,ny)
		coords_sky=SkyCoord(ra=coords[:,0],dec=coords[:,1],unit=u.degree,frame='fk5')
		phi=coords_sky.galactic.l.deg*np.pi/180.
		theta=(90-coords_sky.galactic.b.deg)*np.pi/180.
		pixels=hp.ang2pix(nside,theta,phi)
		quiet_mask[pixels]=1
		unique_pix=(np.unique(pixels).tolist())
		field_pix.append(unique_pix)
		square_pix.append(pixels)
		all_pix.extend(unique_pix)
		pix_col=fits.Column(name='PIXEL',format='1J',array=unique_pix)
		for f in range(num_wl):
			region_mask=np.zeros(npix)
			region_mask[pixels]=1
			region_map_t=np.array(t_array[f][pixels]).reshape((nx,ny))
			region_map_q=np.array(q_array[f][pixels]).reshape((nx,ny))
			region_map_u=np.array(u_array[f][pixels]).reshape((nx,ny))
			region_delta_q=np.array(sigma_q[f][pixels]).reshape((nx,ny))
			region_delta_u=np.array(sigma_u[f][pixels]).reshape((nx,ny))
			prim=fits.PrimaryHDU()
			q_head=fits.ImageHDU([region_map_t,region_map_q,region_map_u],name="STOKES IQU")
			q_head.header['TFIELDS']=(3,'number of fields in each row')
			q_head.header['TTYPE1']=('SIGNAL', "STOKES I, Temperature")
			q_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
			q_head.header['TTYPE2']='STOKES Q'
			q_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
			q_head.header['TTYPE3']='STOKES U'
			q_head.header['TUNIT3']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
			q_head.header['TFORM1']='E'
			q_head.header['TFORM2']='E'
			q_head.header['TFORM3']='E'
			q_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
			q_head.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
			q_head.header["COORDSYS"]=('G','Pixelization coordinate system')
			q_head.header['NSIDE']=(1024,'Healpix Resolution parameter')
			q_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
			q_head.header['INDXSCHM']=('EXPLICIT','indexing : IMPLICIT or EXPLICIT')
			err_head=fits.ImageHDU([region_delta_q,region_delta_u],name="Q/U UNCERTAINTIES")
			err_head.header['TFIELDS']=(2,'number of fields in each row')
			err_head.header['NSIDE']=1024
			err_head.header['ORDERING']='RING'
			err_head.header['TTYPE1']='SIGMA Q'
			err_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
			err_head.header['TTYPE2']='SIGMA U'
			err_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
			err_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
			err_head.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL')
			err_head.header['INDXSCHM']=('EXPLICIT','indexing : IMPLICIT or EXPLICIT')
			m_head=fits.ImageHDU(region_mask,name='MASK')	
			sqr_pix_col=fits.Column(name='PIXELS',format='1J',array=pixels)
			sqr_pix_cols=fits.ColDefs([sqr_pix_col])
			sqr_pix_head=fits.BinTableHDU.from_columns(sqr_pix_cols)
			hdulist=fits.HDUList([prim,q_head,err_head,m_head,sqr_pix_head])
			hdulist.writeto(output_prefix+"quiet_simulated_{:.1f}_cmb{:1d}.fits".format(bands[f],p+1),clobber=True)
			print('{:.1f}_cmb{:1d}.fits'.format(bands[f],p+1))
	
	
	mask_head=fits.ImageHDU(quiet_mask,name='MASK')
	pix_col=fits.Column(name='PIXEL',format='1J',array=all_pix)
	field_pix_col1=fits.Column(name='PIXELS FIELD 1',format='1J',array=field_pix[0])
	field_pix_col2=fits.Column(name='PIXELS FIELD 2',format='1J',array=field_pix[1])
	field_pix_col3=fits.Column(name='PIXELS FIELD 3',format='1J',array=field_pix[2])
	field_pix_col4=fits.Column(name='PIXELS FIELD 4',format='1J',array=field_pix[3])
	
	sqr_pix_col1=fits.Column(name='PIXELS FIELD 1',format='1J',array=square_pix[0])
	sqr_pix_col2=fits.Column(name='PIXELS FIELD 2',format='1J',array=square_pix[1])
	sqr_pix_col3=fits.Column(name='PIXELS FIELD 3',format='1J',array=square_pix[2])
	sqr_pix_col4=fits.Column(name='PIXELS FIELD 4',format='1J',array=square_pix[3])
	cols1=fits.ColDefs([sqr_pix_col1,sqr_pix_col2,sqr_pix_col3,sqr_pix_col4])
	tbhdu1=fits.BinTableHDU.from_columns(cols1)
	tbhdu1.header['TFIELDS']=(4,'number of fields in each row')
	tbhdu1.header["TTYPE1"]=("PIXELS CMB FIELD 1","SQUARE PIXEL NUMBER BY FIELD")
	tbhdu1.header["TTYPE2"]=("PIXELS CMB FIELD 2","SQUARE PIXEL NUMBER BY FIELD")
	tbhdu1.header["TTYPE3"]=("PIXELS CMB FIELD 3","SQUARE PIXEL NUMBER BY FIELD")
	tbhdu1.header["TTYPE4"]=("PIXELS CMB FIELD 4","SQUARE PIXEL NUMBER BY FIELD")
	tbhdu1.header["EXTNAME"]="SQUARE PIXELS"
	tbhdu1.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
	tbhdu1.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
	tbhdu1.header["NSIDE"]=(nside,'Healpix Resolution paramter')
	tbhdu1.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL')
	tbhdu1.header['OBS_NPIX']=(len(all_pix),'Number of pixels observed')
	tbhdu1.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT')
	tbhdu1.header["COORDSYS"]=('G','Pixelization coordinate system')
	for i in range(num_wl):
		cut_t,cut_q,cut_u=t_array[i][all_pix],q_array[i][all_pix],u_array[i][all_pix]
		cut_dq,cut_du=sigma_q[i][all_pix],sigma_u[i][all_pix]
		col_t=fits.Column(name='SIGNAL',format='1E',unit='K_{CMB}',array=cut_t)
		col_q=fits.Column(name='STOKES Q',format='1E',unit='K_{CMB}',array=cut_q)
		col_u=fits.Column(name='STOKES U',format='1E',unit='K_{CMB}',array=cut_u)
		col_dq=fits.Column(name='Q ERROR',format='1E',unit='K_{CMB}',array=cut_dq)
		col_du=fits.Column(name='U ERROR',format='1E',unit='K_{CMB}',array=cut_du)
		cols=fits.ColDefs([pix_col,col_t,col_q,col_u,col_dq,col_du])
		tbhdu=fits.BinTableHDU.from_columns(cols)
		tbhdu.header['TFIELDS']=(6,'number of fields in each row')
		tbhdu.header["TTYPE2"]=("SIGNAL","STOKES T")
		tbhdu.header["EXTNAME"]="SIGNAL"
		tbhdu.header['POLAR']= 'T'
		tbhdu.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU')
		tbhdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		tbhdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		tbhdu.header["NSIDE"]=(1024,'Healpix Resolution paramter')
		tbhdu.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL')
		tbhdu.header['OBS_NPIX']=(len(all_pix),'Number of pixels observed')
		tbhdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT')
		tbhdu.header["COORDSYS"]=('G','Pixelization coordinate system')
		tblist=fits.HDUList([prim,tbhdu])
		tblist.writeto(output_prefix+'quiet_partial_simulated_{:.1f}.fits'.format(bands[i]),clobber=True)
	
		q_head=fits.ImageHDU(np.array([t_array[i],q_array[i],u_array[i]]), name='STOKES IQU')
		q_head.header['TFIELDS']=(3,'number of fields in each row')
		q_head.header['TTYPE1']=('SIGNAL', "STOKES I, Temperature")
		q_head.header['TTYPE2']='STOKES Q'
		q_head.header['TTYPE3']='STOKES U'
		q_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		q_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		q_head.header['TUNIT3']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		q_head.header['TFORM1']='E'
		q_head.header['TFORM2']='E'
		q_head.header['TFORM3']='E'
		q_head.header['EXTNAME']='STOKES IQU'
		q_head.header['POLAR']= 'T'
		q_head.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU')
		q_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		q_head.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		q_head.header['NSIDE']=(1024,'Healpix Resolution parameter')
		q_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
		q_head.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT or EXPLICIT')
		q_head.header['BAD_DATA']=(hp.UNSEEN,'Sentinel value given to bad pixels')
		q_head.header["COORDSYS"]=('G','Pixelization coordinate system')
		#########################

		theo_head=fits.ImageHDU(no_noise[i], name='No Noise IQU')
		theo_head.header['TFIELDS']=(3,'number of fields in each row')
		theo_head.header['TTYPE1']=('SIGNAL', "STOKES I, Temperature")
		theo_head.header['TTYPE2']='STOKES Q'
		theo_head.header['TTYPE3']='STOKES U'
		theo_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		theo_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		theo_head.header['TUNIT3']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		theo_head.header['TFORM1']='E'
		theo_head.header['TFORM2']='E'
		theo_head.header['TFORM3']='E'
		theo_head.header['EXTNAME']='no noise iqu'
		theo_head.header['POLAR']= 'T'
		theo_head.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU')
		theo_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		theo_head.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		theo_head.header['NSIDE']=(1024,'Healpix Resolution parameter')
		theo_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
		theo_head.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT or EXPLICIT')
		theo_head.header['BAD_DATA']=(hp.UNSEEN,'Sentinel value given to bad pixels')
		theo_head.header["COORDSYS"]=('G','Pixelization coordinate system')
		

####################################
		#tblist=fits.HDUList([prim,tbhdu])
		err_head=fits.ImageHDU(np.array([sigma_q[i],sigma_u[i]]),name='Q/U UNCERTAINTIES')
		err_head.header['TFIELDS']=(2,'number of fields in each row')
		err_head.header['NSIDE']=1024
		err_head.header['ORDERING']='RING'
		err_head.header['TTYPE1']='SIGMA Q'
		err_head.header['TTYPE2']='SIGMA U'
		err_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		err_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		err_head.header['TFORM1']='E'
		err_head.header['TFORM2']='E'
		err_head.header['EXTNAME']='Q/U UNCERTAINTIES'
		err_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		err_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
		err_head.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT or EXPLICIT')
		err_head.header['BAD_DATA']=(hp.UNSEEN,'Sentinel value given to bad pixels')
		cols=fits.ColDefs([field_pix_col1,field_pix_col2,field_pix_col3,field_pix_col4])
		tbhdu=fits.BinTableHDU.from_columns(cols)
		tbhdu.header['TFIELDS']=(4,'number of fields in each row')
		tbhdu.header["TTYPE1"]=("PIXELS CMB FIELD 1","PIXEL NUMBER BY FIELD")
		tbhdu.header["TTYPE2"]=("PIXELS CMB FIELD 2","PIXEL NUMBER BY FIELD")
		tbhdu.header["TTYPE3"]=("PIXELS CMB FIELD 3","PIXEL NUMBER BY FIELD")
		tbhdu.header["TTYPE4"]=("PIXELS CMB FIELD 4","PIXEL NUMBER BY FIELD")
		tbhdu.header["EXTNAME"]="FIELD PIXELS"
		tbhdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		tbhdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		tbhdu.header["NSIDE"]=(nside,'Healpix Resolution paramter')
		tbhdu.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL')
		tbhdu.header['OBS_NPIX']=(len(all_pix),'Number of pixels observed')
		tbhdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT')
		tbhdu.header["COORDSYS"]=('G','Pixelization coordinate system')
		hdulist=fits.HDUList([prim,q_head,err_head,mask_head,tbhdu,tbhdu1,theo_head])
		hdulist.writeto(output_prefix+"quiet_simulated_{:.1f}.fits".format(bands[i]),clobber=True)
		print "quiet_simulated_{:.1f}.fits".format(bands[i])