Example #1
def load_wmap_masks(fname, n2r=True):
    """
    Loads the WMAP ILC region definition files and constructs the region masks.

    Returns `masks`, `region`.

    `masks` is a 12 x Npix boolean array, each row of which is a mask map.
    For region `i`, all pixels in `masks[i]` that are True should contribute
    to the ILC weighting.

    `region` is a map describing to which region each pixel belongs. The ILC
    solution for region `i` (i.e. using pixels from `masks`[i]) should be
    assigned to pixels whose value is `i`.

    Note that region 0 uses pixels primarily from the galaxy but applies its
    solution to pixels primarily away from the galaxy. This is correct.
    """
    with fits.open(fname) as f:
        fdata = f[1]
        bitmask = fdata.data['TEMPERATURE'].astype('uint16')
        region = fdata.data['N_OBS'].astype('uint16')
        if n2r:
            bitmask = hp.reorder(bitmask, n2r=True)
            region = hp.reorder(region, n2r=True)

    masks = np.empty([12, len(bitmask)], dtype='bool')
    for i in range(12):
        masks[i] = (bitmask >> i) & 1
    return masks, region
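
The bit-field decoding above can be sanity-checked on synthetic data, without the WMAP FITS file. Everything in this sketch (the nside, the random bit layout) is an arbitrary assumption for illustration:

import numpy as np

nside = 4
npix = 12 * nside ** 2
rng = np.random.default_rng(0)  # arbitrary seed

# Each pixel carries a 12-bit field: bit i set means the pixel feeds region i's ILC fit.
bitmask = rng.integers(0, 2 ** 12, size=npix, dtype=np.uint16)

masks = np.empty([12, npix], dtype=bool)
for i in range(12):
    masks[i] = (bitmask >> i) & 1

# Round trip: reassembling the bits recovers the original field.
assert np.all(sum(masks[i].astype(np.uint16) << i for i in range(12)) == bitmask)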
Example #2
def resolve(pix, new_nside, nest=False):
    nside = hp.npix2nside(pix.shape[0])

    if not nest:
        rpix = resolve(hp.reorder(pix, r2n=True), new_nside, nest=True)
        return hp.reorder(rpix, n2r=True)
    else:
        if nside == new_nside:
            return pix
        elif nside < new_nside:
            # Upsample one resolution step: each NESTED parent pixel is
            # replicated into its four children.
            nnew = hp.nside2npix(nside * 2)
            inew = np.arange(0, nnew, dtype=int)
            new_pix = pix[inew // 4]
            return resolve(new_pix, new_nside, nest=nest)
        else:
            # Downsample one resolution step: average each group of four
            # NESTED children into their parent pixel.
            new_pix = 0.25 * (pix[::4] + pix[1::4] + pix[2::4] + pix[3::4])
            return resolve(new_pix, new_nside, nest=nest)
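
A hedged consistency check (not from the source): with the averaging convention above, resolve should agree with healpy's ud_grade, whose defaults also average children when degrading and copy parent values when upgrading:

import numpy as np
import healpy as hp

nside_in, nside_out = 8, 4  # arbitrary test resolutions
m = np.arange(hp.nside2npix(nside_in), dtype=float)  # arbitrary RING-ordered map

# Degrade and upgrade through resolve, compare against the healpy reference.
np.testing.assert_allclose(resolve(m, nside_out), hp.ud_grade(m, nside_out))
np.testing.assert_allclose(resolve(m, 16), hp.ud_grade(m, 16))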
Example #3
def interpolate_nested(m, nest=False):
    """
    Apply bilinear interpolation to a multiresolution HEALPix map, assuming
    that runs of pixels containing identical values are nodes of the tree. This
    smooths out the stair-step effect that may be noticeable in contour plots.

    Here is how it works. Consider a coarse tile surrounded by base tiles, like
    this:

                +---+---+
                |   |   |
                +-------+
                |   |   |
        +---+---+---+---+---+---+
        |   |   |       |   |   |
        +-------+       +-------+
        |   |   |       |   |   |
        +---+---+---+---+---+---+
                |   |   |
                +-------+
                |   |   |
                +---+---+

    The value within the central coarse tile is computed by downsampling the
    sky map (averaging the fine tiles), upsampling again (with bilinear
    interpolation), and then finally copying the interpolated values within the
    coarse tile back to the full-resolution sky map. This process is applied
    recursively at all successive HEALPix resolutions.

    Note that this method suffers from a minor discontinuity artifact at the
    edges of regions of coarse tiles, because it temporarily treats the
    bordering fine tiles as constant. However, this artifact seems to have only
    a minor effect on generating contour plots.

    Parameters
    ----------

    m: `~numpy.ndarray`
        a HEALPix array

    nest: bool, default: False
        Whether the input array is stored in the `NESTED` indexing scheme (True)
        or the `RING` indexing scheme (False).

    """
    # Convert to nest indexing if necessary, and make sure that we are working
    # on a copy.
    if nest:
        m = m.copy()
    else:
        m = hp.reorder(m, r2n=True)

    _interpolate_level(m)

    # Convert back to ring indexing if necessary
    if not nest:
        m = hp.reorder(m, n2r=True)

    # Done!
    return m
Example #4
def gaussian_smoothing(sig, sigma, nest=True):
    if nest:
        sig = hp.reorder(sig, n2r=True)
    smooth = hp.sphtfunc.smoothing(sig, sigma=arcmin2rad(sigma))
    if nest:
        smooth = hp.reorder(smooth, r2n=True)
    return smooth
Example #5
def build_index_dict(params):
    """
    Build a dictionary containing
    1. a list of the indices of the ROI pixels at each hierarchy level, from the selected nside downwards to 1
    2. a list of the indices of the ROI pixels at each hierarchy level such that the nside=1 pixel is contained
    at each hierarchy level.
    3. a list mapping the ROI indices to the extended ROI
    NOTE: NESTED FORMAT!
    :param params: parameter dictionary
    :return: dictionary containing information about the ROI
    """

    # Set up the mask for the ROI
    inner_band = params.data["inner_band"]
    outer_rad = params.data["outer_rad"]
    nside = params.data["nside"]
    nsides = params.nn.arch["nsides"]
    roi = make_mask_total(band_mask=True, band_mask_range=inner_band, mask_ring=True, inner=0,
                          outer=outer_rad, nside=nside)
    if params.data["mask_type"] == "3FGL":
        roi = (1 - (1 - roi) * (1 - get_template(params.gen["fermi_folder"], "3FGL_mask"))).astype(bool)
    elif params.data["mask_type"] == "4FGL":
        roi = (1 - (1 - roi) * (1 - get_template(params.gen["fermi_folder"], "4FGL_mask"))).astype(bool)
    roi = hp.reorder(roi, r2n=True)

    roi_extended = hp.reorder(make_mask_total(nside=1, mask_ring=True, inner=0,
                                              outer=outer_rad), r2n=True)
    roi_dict = dict()
    roi_dict["indexes"] = get_pixels_with_holes(roi, nsides)
    roi_dict["indexes_extended"] = get_pixels(roi_extended, nsides)
    roi_dict["ind_holes_to_ex"] = [np.asarray([np.argwhere(roi_dict["indexes_extended"][i] == ind)[0][0]
                                                for ind in roi_dict["indexes"][i]])
                                    for i in range(len(roi_dict["indexes"]))]

    return roi_dict
Example #7
def rotate_map_to_axis(m, ra, dec, nest=False, method="direct"):
    """Rotate a sky map to place a given line of sight on the +z axis.

    Parameters
    ----------

    m : np.ndarray
        The input HEALPix array.

    ra : float
        Right ascension of axis in radians.

        To specify the axis in geocentric coordinates, supply ra=(lon + gmst),
        where lon is the geocentric longitude and gmst is the Greenwich mean
        sidereal time in radians.

    dec : float
        Declination of axis in radians.

        To specify the axis in geocentric coordinates, supply dec=lat,
        where lat is the geocentric latitude in radians.

    nest : bool, default=False
        Indicates whether the input sky map is in nested rather than
        ring-indexed HEALPix coordinates (default: ring).

    method : 'direct' or 'fft'
        Select whether to use spherical harmonic transformation ('fft') or
        direct coordinate transformation ('direct')

    Returns
    -------

    m_rotated : np.ndarray
        The rotated HEALPix array.
    """
    npix = len(m)
    nside = hp.npix2nside(npix)

    theta = 0.5 * np.pi - dec
    phi = ra

    if method == "fft":
        if nest:
            m = hp.reorder(m, n2r=True)
        alm = hp.map2alm(m)
        hp.rotate_alm(alm, -phi, -theta, 0.0)
        ret = hp.alm2map(alm, nside, verbose=False)
        if nest:
            ret = hp.reorder(ret, r2n=True)
    elif method == "direct":
        R = hp.Rotator(rot=np.asarray([0, theta, -phi]), deg=False, inv=False, eulertype="Y")
        theta, phi = hp.pix2ang(nside, np.arange(npix), nest=nest)
        ipix = hp.ang2pix(nside, *R(theta, phi), nest=nest)
        ret = m[ipix]
    else:
        raise ValueError("Unrecognized method: {0}".format(method))

    return ret
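
A hedged spot-check, not part of the source: a unit point source placed at (ra, dec) should land near the +z axis after the 'direct' rotation. The coordinates and tolerance below are arbitrary:

import numpy as np
import healpy as hp

nside = 32
ra, dec = 0.8, -0.3  # arbitrary axis, radians
m = np.zeros(hp.nside2npix(nside))
m[hp.ang2pix(nside, 0.5 * np.pi - dec, ra)] = 1.0

m_rot = rotate_map_to_axis(m, ra, dec, method="direct")
theta, _ = hp.pix2ang(nside, int(np.argmax(m_rot)))
assert theta < 5 * hp.nside2resol(nside)  # brightest pixel is close to the pole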
Example #8
def load_wmap_maps_QU(fnames, n2r=True, Nside=None):
    """
    Load WMAP-format polarization maps from file (i.e. from LAMBDA).

    Returns an n x Npix array, where n = len(`fnames`) is the number of maps
    and Npix is the number of pixels in each map. All maps read in at one
    time must have the same length (Npix).

    If `fnames` is a single file and not a list, reads and returns the single
    specified map.

    If `n2r` is True, converts from NEST format (the WMAP standard) to RING
    format (the healpy standard).

    Scales the map to the specified `Nside`. If None, does not rescale the map.
    All input maps must have the same base Nside.
    """
    colname = 'BESTFIT'
    try:
        Nbands = np.shape(fnames)[0]
    except IndexError:
        # Single-map case. Just read and return the map.
        with fits.open(fnames) as f:
            a = f[1].data[colname]
            base_Nside = f[1].header['NSIDE']
        if (Nside is not None) and (Nside != base_Nside):
            a = hp.ud_grade(a, Nside, order_in='NESTED', order_out='NESTED')
        if n2r:
            a = hp.reorder(a, n2r=True)
        return a

    fname0 = fnames[0]

    # Initialize array and populate first band
    with fits.open(fname0) as f0:
        Npix = f0[1].header['NAXIS2']
        base_Nside = f0[1].header['NSIDE']
        a = np.empty([Nbands, Npix])
        a[0] = f0[1].data[colname]

    for i, fname in enumerate(fnames[1:]):
        with fits.open(fname) as f:
            a[i + 1] = f[1].data[colname]

    if (Nside is not None) and (Nside != base_Nside):
        b = np.empty([a.shape[0], hp.nside2npix(Nside)])
        for i in range(a.shape[0]):
            b[i] = hp.ud_grade(a[i],
                               Nside,
                               order_in='NESTED',
                               order_out='NESTED')
        a = b
    if n2r:
        for i in range(a.shape[0]):
            a[i] = hp.reorder(a[i], n2r=True)

    return a
Example #9
def plug_holes(m, verbose=False, in_place=True, nest=False):
    """
    Use simple downgrading to derive estimates of the missing pixel values
    """
    autotimer = timing.auto_timer()
    nbad_start = np.sum(np.isclose(m, hp.UNSEEN))

    if nbad_start == m.size:
        if verbose:
            print('plug_holes: All map pixels are empty. Cannot plug holes',
                  flush=True)
        return

    if nbad_start == 0:
        return

    nside = hp.get_nside(m)
    npix = m.size
    if nest:
        mnest = m.copy()
    else:
        mnest = hp.reorder(m, r2n=True)

    lowres = mnest
    nside_lowres = nside
    bad = np.isclose(mnest, hp.UNSEEN)
    while np.any(bad) and nside_lowres > 1:
        nside_lowres //= 2
        lowres = hp.ud_grade(lowres, nside_lowres, order_in='NESTED')
        hires = hp.ud_grade(lowres, nside, order_in='NESTED')
        bad = np.isclose(mnest, hp.UNSEEN)
        mnest[bad] = hires[bad]

    nbad_end = np.sum(bad)

    if nbad_end != 0:
        mn = np.mean(mnest[np.logical_not(bad)])
        mnest[bad] = mn

    if not in_place:
        m = m.copy()
    if nest:
        m[:] = mnest
    else:
        m[:] = hp.reorder(mnest, n2r=True)

    if verbose and nbad_start != 0:
        print('plug_holes: Filled {} missing pixels ({:.2f}%), lowest '
              'resolution was Nside={}.'.format(
                  nbad_start, (100.*nbad_start) // npix, nside_lowres))
    del autotimer
    return m
Example #10
def plug_holes(m, verbose=False, in_place=True, nest=False):
    """Use simple downgrading to derive estimates of the missing pixel values
    """
    nbad_start = np.sum(np.isclose(m, hp.UNSEEN))

    if nbad_start == m.size:
        if verbose:
            print("plug_holes: All map pixels are empty. Cannot plug holes", flush=True)
        return

    if nbad_start == 0:
        return

    nside = hp.get_nside(m)
    npix = m.size
    if nest:
        mnest = m.copy()
    else:
        mnest = hp.reorder(m, r2n=True)

    lowres = mnest
    nside_lowres = nside
    bad = np.isclose(mnest, hp.UNSEEN)
    while np.any(bad) and nside_lowres > 1:
        nside_lowres //= 2
        lowres = hp.ud_grade(lowres, nside_lowres, order_in="NESTED")
        hires = hp.ud_grade(lowres, nside, order_in="NESTED")
        bad = np.isclose(mnest, hp.UNSEEN)
        mnest[bad] = hires[bad]

    nbad_end = np.sum(bad)

    if nbad_end != 0:
        mn = np.mean(mnest[np.logical_not(bad)])
        mnest[bad] = mn

    if not in_place:
        m = m.copy()
    if nest:
        m[:] = mnest
    else:
        m[:] = hp.reorder(mnest, n2r=True)

    if verbose and nbad_start != 0:
        print(
            "plug_holes: Filled {} missing pixels ({:.2f}%), lowest "
            "resolution was Nside={}.".format(
                nbad_start, (100.0 * nbad_start) // npix, nside_lowres
            )
        )
    return m
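
A hedged usage sketch for plug_holes (the nside and the knocked-out pixel range are arbitrary):

import numpy as np
import healpy as hp

nside = 16
m = np.arange(hp.nside2npix(nside), dtype=float)
m[100:200] = hp.UNSEEN  # knock out a block of pixels

plug_holes(m)  # in_place=True by default: m is modified directly
assert not np.any(np.isclose(m, hp.UNSEEN))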
Example #11
def map_variance(input_map, nside):

    inp_nside = healpy.get_nside(input_map)

    # Convert to NESTED and then take advantage of this to group into lower
    # resolution pixels
    map_nest = healpy.reorder(input_map, r2n=True)
    map_nest = map_nest.reshape(-1, (inp_nside // nside) ** 2)

    # Calculate the variance in each low resolution pixel
    var_map = map_nest.var(axis=1)

    # Convert back to RING and return
    return healpy.reorder(var_map, n2r=True)
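
A hedged usage check (arbitrary resolutions): a constant map should produce zero variance in every low-resolution pixel:

import numpy as np
import healpy

nside_in, nside_out = 16, 4
m = np.ones(healpy.nside2npix(nside_in))

var_map = map_variance(m, nside_out)
assert var_map.size == healpy.nside2npix(nside_out)
assert np.allclose(var_map, 0.0)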
Example #13
def load_channels(fname_fmt,
                  logger=log.null_logger,
                  TabbeamChan=r3_channel_beams,
                  verbose=False):
    '''
    Loads planck channels 1-9 in the files described by fname_fmt, and applies
    appropriate beam transformations and cuts.
    '''
    with log.Timer(logger, 'reading in all freq maps'):
        all_bands = np.array([
            hp.ud_grade(hp.read_map(fname_fmt.format(planck_freqs[chan_i]),
                                    verbose=verbose),
                        2048,
                        order_in='ring',
                        order_out='ring') for chan_i in range(9)
        ])

    all_bands[all_bands < -1e6] = 0.0

    all_bands[7] /= cfact_545_857[0]
    all_bands[8] /= cfact_545_857[1]

    output_maps = np.zeros_like(all_bands)

    with log.Timer(logger, 'filtering LFI bands'):
        alms = hp.map2alm(all_bands[:3],
                          lmax=2500,
                          use_weights=True,
                          pol=False)
        for i in range(alms.shape[0]):
            norm = TabbeamChan[i, 0]
            beameq = resolution_change_pwf * (tabbeam[i, :2049] * norm /
                                              TabbeamChan[i, :2049])
            hp.almxfl(alms[i], beameq, inplace=True)
            output_maps[i] = col_cmb[i] * hp.reorder(
                hp.alm2map(alms[i], 2048, verbose=verbose), r2n=True)

    with log.Timer(logger, 'filtering HFI bands'):
        alms = hp.map2alm(all_bands[3:],
                          lmax=4000,
                          use_weights=True,
                          pol=False)
        for i in range(alms.shape[0]):
            norm = TabbeamChan[3 + i, 0]
            beameq = norm * tabbeam[3 + i] / TabbeamChan[3 + i]
            hp.almxfl(alms[i], beameq, inplace=True)
            output_maps[3 + i] = col_cmb[3 + i] * hp.reorder(
                hp.alm2map(alms[i], 2048, verbose=verbose), r2n=True)

    return output_maps
Example #14
    def smooth(self, fwhm, lmax=None, pol_only=False):
        """Smooth the map with a Gaussian kernel.
        """
        if self.rank == 0:
            if pol_only:
                print("Smoothing the polarization to {} arcmin".format(fwhm),
                      flush=True)
            else:
                print("Smoothing the map to {} arcmin".format(fwhm),
                      flush=True)

        if lmax is None:
            lmax = min(int(fwhm / 60 * 512), 2 * self.nside)

        # If the map is in node-shared memory, only the root process on each
        # node does the smoothing.
        if not self.shmem or self._map.nodecomm.rank == 0:
            if self.pol:
                m = np.vstack([self._map[:], self._map_Q[:], self._map_U[:]])
            else:
                m = self._map[:]
            if self.nest:
                m = hp.reorder(m, n2r=True)
            smap = hp.smoothing(m,
                                fwhm=fwhm * arcmin,
                                lmax=lmax,
                                verbose=False)
            del m
            if self.nest:
                smap = hp.reorder(smap, r2n=True)
        else:
            # Convenience dummy variable
            smap = np.zeros([3, 12])

        if not pol_only:
            if self.shmem:
                self._map.set(smap[0].astype(DTYPE), (0, ), fromrank=0)
            else:
                self._map[:] = smap[0]

        if self.pol:
            if self.shmem:
                self._map_Q.set(smap[1].astype(DTYPE), (0, ), fromrank=0)
                self._map_U.set(smap[2].astype(DTYPE), (0, ), fromrank=0)
            else:
                self._map_Q[:] = smap[1]
                self._map_U[:] = smap[2]

        self.pol_fwhm = fwhm
        return
Example #15
    def smooth(self, fwhm, lmax=None, pol_only=False):
        """ Smooth the map with a Gaussian kernel.
        """
        autotimer = timing.auto_timer(type(self).__name__)
        if self.rank == 0:
            if pol_only:
                print('Smoothing the polarization to {} arcmin'.format(fwhm),
                      flush=True)
            else:
                print('Smoothing the map to {} arcmin'.format(fwhm), flush=True)

        if lmax is None:
            lmax = min(int(fwhm / 60 * 512), 2 * self.nside)

        # If the map is in node-shared memory, only the root process on each
        # node does the smoothing.
        if not self.shmem or self._map.nodecomm.rank == 0:
            if self.pol:
                m = np.vstack([self._map[:], self._map_Q[:], self._map_U[:]])
            else:
                m = self._map[:]
            if self.nest:
                m = hp.reorder(m, n2r=True)
            smap = hp.smoothing(m, fwhm=fwhm * arcmin, lmax=lmax, verbose=False)
            del m
            if self.nest:
                smap = hp.reorder(smap, r2n=True)
        else:
            # Convenience dummy variable
            smap = np.zeros([3, 12])

        if not pol_only:
            if self.shmem:
                self._map.set(smap[0].astype(DTYPE), (0,), fromrank=0)
            else:
                self._map[:] = smap[0]

        if self.pol:
            if self.shmem:
                self._map_Q.set(smap[1].astype(DTYPE), (0,), fromrank=0)
                self._map_U.set(smap[2].astype(DTYPE), (0,), fromrank=0)
            else:
                self._map_Q[:] = smap[1]
                self._map_U[:] = smap[2]

        self.pol_fwhm = fwhm
        del autotimer
        return
Example #16
    def resolved_visibility_illumination_matrix(self, p):
        V = self.visibility_illumination_matrix(p)

        assert self.nside_illum >= self.nside, 'resolution mismatch: nside > nside_illum'

        if self.nside_illum == self.nside:
            return V
        else:
            nside_V = self.nside_illum
            V = np.array(hp.reorder(V, r2n=True))
            while nside_V > self.nside:
                V = V[:, ::4] + V[:, 1::4] + V[:, 2::4] + V[:, 3::4]
                nside_V //= 2
            V = np.array(hp.reorder(V, n2r=True))

            return V
Example #17
def _rotate_maps(map, ctx, downsampling=False):
    """
    Rotates a Healpix convergence map into the position given by the configured
    alpha/dec rotations and pads it to a common size using the extended indices.
    :param map: a Healpix convergence map
    :param ctx: Context instance
    :param downsampling: target nside to downsample to, or False to keep the input resolution
    :return: the rotated map in NESTED ordering (hp.UNSEEN outside the extended region)
    """
    indices = np.arange(len(map))[map > hp.UNSEEN]
    delta, alpha = hp.pix2ang(ctx["NSIDE"], indices)

    mock_cat = catalog(alphas=alpha, deltas=delta, degree=False, colat=True)
    alpha, delta = mock_cat._rotate_coordinates(
        alpha_rot=2 * np.pi - ctx["alpha_rotations"][ctx["index_counter"]],
        delta_rot=2 * np.pi - ctx["dec_rotations"][ctx["index_counter"]],
        mirror=ctx["mirror"][ctx["index_counter"]])
    pix = mock_cat._pixelize(alpha, delta)
    rotated_indices_ext = extend_indices(indices=pix,
                                         nside_in=ctx["NSIDE"],
                                         nside_out=ctx["NSIDE_OUT"],
                                         nest=False)
    rotated_map = np.full_like(map, hp.UNSEEN)
    zero_padding = np.zeros_like(map)
    rotated_map[rotated_indices_ext] = zero_padding[rotated_indices_ext]
    rotated_map[pix] = map[indices]

    if downsampling:
        rotated_map = hp.ud_grade(rotated_map,
                                  downsampling,
                                  order_out="NESTED")
    else:
        rotated_map = hp.reorder(rotated_map, r2n=True)

    return rotated_map
Example #18
def make_maps(args, comm, mode):
    hp.disable_warnings()
    nprocs = comm.Get_size()
    rank = comm.Get_rank()
    name = MPI.Get_processor_name()
    
    if rank == 0:
        reals = np.arange(args.nreal)
        chunks = np.array_split(reals, nprocs)
    else:
        chunks = None
        
    chunk = comm.scatter(chunks, root=0)
    
    if mode is not False:
        if mode == 'E':
            cl = make_cl(args, comm, mode)
        if mode == 'B':
            cl = make_cl(args, comm, mode)
    else:
        cl = load_cl(args, comm)
        
    for i in range(chunk[0], chunk[-1]+1):
        if mode is not False:
            print(f'Rank {rank} is processing {mode} realization {i} on processor {name}')
            outdir = f"{args.outpath}/{mode}"
        else:
            print(f'Rank {rank} is processing realization {i} on processor {name}')
            outdir = f"{args.outpath}"

        m = hp.synfast(cl, args.nside, lmax=3*args.nside-1, pol=True, new=True)
        #m_smooth = hp.smoothing(m, args.beamfwhm *np.pi/10800)
        if not os.path.isdir(outdir):
            os.mkdir(outdir)
        hp.write_map(f"{outdir}/map_{i}.fits", hp.reorder(m, r2n=True), overwrite=True, nest= True, dtype=np.float64)
Example #19
def plot_filters_section(filters,
                         order=10,
                         xlabel='out map {}',
                         ylabel='in map {}',
                         title='Sections of the {} filters in the filterbank',
                         figsize=None,
                         **kwargs):
    """Plot the sections of all filters in a filterbank."""

    nside = hp.npix2nside(filters.G.N)
    npix = hp.nside2npix(nside)

    # Create an inverse mapping from nest to ring.
    index = hp.reorder(np.arange(npix), n2r=True)

    # Get the index of the equator.
    index_equator, ind = get_index_equator(nside, order)
    nrows, ncols = filters.n_features_in, filters.n_features_out

    maps = filters.localize(ind, order=order)
    if maps.shape[0] == filters.G.N:
        # FIXME: old signal shape when not using Chebyshev filters.
        shape = (nrows, ncols, filters.G.N)
        maps = maps.T.reshape(shape)
    else:
        if nrows == 1:
            maps = np.expand_dims(maps, 0)
        if ncols == 1:
            maps = np.expand_dims(maps, 1)

    # Make the x axis: angular position of the nodes in degree.
    angle = hp.pix2ang(nside, index_equator, nest=True)[1]
    angle -= abs(angle[-1] + angle[0]) / 2
    angle = angle / (2 * np.pi) * 360

    if figsize is None:
        figsize = (12, 12 / ncols * nrows)

    # Plot everything.
    fig, axes = plt.subplots(nrows,
                             ncols,
                             figsize=figsize,
                             squeeze=False,
                             sharex='col',
                             sharey='row')

    ymin, ymax = 1.05 * maps.min(), 1.05 * maps.max()
    for row in range(nrows):
        for col in range(ncols):
            map = maps[row, col, index_equator]
            axes[row, col].plot(angle, map, **kwargs)
            axes[row, col].set_ylim(ymin, ymax)
            if row == nrows - 1:
                #axes[row, col].xaxis.set_ticks_position('top')
                #axes[row, col].invert_yaxis()
                axes[row, col].set_xlabel(xlabel.format(col))
            if col == 0:
                axes[row, col].set_ylabel(ylabel.format(row))
    fig.suptitle(title.format(filters.n_filters))  #, y=0.90)
    return fig
Example #20
def build_map(inmap,
              output,
              i,
              ntot,
              ns=1,
              g=0,
              sml=5.,
              lmax=3 * 2048,
              prefix=''):
    ch_mkdir(output)
    dont = 1
    npatch = 12 * ns**2
    for j in range(npatch):
        if not os.path.exists(output + prefix + str(i * npatch + j) + '.npy'):
            dont = 0
    if dont:
        return

    if sml != 0:
        s = hp.read_map(inmap, nest=0, verbose=0)
        if g:
            s = kelvin_check(s)
        s = hp.smoothing(s, np.radians(sml / 60.), lmax=lmax, verbose=0)
        s = hp.reorder(s, r2n=1)
    else:
        s = hp.read_map(inmap, nest=1, verbose=0)
    patches = sky2patch(s, ns)
    for j in range(npatch):
        pop_percent(i * npatch + j, ntot * npatch)
        np.save(output + prefix + str(i * npatch + j), patches[j])
        plt.imshow(patches[j], cmap=cmap)
        plt.savefig(output + prefix + str(i * npatch + j) + '.jpg')
        plt.close()
Example #21
def _rotate_map(map, ctx):
    indices = np.arange(len(map))[map > hp.UNSEEN]
    delta, alpha = hp.pix2ang(ctx["NSIDE"], indices)

    mock_cat = catalog(alphas=alpha, deltas=delta, degree=False, colat=True)
    alpha, delta = mock_cat._rotate_coordinates(
        alpha_rot=2 * np.pi - ctx["alpha_rotations"][ctx["index_counter"]],
        delta_rot=2 * np.pi - ctx["dec_rotations"][ctx["index_counter"]],
        mirror=ctx["mirror"][ctx["index_counter"]])
    pix = mock_cat._pixelize(alpha, delta)
    del (mock_cat)

    rotated_indices_ext = extend_indices(indices=pix,
                                         nside_in=ctx["NSIDE"],
                                         nside_out=ctx["NSIDE_OUT"],
                                         nest=False)
    rotated_map = np.full_like(map, hp.UNSEEN)
    zero_padding = np.zeros_like(map)
    rotated_map[rotated_indices_ext] = zero_padding[rotated_indices_ext]
    rotated_map[pix] = map[indices]

    if ctx["down_sample"]:
        rotated_map = hp.ud_grade(rotated_map,
                                  ctx["down_sample"],
                                  order_out="NESTED")
    else:
        rotated_map = hp.reorder(rotated_map, r2n=True)

    return rotated_map
Example #22
    def test_generate_healpix_map_ring(self):
        """
        Test the generation of a healpixmap in ring type
        """
        np.random.seed(12345)

        nside_coverage = 32
        nside_map = 64

        n_rand = 1000
        ra = np.random.random(n_rand) * 360.0
        dec = np.random.random(n_rand) * 180.0 - 90.0
        value = np.random.random(n_rand)

        # Create a HEALPix map
        healpix_map = np.zeros(hp.nside2npix(nside_map),
                               dtype=np.float64) + hp.UNSEEN
        idx = hp.ang2pix(nside_map,
                         np.pi / 2 - np.radians(dec),
                         np.radians(ra),
                         nest=True)
        healpix_map[idx] = value
        # Create a HealSparseMap
        sparse_map = healsparse.HealSparseMap(nside_coverage=nside_coverage,
                                              healpix_map=healpix_map)
        hp_out_ring = sparse_map.generate_healpix_map(nside=nside_map,
                                                      nest=False)
        healpix_map_ring = hp.reorder(healpix_map, n2r=True)
        testing.assert_almost_equal(healpix_map_ring, hp_out_ring)
Example #23
def update_gwemoptconfig(grb_dic, conf_dic, params):
    """
    Update parameters for GRB alert on gwemopt

    :param grb_dic:
    :param conf_dic:
    :param params: dictionary to be used to start gwemopt and that
    will be updated/completed
    :return: updated params dictionary
    """

    # For Fermi GRB alerts, disable the 3D (distance) treatment
    if grb_dic["teles"] == "FERMI":
        params["do3D"] = False
        # parsing skymap
        params["doDatabase"] = True
        params["dateobs"] = grb_dic["dateobs"]
        order = hp.nside2order(grb_dic["skymap"]["nside"])
        t = rasterize(grb_dic["skymap"]["skymap"], order)
        result = t['PROB']
        flat = hp.reorder(result, 'NESTED', 'RING')
        params['map_struct'] = {}
        params['map_struct']['prob'] = flat
    if params["do3D"]:
        params["DISTMEAN"] = grb_dic["skymap"]["distmu"]
        params["DISTSTD"] = grb_dic["skymap"]["distsigma"]

        # Use galaxies to compute the grade, both for tiling and galaxy
        # targeting, only when dist_mean + dist_std < 400Mpc
        if params["DISTMEAN"] + params["DISTSTD"] <= conf_dic["Dist_cut"]:
            params["doUseCatalog"] = True
            params["doCatalog"] = True
            params["writeCatalog"] = True

    return params
Example #24
def get_gsm_map_lowres(frequency):
    freq = frequency
    nside = 64
    unit = 'MJysr'
    convert_ring = 'True'

    map_ni = np.loadtxt(script_path + '/data/lowres_maps.txt')

    spec_nf = np.loadtxt(script_path + '/data/spectra.txt')

    nfreq = spec_nf.shape[1]

    left_index = -1
    for i in range(nfreq - 1):
        if freq >= spec_nf[0, i] and freq <= spec_nf[0, i + 1]:
            left_index = i
            break
    if left_index < 0:
        print("FREQUENCY ERROR: %.2e GHz is outside supported frequency range "
              "of %.2e GHz to %.2e GHz." % (freq, spec_nf[0, 0], spec_nf[0, -1]))

    interp_spec_nf = np.copy(spec_nf)
    interp_spec_nf[0:2] = np.log10(interp_spec_nf[0:2])
    x1 = interp_spec_nf[0, left_index]
    x2 = interp_spec_nf[0, left_index + 1]
    y1 = interp_spec_nf[1:, left_index]
    y2 = interp_spec_nf[1:, left_index + 1]
    x = np.log10(freq)
    interpolated_vals = (x * (y2 - y1) + x2 * y1 - x1 * y2) / (x2 - x1)
    result = np.sum(10.**interpolated_vals[0] * (interpolated_vals[1:, None] * map_ni), axis=0)

    result = hp.reorder(result, n2r=True)

    result *= 1e6 # convert from MJysr to Jysr

    return result
Example #25
    def test_upgrade_healpix(self):
        """Test correctness of healpix upgrading

        """
        nside_in = 2
        nside_out = nside_in * 2  # must differ by 1 order for this test
        npix_in = hp.nside2npix(nside_in)
        npix_out = hp.nside2npix(nside_out)
        pix_i = 5
        # Upgrade pix_i in NSIDE=1 using cu
        # Downgrade all pixels in NSIDE=2 to NSIDE=1
        # Check if mappings from NSIDE=1 to NSIDE=2 match
        # Output is always NESTED
        # Test 1: Input pix_i is in NESTED
        # "visual" checks with https://healpix.jpl.nasa.gov/html/intronode4.htm
        actual = obs_utils.upgrade_healpix(pix_i, True, nside_in, nside_out)
        desired_all = np.arange(npix_out).reshape((npix_in, 4))
        desired = np.sort(desired_all[pix_i, :])  # NESTED
        np.testing.assert_array_equal(desired, [20, 21, 22, 23], "visual")
        np.testing.assert_array_equal(actual, desired, "input in NESTED")
        # Test 2: Input pix_i is in RING
        actual = obs_utils.upgrade_healpix(pix_i, False, nside_in, nside_out)
        # See https://stackoverflow.com/a/56675901
        # `reorder` reorders RING IDs in NESTED order
        # `reshape` is possible because the ordering is NESTED
        # indexing should be done with a NESTED ID because ordering is NESTED
        # but the output is in RING ID, which was reordered in the first place
        desired_all = hp.reorder(np.arange(npix_out), r2n=True).reshape(
            (npix_in, 4))
        desired_ring = desired_all[hp.ring2nest(nside_in, pix_i), :]
        np.testing.assert_array_equal(np.sort(desired_ring), [14, 26, 27, 43],
                                      "visual")
        desired_nest = hp.ring2nest(nside_out, desired_ring)
        np.testing.assert_array_equal(np.sort(actual), np.sort(desired_nest),
                                      "input in RING")
Example #26
    def run(self, list_maps):
        
        # http://legacysurvey.org/dr8/files/#random-catalogs
        FluxToMag = lambda flux: -2.5 * (np.log10(5/np.sqrt(flux)) - 9.)

        # http://legacysurvey.org/dr8/catalogs/#galactic-extinction-coefficients
        ext = dict(g=3.214, r=2.165, z=1.211)


        
        self.maps = []
        self.list_maps = list_maps
        
        for map_i in self.list_maps:
            
            self.logger.info(f'read {map_i}')
            hpmap_i = self.templates[map_i]
            
            #--- fix depth
            if 'depth' in map_i:                
                self.logger.info(f'change {map_i} units')
                _,band = map_i.split('_')                
                hpmap_i = FluxToMag(hpmap_i)
                
                if band in 'rgz':
                    self.logger.info(f'apply extinction on {band}')
                    hpmap_i -= ext[band]*self.templates['ebv']
                
            #--- rotate
            self.maps.append(hp.reorder(hpmap_i, n2r=True))   
Example #27
def compute_spherical_harmonics(nside, lmax):
    """Compute the spherical harmonics up to lmax.

    Returns
    -------
    harmonics: array of shape n_pixels x n_harmonics
        Harmonics are in nested order.
    """

    n_harmonics = np.sum(np.arange(1, 2 * lmax + 2, 2))
    harmonics = np.empty((hp.nside2npix(nside), n_harmonics))
    midx = 0

    for l in range(lmax + 1):
        for m in range(-l, l + 1):
            size = hp.sphtfunc.Alm.getsize(l, mmax=l)
            alm = np.zeros(size, dtype=np.complex128)
            idx = hp.sphtfunc.Alm.getidx(l, l, abs(m))
            alm[idx] = 1 if m == 0 else (1 - 1j) / np.sqrt(2) if m < 0 else (
                1 + 1j) / np.sqrt(2)
            harmonic = hp.sphtfunc.alm2map(alm, nside, l, verbose=False)
            harmonic /= np.sqrt(np.sum(harmonic**2))
            harmonics[:, midx] = hp.reorder(harmonic, r2n=True)
            midx += 1

    return harmonics
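
A hedged check of the normalization: each returned column has unit norm by construction, so the Gram matrix diagonal should be exactly 1 (near-orthogonality off the diagonal is expected when nside is well above lmax). Note the function passes verbose= to alm2map, which only older healpy releases accept, so this sketch assumes such a version:

import numpy as np

harmonics = compute_spherical_harmonics(nside=16, lmax=4)  # arbitrary small case
gram = harmonics.T @ harmonics
assert np.allclose(np.diag(gram), 1.0)
print(np.abs(gram - np.eye(gram.shape[0])).max())  # small off-diagonal leakage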
Example #28
File: cmb.py Project: Kemalakin/cmb
def generate_maps(Nside=512,
                  lensed=True,
                  n2r=False,
                  return_cls=False,
                  fname=None):
    """
    Generates a realization of the standard LCDM CMB T, Q, and U maps,
    using parameters that should agree with Planck 2013, in units of uK,
    in thermodynamic units.

    The maps are in Healpix format. If `n2r` is True, the resulting maps will be
    in RING format. Otherwise, the resulting maps will be in NEST format.

    `Nside` determines the number of pixels in each map. Npix = 12*Nside^2.

    If `lensed` is True, uses the lensed Cl's. Otherwise uses the unlensed
    Cl's.

    If `return_cls` is True, the dictionary of Cl's is also returned. These
    Cl's are in units of uK^2 and are related to Dl's by

        D_\ell^{XX} = \frac{\ell(\ell+1)}{2\pi} C_\ell^{XX}
    """
    cldict = get_cls(lensed=lensed, fname=fname)
    cls = np.array([cldict[xx] for xx in ('TT', 'EE', 'BB', 'TE')])
    maps = list(hp.synfast(cls, nside=Nside, pol=True, new=True,
                           verbose=False))
    if not n2r:
        for i in range(len(maps)):
            maps[i] = hp.reorder(maps[i], r2n=True)
    if return_cls:
        return maps, cldict
    else:
        return maps
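
The D_ell relation quoted in the docstring, spelled out on a placeholder spectrum (illustrative values only, not Planck Cl's):

import numpy as np

ell = np.arange(2, 2501)
cl = 1.0 / ell ** 2                        # placeholder C_ell in uK^2
dl = ell * (ell + 1) / (2 * np.pi) * cl    # D_ell = l(l+1)/(2 pi) C_ell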
Example #29
def get_mask_gal(percentage_keep=40, nside_out=512, coordinates='eq', quick=True):
    import pyfits
    from astropy.coordinates import FK5
    from astropy import units as u    
    savename = datadir+'mask_GAL0%i_%i_'%(percentage_keep,nside_out)
    savename += coordinates+'_.pkl'
    if quick: return pickle.load(open(savename, 'rb'))

    pp = pyfits.open(datadir+'HFI_Mask_GalPlane_2048_R1.10.fits')
    mask_gal = pp[1].data['GAL0%i'%percentage_keep]
    mask_gal = hp.reorder(mask_gal, out='RING', inp='NESTED')
    if coordinates=='gal':
        mask_out = hp.ud_grade(mask_gal, nside_out)
    if coordinates=='eq':
        nside_up = nside_out*2
        mask_gal = hp.ud_grade(mask_gal, nside_up)
        # Find the indices in an up-sampled *galactic* map that belong to these 
        # *equatorial* coordinates.
        theta, phi = hp.pix2ang(nside_up, np.arange(hp.nside2npix(nside_up)))
        ra = phi
        dec = np.pi/2.-theta
        coord = FK5(ra=ra, dec=dec, unit=(u.rad, u.rad))
        l_gal = coord.galactic.l.rad
        b_gal = coord.galactic.b.rad
        phi = l_gal
        theta = np.pi/2.-b_gal
        ind_up = hp.ang2pix(nside_up, theta, phi)
        mask_up_eq = mask_gal[ind_up]
        mask_out = hp.ud_grade(mask_up_eq, nside_out)

    pickle.dump(mask_out, open(savename, 'wb'))
    return mask_out
Example #30
def anand():
    dr8_elg = ft.read(
        '/home/mehdi/data/formehdi/pixweight_ar-dr8-0.32.0-elgsv.fits')
    nside = 256
    npix = 12 * nside * nside

    ss = [
        'GALDEPTH_R', 'GALDEPTH_G', 'GALDEPTH_Z', 'PSFSIZE_R', 'PSFSIZE_G',
        'PSFSIZE_Z', 'EBV', 'STARDENS'
    ]

    sysmaps = {}

    sysmaps['HPIX'] = np.arange(npix)  #.astype('i8')
    for ss_i in ss:
        sysmaps[ss_i] = hp.reorder(dr8_elg[ss_i], n2r=True)

    sysmaps['nran'] = hp.reorder(dr8_elg['FRACAREA'], n2r=True)
    sysmaps['ngal'] = hp.reorder(
        dr8_elg['SV'], n2r=True) * sysmaps['nran'] * hp.nside2pixarea(
            256, degrees=True)

    dataframe = pd.DataFrame(sysmaps)
    dataframe.replace([np.inf, -np.inf], value=np.nan,
                      inplace=True)  # replace inf

    dataframe.to_hdf('/home/mehdi/data/formehdi/dr8_elgsv.h5',
                     'data',
                     overwrite=True)

    hp.write_map('/home/mehdi/data/formehdi/dr8_elgsv_ngal.hp.256.fits',
                 dataframe.ngal,
                 fits_IDL=False,
                 dtype=np.float64)

    mysample = dataframe[dataframe['nran'] > 0]
    mysample.dropna(inplace=True)
    mysample.shape
    hd5_2_fits(mysample,
               ss,
               fitname='/home/mehdi/data/formehdi/dr8_elgsv.fits',
               hpmask='/home/mehdi/data/formehdi/dr8_elgsv_mask.hp.256.fits',
               hpfrac='/home/mehdi/data/formehdi/dr8_elgsv_frac.hp.256.fits',
               fitnamekfold='/home/mehdi/data/formehdi/dr8_elgsv_5r.npy',
               res=256,
               k=5)
    return 0
Example #31
def psd_unseen_helper(x, Nside):
    """Compute the Power Spectral Density for heaply maps (incomplete data)."""
    if len(x.shape) == 2 and x.shape[1] > 1:
        return np.stack([psd_unseen(x[ind, ]) for ind in range(len(x))])
    y = np.zeros(shape=[hp.nside2npix(Nside)])
    y[:] = hp.UNSEEN
    y[:len(x)] = x
    hatx = hp.map2alm(hp.reorder(y, n2r=True))
    return hp.alm2cl(hatx)
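
A hedged shape check (the half-sky cut is arbitrary). healpy's default lmax is 3*Nside - 1, so the returned spectrum has 3*Nside entries:

import numpy as np
import healpy as hp

nside = 16
x = np.random.randn(hp.nside2npix(nside) // 2)  # first half of the sky, NESTED order
cl = psd_unseen_helper(x, nside)
assert cl.size == 3 * nside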
Example #32
    def __init__(self, name=f'{templ_dir}allstars17.519.9Healpixall256.dat', nside_out=256):
        self.unit = '# stars'
        self.nstar = np.loadtxt(name)
        self.map = hp.reorder(self.nstar, n2r=True)
        self.nside = hp.get_nside(self.map)

        if nside_out != self.nside:
            self.map = hp.ud_grade(self.map, nside_out=nside_out, power=-2)
            warnings.warn('upgrading/downgrading SDSS star density')
Example #33
def get_all_faces(Imag, nested=False):
    r"""
    This function maps a function defined on the sphere to an array of 12
    cartesian images, where each of the cartesian images corresponds to
    a pullback of the given function with respect to one of the 12 HEALPix
    charts.

    **Required parameter**

    :param numpy.ndarray Imag:
       The function defined on the sphere, in the form of a one-dimensional
       :class:`numpy.ndarray` (a HEALPix map).

       .. warning::
           If the HEALPix map ``Imag`` uses "NESTED" ordering, the parameter
           ``nested`` needs to be set to ``True``.

    **Keyword parameter**

    :param bool nested:
        This parameter determines the ordering with which the different
        HEALPix pixels are stored in the array ``Imag``; see
        http://healpix.jpl.nasa.gov/html/intronode4.htm for more details.

        If ``nested`` is set to ``False``, this signifies that ``Imag`` uses
        the "RING" ordering.

    **Return value**

    :return:
        A 3-dimensional :class:`numpy.ndarray` consisting of 12 cartesian
        images (2-dimensional arrays), where each image is a pullback of
        the input function ``Imag`` with respect to one of  the 12 HEALPix
        charts.
    """
    npix = np.shape(Imag)[0]
    assert npix % 12 == 0
    nside = hp.npix2nside(npix)
    taille_face = npix // 12
    cote = int(math.sqrt(taille_face))
    CubeFace = np.zeros((12, cote, cote))
    if not nested:
        NewIm = hp.reorder(Imag, r2n=True)
    else:
        NewIm = Imag
    index = np.zeros((cote, cote))
    index = np.array([hp.xyf2pix(nside, x, y, 0, True)
                      for x in range(nside)
                      for y in range(nside)])
    for face in range(12):
        # print("Process Face {0}".format(face))
        CubeFace[face] = np.resize(NewIm[index + taille_face * face],
                                   (cote, cote))
        # plt.figure(),imshow(np.log10(1+CubeFace[face,:,:]*1e6))
        # plt.title("face {0}".format(face)),plt.colorbar()
    return CubeFace
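
A hedged usage check: with a NESTED identity map, the 12 returned faces tile the sphere, so every pixel value appears exactly once:

import numpy as np
import healpy as hp

nside = 8  # arbitrary
m = np.arange(hp.nside2npix(nside), dtype=float)
faces = get_all_faces(m, nested=True)
assert faces.shape == (12, nside, nside)
assert np.array_equal(np.sort(faces.ravel()), np.arange(m.size, dtype=float))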
Example #34
    def hspmap(self,
               hspmap,
               pixel=None,
               nside=None,
               xsize=800,
               lonra=None,
               latra=None,
               badval=healpix.UNSEEN,
               smooth=None,
               **kwargs):
        """ Draw a healpix map with pcolormesh.

        Parameters
        ----------
        hspmap: input healpix or HealSparse map
        pixel:  explicit pixel indices in RING scheme (required for partial healpix maps)
        nside:  explicit nside of the map (required for partial healpix maps); if
                passed while visualizing a HealSparse map, it will degrade the map to this nside.
        xsize:  resolution of the output image
        lonra:  longitude range [-180,180] (deg)
        latra:  latitude range [-90,90] (deg)
        badval: set of values considered "bad"
        smooth: gaussian smoothing kernel (deg)
        kwargs: passed to pcolormesh

        Returns
        -------
        im,lon,lat,values : mpl image with pixel longitude, latitude (deg), and values
        """
        # ADW: probably still not the best way to do this...
        import healsparse as hsp
        import healpy as hp

        if not isinstance(hspmap, hsp.HealSparseMap):
            # Assume that it is a healpix map in RING ordering
            hpxmap = hp.reorder(hspmap, r2n=True)
            hspmap = hsp.HealSparseMap(healpix_map=hpxmap,
                                       nside_coverage=nside)

        lon, lat, values = healpix.hsp2xy(hspmap,
                                          xsize=xsize,
                                          lonra=lonra,
                                          latra=latra)

        if smooth:
            msg = "Healsparse smoothing not implemented."
            raise Exception(msg)

        vmin, vmax = np.percentile(values.compressed(), [2.5, 97.5])

        defaults = dict(rasterized=True, vmin=vmin, vmax=vmax)
        setdefaults(kwargs, defaults)

        im = self.pcolormesh(lon, lat, values, **kwargs)
        self._sci(im)
        return im, lon, lat, values
Example #35
def high_pass_filt(sig, cutoff=500, nest=True):
    Nside = 512
    func = np.ones(3 * Nside)
    func[:cutoff] = 0
    alm = hp.sphtfunc.map2alm(sig)
    alm = hp.sphtfunc.almxfl(alm, func)
    hpmap = hp.sphtfunc.alm2map(alm, Nside)
    if nest:
        hpmap = hp.reorder(hpmap, r2n=True)
    return hpmap
Example #36
    def flat(self):
        """Get flat resolution HEALPix dataset, probability density and
        distance."""
        if self.is_3d:
            order = hp.nside2order(Localization.nside)
            t = rasterize(self.table, order)
            result = t['PROB'], t['DISTMU'], t['DISTSIGMA'], t['DISTNORM']
            return hp.reorder(result, 'NESTED', 'RING')
        else:
            return self.flat_2d,
Example #37
def smooth(inpath, outpath, sigma, smooth=True):
    """Smooth the maps (to make the problem harder)."""
    def arcmin2rad(x):
        return x / 60 / 360 * 2 * np.pi

    def gaussian_smoothing(sig, sigma, nest=True):
        if sigma == 0:
            return sig
        if nest:
            sig = hp.reorder(sig, n2r=True)
        smooth = hp.sphtfunc.smoothing(sig, sigma=arcmin2rad(sigma))
        if nest:
            smooth = hp.reorder(smooth, r2n=True)
        return smooth

    def high_pass_filt(sig, cutoff=500, nest=True):
        Nside = 512
        func = np.ones(3 * Nside)
        func[:cutoff] = 0
        alm = hp.sphtfunc.map2alm(sig)
        alm = hp.sphtfunc.almxfl(alm, func)
        hpmap = hp.sphtfunc.alm2map(alm, Nside)
        if nest:
            hpmap = hp.reorder(hpmap, r2n=True)
        return hpmap

    function = gaussian_smoothing if smooth else high_pass_filt

    Nside = 1024
    ds1 = []
    ds2 = []

    filt = 'smoothed' if smooth else 'highpassed'
    for filename in os.listdir(inpath):

        if not filename.endswith('fits'):
            continue

        filepath = os.path.join(inpath, filename)
        img = hp.read_map(filepath, verbose=False)
        img = hp.reorder(img, r2n=True)
        img = hp.ud_grade(img, nside_out=Nside, order_in='NESTED')

        if '0p26' in filename:
            ds1.append(img)
        elif '0p31' in filename:
            ds2.append(img)

    ds1 = [function(el, sigma, nest=True).astype(np.float32) for el in ds1]
    ds2 = [function(el, sigma, nest=True).astype(np.float32) for el in ds2]
    np.savez(os.path.join(outpath, filt + '_class1_sigma{}'.format(sigma)),
             ds1)
    np.savez(os.path.join(outpath, filt + '_class2_sigma{}'.format(sigma)),
             ds2)
Example #38
    def get_superpixel(self, target_nside=4, superpixel=None):
        if superpixel is None:
            superpixel = np.random.randint(0, 12*target_nside**2)

        steps = int(np.log2(self.nside) - np.log2(target_nside))
        in_superpix = hp.reorder(
            superpixel_mask(self.nside, steps) == superpixel,
            n2r=True)

        return self.pixnumbers[in_superpix]
Example #39
    def maps2alm(file_det):
        file, det = file_det
        print("Process " + str(get_mpi_rank()) + " is transforming '" + file + "'")
        mp = H.read_map(file, nest=False)

        # Shouldn't need this since the ordering is autodetected by read_map, but sometimes it's mislabeled
        if params.get('nest2ring'): mp = H.reorder(mp, n2r=True)
        elif params.get('ring2nest'): mp = H.reorder(mp, r2n=True)

        num_unseen = mp[mask * mp < -1e20].shape[0]
        if (num_unseen > 0 and params.get('inpaint', True)):
            print("Warning: Inpainting " + str(num_unseen) + " remaining UNSEEN pixels after masking " + file)
            mp = inpaint(mp, num_degrades=2)
        else:
            print("Warning: Zeroing " + str(num_unseen) + " remaining UNSEEN pixels after masking " + file)
            mp[mask * mp < -1e20] = 0

        mp *= params.get('map_rescale', 1)

        if mask is not None:
            if params.get('subtract_mean', False): mp -= (sum(mask * mp) / sum(mask))
            mp *= mask
        det.insert(1, "T")
        return H.map2alm(mp, lmax=lmax)
Example #40
def load_planck_mask():
    '''
    gmask = fits.open(datadir+'HFI_Mask_GalPlane_2048_R1.10.fits')[1].data['GAL060']#tmpp, 40 vs 60%?
    pmask = np.ones_like(gmask, dtype=np.float)
    tmp = fits.open(datadir+'HFI_Mask_PointSrc_2048_R1.10.fits')[1].data
    for band in [100, 143, 217]:
    #for band in [217]:        #tmpp
        pmask *= tmp['F%i_05'%band]
    mask = gmask*pmask
    #mask = fits.open(datadir+'COM_CompMap_CMB-smica_2048_R1.20.fits')[1].data['VALMASK'] # tmpp, could try I_MASK
    '''
    #mask = fits.open(datadir+'COM_Mask_Likelihood_2048_R1.10.fits')[1].data['CL49']
    mask = fits.open(datadir+'COM_Mask_Likelihood_2048_R1.10.fits')[1].data['CL39']    
    #mask = fits.open(datadir+'COM_Mask_Likelihood_2048_R1.10.fits')[1].data['CL31']    
    mask = hp.reorder(mask, n2r=True)    
    return mask
Example #41
def load_planck_data(band):
    '''
    #tmpp, need to add these to download functions.
    planck = fits.open(datadir+'HFI_SkyMap_217_2048_R1.10_nominal.fits')[1].data['I_STOKES']
    #planck = fits.open(datadir+'HFI_SkyMap_143_2048_R1.10_nominal.fits')[1].data['I_STOKES']
    '''
    '''
    planck = fits.open(datadir+'COM_CompMap_CMB-smica_2048_R1.20.fits')[1].data['I']
    planck *= (1e-6) # convert from uK to K, SMICA only
    '''
    if band=='mb': return make_multiband_map(quick=True)
    planck = fits.open(datadir+'HFI_SkyMap_%i_2048_R1.10_nominal.fits'%band)[1].data['I_STOKES']    
    planck = hp.reorder(planck, n2r=True)
    planck[planck<(-1000e-6)]=0.
    planck[planck>(+1000e-6)]=0.    
    return planck
Example #42
def adaptive_healpix_histogram(
        theta, phi, max_samples_per_pixel, nside=-1, max_nside=-1, nest=False):
    """Adaptively histogram the posterior samples represented by the
    (theta, phi) points using a recursively subdivided HEALPix tree. Nodes are
    subdivided until each leaf contains no more than max_samples_per_pixel
    samples. Finally, the tree is flattened to a fixed-resolution HEALPix image
    with a resolution appropriate for the depth of the tree. If nside is
    specified, the result is resampled to another desired HEALPix resolution.
    """
    # Calculate pixel index of every sample, at the maximum 64-bit resolution.
    #
    # At this resolution, each pixel is only 0.2 mas across; we'll use the
    # 64-bit pixel indices as a proxy for the true sample coordinates so that
    # we don't have to do any trigonometry (aside from the initial hp.ang2pix
    # call).
    #
    # FIXME: Cast to uint64 needed because Healpy returns signed indices.
    ipix = hp.ang2pix(
        HEALPIX_MACHINE_NSIDE, theta, phi, nest=True).astype(np.uint64)

    # Build tree structure.
    if nside == -1 and max_nside == -1:
        max_order = HEALPIX_MACHINE_ORDER
    elif nside == -1:
        max_order = hp.nside2order(max_nside)
    elif max_nside == -1:
        max_order = hp.nside2order(nside)
    else:
        max_order = hp.nside2order(min(nside, max_nside))
    tree = HEALPixTree(ipix, max_samples_per_pixel, max_order)

    # Compute a flattened bitmap representation of the tree.
    p = tree.flat_bitmap

    # If requested, resample the tree to the output resolution.
    if nside != -1:
        p = hp.ud_grade(p, nside, order_in='NESTED', order_out='NESTED')

    # Normalize.
    p /= np.sum(p)

    if not nest:
        p = hp.reorder(p, n2r=True)

    # Done!
    return p
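
Hypothetical usage, assuming HEALPixTree and HEALPIX_MACHINE_NSIDE are available from the surrounding module; the sample cloud below is arbitrary:

import numpy as np

rng = np.random.default_rng(1)
theta = np.clip(rng.normal(1.0, 0.05, size=2000), 0.0, np.pi)
phi = rng.normal(2.0, 0.05, size=2000) % (2.0 * np.pi)

p = adaptive_healpix_histogram(theta, phi, max_samples_per_pixel=50, nside=64)
assert np.isclose(p.sum(), 1.0)  # flattened tree is normalized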
Example #43
def search_map(ras, decs, beam, nest=True, pix_per_beam=10):
    """Returns a healpix map optimised for searching on the sky.  It
    represents the Gaussian-beam convolved posterior.

    :param ras: RA posterior samples.

    :param decs: Corresponding DEC samples.

    :param beam: The beam FWHM in radians.

    :param nest: Whether to output the map in nested (default) or ring
      pixel ordering.

    :param pix_per_beam: The number of pixels in the output map per
      beam (default 10).

    :return: An array representing the posterior convolved with a
      Gaussian beam of the given size.  The array is normalised as a
      probability density per square degree.

    """

    nside = _find_nside(beam, pix_per_beam)

    thetas = np.pi/2.0 - decs

    # Create the map in ring coordinates first.
    hmap = np.bincount(hp.ang2pix(nside, thetas, ras))
    if hmap.shape[0] < hp.nside2npix(nside):
        hmap = np.concatenate((hmap, np.zeros(hp.nside2npix(nside)-hmap.shape[0])))

    hmap = hmap / float(thetas.shape[0]) / hp.nside2pixarea(nside)

    chmap = hps.smoothing(hmap, fwhm=beam, pol=False)

    if nest:
        chmap = hp.reorder(chmap, r2n=True)

    norm = np.sum(chmap) * hp.nside2pixarea(nside, degrees=True)
    
    return chmap / norm
Example #44
def get_SFD_map(fname='~/projects/bayestar/data/SFD_Ebv_512.fits', nside=64):
    import pyfits
    import healpy as hp
    
    fname = expanduser(fname)
    
    f = pyfits.open(fname)
    EBV_ring = f[0].data[:]
    f.close()
    
    EBV_nest = hp.reorder(EBV_ring, r2n=True)
    
    nside2_map = EBV_nest.size // 12
    
    while nside2_map > nside * nside:
        EBV_nest = downsample_by_four(EBV_nest)
        nside2_map = EBV_nest.size // 12
    
    #hp.mollview(np.log10(EBV_nest), nest=True)
    #plt.show()
    
    return EBV_nest
Example #45
def polar_profile(m, nest=False):
    """Obtain the marginalized polar profile of sky map.

    Parameters
    ----------

    m : np.ndarray
        The input HEALPix array.

    nest : bool, default=False
        Indicates whether the input sky map is in nested rather than
        ring-indexed HEALPix coordinates (default: ring).

    Returns
    -------

    theta : np.ndarray
        The polar angles (i.e., the colatitudes) of the isolatitude rings.

    m_int : np.ndarray
        The normalized probability density, such that `np.trapz(m_int, theta)`
        is approximately `np.sum(m)`.
    """
    npix = len(m)
    nside = hp.npix2nside(npix)
    nrings, = hp.pix2ring(nside, np.asarray([npix]))
    startpix, ringpix, costheta, sintheta, _ = hp.ringinfo(
        nside, np.arange(1, nrings))

    if nest:
        m = hp.reorder(m, n2r=True)

    theta = np.arccos(costheta)
    m_int = np.asarray(
        [m[i:i+j].sum() * stheta * 0.5 * npix / j
         for i, j, stheta in zip(startpix, ringpix, sintheta)])

    return theta, m_int
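
A hedged check of the normalization documented above, on a random test map:

import numpy as np
import healpy as hp

nside = 32
m = np.random.rand(hp.nside2npix(nside))
theta, m_int = polar_profile(m)
print(np.trapz(m_int, theta), m.sum())  # should agree approximately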
Example #46
def plot_mc():
	bins=[1,5,10,20,25,50]
	l=np.arange(3*nside_out)
	ll=l*(l+1)/(2.*np.pi)
	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	for num, mask_file in enumerate(mask_array):
		f=np.load('prism_simul_'+mask_name[num]+'.npz')
		theory1_array_in=f['the1_in']
		theory2_array_in=f['the2_in']
		cross1_array_in=f['c1_in']
		cross2_array_in=f['c2_in']
		noise1_array_in=f['n1_in']
		noise2_array_in=f['n2_in']
		Ndq_array_in=f['ndq_in']
		Ndu_array_in=f['ndu_in']
		Nau_array_in=f['nau_in']
		Naq_array_in=f['naq_in']

		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()
		
		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=128)
		
		mask_bool=~mask.astype(bool)
		
		fsky= 1. - np.sum(mask)/float(len(mask))	
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L

		theory1_array_in=np.array(theory1_array_in)/fsky
		theory2_array_in=np.array(theory2_array_in)/fsky
		cross1_array_in=np.array(cross1_array_in)/fsky
		cross2_array_in=np.array(cross2_array_in)/fsky
		Ndq_array_in=np.array(Ndq_array_in)/fsky
		Ndu_array_in=np.array(Ndu_array_in)/fsky
		Nau_array_in=np.array(Nau_array_in)/fsky
		Naq_array_in=np.array(Naq_array_in)/fsky
		noise1_array_in=np.array(noise1_array_in)/fsky
		noise2_array_in=np.array(noise2_array_in)/fsky


		for b in bins:
			N_dq=np.mean(Ndq_array_in,axis=1)
			N_au=np.mean(Nau_array_in,axis=1)
			delta1_in=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)**2+(np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)/2.*(N_dq+N_au)+N_dq*N_au/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
		
			cosmic1_in=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=1)**2)

			N_du=np.mean(Ndu_array_in,axis=1)
			N_aq=np.mean(Naq_array_in,axis=1)
			delta2_in=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)**2+(np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)/2.*(N_du+N_aq)+N_du*N_aq/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
			cosmic2_in=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=1)**2)

			cross1_array=[[],[],[]]
			cross2_array=[[],[],[]]
			Ndq_array=[[],[],[]]
			Ndu_array=[[],[],[]]
			Nau_array=[[],[],[]]
			Naq_array=[[],[],[]]
			noise1_array=[[],[],[]]
			noise2_array=[[],[],[]]
			theory1_array=[[],[],[]]
			theory2_array=[[],[],[]]
			cosmic1=[[],[],[]]
			cosmic2=[[],[],[]]
			delta1=[[],[],[]]
			delta2=[[],[],[]]
        		
			plot_l=[]
			if( b != 1):
				for m in xrange(len(cross1_array_in)):
					for n in xrange(len(cross1_array_in[0])):
						tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in[m][n]/bls,b)
						tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in[m][n]/bls,b)
						tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in[m][n]/bls,b)
						tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in[m][n]/bls,b)
						tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in[m][n]/bls,b)
						tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in[m][n]/bls,b)

						theory1_array[m].append(tmp_t1['llcl'])
						theory2_array[m].append(tmp_t2['llcl'])

						cross1_array[m].append(tmp_c1['llcl'])
						cross2_array[m].append(tmp_c2['llcl'])

						noise1_array[m].append(tmp_n1['llcl'])
						noise2_array[m].append(tmp_n2['llcl'])

						if n == len(cross1_array_in[0])-1:
							plot_l=tmp_c1['l_out']
					tmp_c1=bin_llcl.bin_llcl(ll*cosmic1_in[m]/bls,b)
					tmp_d1=bin_llcl.bin_llcl(ll*delta1_in[m]/bls,b)
					cosmic1[m]=tmp_c1['llcl']
					delta1[m]=tmp_d1['llcl']

					tmp_c2=bin_llcl.bin_llcl(ll*cosmic2_in[m]/bls,b)
					tmp_d2=bin_llcl.bin_llcl(ll*delta2_in[m]/bls,b)
					cosmic2[m]=tmp_c2['llcl']
					delta2[m]=tmp_d2['llcl']
					
			else:
				plot_l=l
				theory1_array=np.multiply(ll/bls,theory1_array_in)
				cross1_array=np.multiply(ll/bls,cross1_array_in)
				noise1_array=np.multiply(ll/bls,noise1_array_in)
				theory2_array=np.multiply(ll/bls,theory2_array_in)
				cross2_array=np.multiply(ll/bls,cross2_array_in)
				noise2_array=np.multiply(ll/bls,noise2_array_in)
				cosmic1=cosmic1_in*ll/bls
				cosmic2=cosmic2_in*ll/bls
				delta1=delta1_in*ll/bls
				delta2=delta2_in*ll/bls
			#noise1=np.mean(noise1_array,axis=1)
			#noise2=np.mean(noise2_array,axis=1)
			theory_array = np.add(theory1_array,theory2_array)
			theory=np.mean(theory_array,axis=1)
			dtheory=np.std(theory_array,axis=1,ddof=1)
			cross_array = np.add(np.subtract(cross1_array,noise1_array),np.subtract(cross2_array,noise2_array))
			cross=np.mean(cross_array,axis=1)
			dcross=np.std(cross_array,axis=1,ddof=1)
			cosmic=np.sqrt(np.array(cosmic1)**2+np.array(cosmic2)**2)
			delta=np.sqrt(np.array(delta1)**2+np.array(delta2)**2)

			cross=np.average(cross,weights=1./dcross**2,axis=0)
			theory=np.average(theory,weights=1./dcross**2,axis=0)
			dtheory=np.average(dtheory,weights=1./dcross**2,axis=0)
			cosmic=np.average(cosmic,weights=1./dcross**2,axis=0)
			delta=np.average(delta,weights=1./dcross**2,axis=0)
			dcross=np.sqrt(np.average(dcross**2,weights=1./dcross**2,axis=0))

			#theory1=np.mean(theory1_array,axis=0)
			#dtheory1=np.std(theory1_array,axis=0,ddof=1)
			#cross1=np.mean(cross1_array,axis=0)
			#dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
			#ipdb.set_trace()
			plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'prism_FR_simulation',title='PRISM FR Correlator',theory=theory*1e12,dtheory=dtheory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)

			#theory2=np.mean(theory2_array,axis=0)
			#dtheory2=np.std(theory2_array,axis=0,ddof=1)
			#cross2=np.mean(cross2_array,axis=0)
			##delta2=np.mean(delta2_array,axis=0)
			#dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
			##ipdb.set_trace()
			#plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
			#ipdb.set_trace()
    
			if b == 25 :
				a_scales=np.linspace(-2,4,121)
				chi_array=[]
				for a in a_scales:
					chi_array.append(np.sum( (cross - a*theory)**2/(dcross)**2))
				ind = np.argmin(chi_array)
			#likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sqrt(2*np.pi)
				likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sum(np.exp(np.multiply(-1./2.,chi_array))*.05)

				Sig=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise=np.std(np.sum(cross_array/dcross**2,axis=1)/np.sum(1./dcross**2))
				Sig1=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise1=np.std(np.sum(cross_array*(theory/dcross)**2,axis=1)/np.sum((theory/dcross)**2))
				SNR=Sig/Noise
				SNR1=Sig1/Noise1
				
				Sig2=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise2=np.sqrt(1./np.sum(1./dcross**2))
				Sig3=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise3=np.sqrt(np.sum(theory**2)/np.sum(theory**2/dcross**2))
				SNR2=Sig2/Noise2
				SNR3=Sig3/Noise3
				
				#ipdb.set_trace()
				fig,ax1=plt.subplots(1,1)

				ax1.plot(a_scales,likelihood,'k.')
				ax1.set_title('Faraday Rotation Correlator')
				ax1.set_xlabel('Likelihood scalar')
				ax1.set_ylabel('Likelihood of Correlation')
				fig.savefig('FR_Correlation_Likelihood.png',format='png')
				fig.savefig('FR_Correlation_Likelihood.eps',format='eps')
				#ipdb.set_trace()
				f=open('Maximum_likelihood.txt','w')
				f.write('Maximum Likelihood: {0:2.5f}%  for scale factor {1:.2f} \n'.format(float(likelihood[ind]*100),float(a_scales[ind])))
				f.write('Probability of scale factor =1: {0:2.5f}% \n \n'.format(float(likelihood[np.where(a_scales ==1)])*100))
				f.write('Detection Levels using Standard Deviation \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR,Sig, Noise))
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n \n'.format(SNR1,Sig1,Noise1))
				f.write('Detection using Theoretical Noise \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR2,Sig2, Noise2))
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR3,Sig3,Noise3))
				f.close()

			#if b == 1 :
			#	xbar= np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
			#	vector=np.matrix(ll[1:]*cross[1:]).T
			#	mu=np.matrix(ll[1:]*theory[1:]).T
			#	fact=len(xbar)-1
			#	cov=(np.dot(xbar,xbar.T)/fact).squeeze()
			#	ipdb.set_trace()
			#	likelihood=np.exp(-np.dot(np.dot((vector-mu).T,lin.inv(cov)),(vector-mu))/2. )/(np.sqrt(2*np.pi*lin.det(cov)))
			#	print('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f=open('FR_likelihood.txt','w')
			#	f.write('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f.close()

				#subprocess.call('mv Maximum_likelihood.txt  gal_cut_{0:0>2d}/'.format(cut), shell=True)
				subprocess.call('mv *01*.png bin_01/', shell=True)
				subprocess.call('mv *05*.png bin_05/', shell=True)
				subprocess.call('mv *10*.png bin_10/', shell=True)
				subprocess.call('mv *20*.png bin_20/', shell=True)
				subprocess.call('mv *25*.png bin_25/', shell=True)
				subprocess.call('mv *50*.png bin_50/', shell=True)
				subprocess.call('mv *.eps eps/', shell=True)
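plot_mc repeatedly converts a mask into a sky fraction and an effective bin width; a compact restatement of just that step, assuming (as the fsky line implies) that mask==1 flags pixels to exclude:

import numpy as np

def effective_bin_width(mask):
    fsky = 1. - np.sum(mask) / float(len(mask))  # retained sky fraction
    L = np.sqrt(fsky * 4. * np.pi)               # largest accessible scale
    return 2. * np.pi / L                        # multipole width dl_eff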
Example #47
0
def correlate_with_planck_lensing(w1_min=15.8, w1_max=16.8):
    import pyfits
    import healpy as hp
    # get wise stuff
    print '...loading wise...'
    nmap = get_hpix(nside=2**9, w1_min=w1_min, w1_max=w1_max,
                    coord='G', quick=True)
    mask_wise = mask_from_map(nmap, coord='G',thresh_min=0.9, thresh_max=2.3)
    whok = np.where(mask_wise>0.5)[0]
    nbar = np.median(nmap[whok])
    delta = (1.*nmap-nbar)/nbar

    # get planck lensing stuff
    print '...loading planck...'
    pdatadir='/home/rkeisler/cib_delens/planck_data/'
    phi = pyfits.getdata(pdatadir+'COM_CompMap_Lensing_2048_R1.10.fits',1)['PHIBAR']
    phi = hp.reorder(phi, out='RING', inp='NESTED')
    mask_lens = pyfits.getdata(pdatadir+'COM_CompMap_Lensing_2048_R1.10.fits')['MASK']
    mask_lens = hp.reorder(mask_lens, out='RING', inp='NESTED')
    # downgrade to wise resolution
    print '...down-grading...'
    nside_wise = hp.npix2nside(len(delta))
    nside_planck = hp.npix2nside(len(phi))
    phi_dg = hp.ud_grade(phi, nside_wise)
    mask_lens_dg = hp.ud_grade(mask_lens, nside_wise)

    mask = mask_wise*mask_lens_dg
    print '...anafast...'
    cl_phi_delta = hp.anafast(mask*delta, map2=mask*phi_dg)/np.mean(mask**2.)
    # correct for phi transfer function
    tf_phi = pyfits.getdata(pdatadir+'COM_CompMap_Lensing_2048_R1.10.fits',2)['RLPP']
    cl_phi_delta /= tf_phi[0:len(cl_phi_delta)]
    nl = len(cl_phi_delta)
    ll = np.arange(nl)

    lmin=100.
    lmax=1500.
    dl=100.
    nbins = int(np.ceil((lmax-lmin)/dl))
    llo = np.linspace(lmin, lmax, nbins)
    lcen = llo+0.5*dl
    delta_l_factor = 0.5
    cl_phi_delta_theory = np.load('/data/rkeisler/cl_phi_delta_mz0p65_sz0p25_b1p0.npy')
    cl_phi_delta_theory = cl_phi_delta_theory[0:len(cl_phi_delta)]
    y = cl_phi_delta/cl_phi_delta_theory
    #y = cl_phi_delta*ll**4.
    ybin = np.zeros(nbins)
    yerr = np.zeros(nbins)
    for i in np.arange(nbins):
        wh=np.where((ll>llo[i])&(ll<=(llo[i]+dl)))[0]
        ybin[i] = np.mean(y[wh])
        nl_tmp = 1.*len(wh)*delta_l_factor
        yerr[i] = np.std(y[wh])/np.sqrt(nl_tmp)

    pl.clf(); pl.errorbar(lcen, ybin, yerr=yerr)
    fs=17
    pl.xlabel('L',fontsize=fs)
    pl.ylabel(r'$C^{\phi \delta}_{\rm{meas}} / C^{\phi \delta}_{\rm{theory}}$',fontsize=fs)
    pl.title(r'Assuming dN/dz=N(0.65,0.25), b=1.0',fontsize=fs-1)
    pl.plot([0,1600],[0,0],'k--')
    pl.plot([0,1600],[1,1],'k--')
    pl.xlim(0,max(lcen)+dl/2.)
    print np.sqrt(np.sum((ybin/yerr)**2.))
    
    hp.mollview(delta*mask)
    ipdb.set_trace()
Example #48
0
    #     plt_data = np.zeros(12 * high_res_nside**2)
    #     plt_data[high_f_map_mask] = local_fit[f]
    #     hpv.mollview(np.log10(plt_data), nest=True, sub=(len(b),2,2*f+1))
    #     plt_data[high_f_map_mask] = bb
    #     hpv.mollview(np.log10(plt_data), nest=True, sub=(len(b),2,2*f+2))
    # plt.show()

high_res_fit = np.transpose(final_w_nf).dot(high_res_x_ni)

#low_f
low_f_principals = np.array([0, 2])
low_f_mask = np.array([f in [5, 8] for f in range(len(freqso))])#np.array([(f in low_f_file['freqs']) and (f <= 20) for f in freqso])
low_f_data = low_f_file['idata'][np.array([f in freqso[low_f_mask] for f in low_f_file['freqs']])] / np.load(result_filename)['normalization'][low_f_mask][:, None]

A = np.transpose(final_w_nf)[np.ix_(low_f_mask, low_f_principals)]
b = low_f_data - [hp.reorder(hp.smoothing(hp.reorder(d, n2r=True), fwhm=low_res), r2n=True) for d in high_res_fit[low_f_mask]]
high_res_x_ni[low_f_principals] = la.inv(np.transpose(A).dot(A)).dot(A.transpose().dot(b))

#plot all components
for i in range(n_principal):
    qaz = np.copy(high_res_x_ni[i])
    qaz /= la.norm(qaz)
    if i == cmb_principal:
        hpv.mollview(qaz, sub=(3, 2, i+1), nest=True, min=np.percentile(qaz, 1), max=np.percentile(qaz, 99))
    else:
        if i <= 2:
            dy_range = 1
        else:
            dy_range = 2
        qaz *= np.sign(np.median(high_res_x_ni[i]))
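The component update above solves the normal equations x = (A^T A)^-1 A^T b explicitly; a numerically safer equivalent is numpy's least-squares solver. A sketch with stand-in shapes (the names below are hypothetical, mirroring the snippet):

import numpy as np

A = np.random.randn(2, 2)    # selected rows/columns of the mixing matrix
b = np.random.randn(2, 100)  # residual maps, one row per frequency
x, res, rank, sv = np.linalg.lstsq(A, b, rcond=None)
# For full-rank A this matches inv(A.T @ A) @ (A.T @ b).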
Example #49
0
def correlate_theory(i_file,j_file,wl_i,wl_j,alpha_file,bands_name,beam=False,gal_cut=0.,mask_file=None):
	print "Computing Cross Correlations for Bands "+str(bands_name)

	radio_file='/data/wmap/faraday_MW_realdata.fits'
	cl_file='/home/matt/wmap/simul_scalCls.fits.lens'
	
	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	#iqu_band_i=hdu_i['stokes iqu'].data
	#iqu_band_j=hdu_j['stokes iqu'].data
	nside_i=hdu_i['stokes iqu'].header['nside']
	nside_j=hdu_j['stokes iqu'].header['nside']
	hdu_i.close()
	hdu_j.close()
	ind_i=np.where( wl == wl_i)[0][0]
	ind_j=np.where( wl == wl_j)[0][0]
	
	cls=hp.read_cl(cl_file)
	simul_cmb=hp.sphtfunc.synfast(cls,max(nside_i,nside_j),fwhm=0.,new=1,pol=1);
	
	alpha_radio=hp.read_map(radio_file,hdu='maps/phi');

	##Generate CMB for file J
	
	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside_j,order_in='ring',order_out='ring')
	simul_cmb=hp.ud_grade(simul_cmb,nside_out=nside_j)
	tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl_j,alpha_radio);
	iqu_band_j=hp.smoothing(tmp_cmb,fwhm=np.sqrt((beam_fwhm[ind_j]*np.pi/(180.*60.))**2-hp.nside2pixarea(nside_j)),verbose=False)
	
	##Generate CMB for file I
	
	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside_i,order_in='ring',order_out='ring')
	simul_cmb=hp.ud_grade(simul_cmb,nside_out=nside_i)
	tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl_i,alpha_radio);
	#ipdb.set_trace()
	iqu_band_i=hp.smoothing(tmp_cmb,fwhm=np.sqrt((beam_fwhm[ind_i]*np.pi/(180.*60.))**2-hp.nside2pixarea(nside_i)),verbose=False)
	


	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_i])**2)*np.pi/(180.*60.),verbose=False)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_j])**2)*np.pi/(180.*60.),verbose=False)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=nside_out,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=nside_out,order_in='ring')
	
	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	
	Bl_factor=np.repeat(1.,3*nside_out)
	#ipdb.set_trace()
	if beam:
		Bl_factor=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)
	pix_area=hp.nside2pixarea(nside_out)
	#ipdb.set_trace()
	mask_bool=np.repeat(False,npix_out)

	if gal_cut > 0:
		pix=np.arange(hp.nside2npix(nside_out))
		x,y,z=hp.pix2vec(nside_out,pix,nest=0)
		mask_bool= np.abs(z)<= np.sin(gal_cut*np.pi/180.)
	#mask_bool1[np.where(np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)<.2e-6)]=True
	if not (mask_file is None):
		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()
		
		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=128)
		
		mask_bool=~mask.astype(bool)
		
		fsky= 1. - np.sum(mask)/float(len(mask))	
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L
	
	DQm.mask=mask_bool
	DUm.mask=mask_bool
	aQm.mask=mask_bool
	aUm.mask=mask_bool
	cross1=hp.anafast(DQm,map2=aUm)/Bl_factor**2
	cross2=hp.anafast(DUm,map2=aQm)/Bl_factor**2
	#cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	#cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	return (cross1,cross2)
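The core of correlate_theory is the Faraday-rotation estimator: difference the Q/U maps of two bands, divide by 2(wl_i^2 - wl_j^2), and cross-correlate with the rotation-measure template. A distilled sketch with toy inputs (all names local to the sketch):

import numpy as np
import healpy as hp

nside = 64
npix = hp.nside2npix(nside)
Q_i, U_i = 1e-6*np.random.randn(npix), 1e-6*np.random.randn(npix)
Q_j, U_j = 1e-6*np.random.randn(npix), 1e-6*np.random.randn(npix)
alpha = np.random.randn(npix)   # rotation-measure template
wl_i, wl_j = 0.007, 0.003       # band wavelengths in metres

const = 2.*(wl_i**2 - wl_j**2)
cross1 = hp.anafast((Q_i - Q_j)/const, map2=alpha*U_j)
cross2 = hp.anafast((U_i - U_j)/const, map2=-alpha*Q_j)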
Example #50
0
	beamscale[i]=beamfwhm[i]/np.sqrt(pixarea[1])

tmp_noise=[2.0,2.7,4.7,2.5,2.2,4.8,14.7]
noise_const_t=np.array([tmp_noise[x]/beamscale[x] for x in range(num_wl)])*2.725e-6
tmp_noise=[2.8,3.9,6.7,4.0,4.2,9.8,29.8]
noise_const_q=np.array([tmp_noise[x]/beamscale[x] for x in range(num_wl)])*2.725e-6

q_array_1=np.zeros((num_wl,npix))
u_array_1=np.zeros((num_wl,npix))
sigma_q_1=np.zeros((num_wl,npix))
sigma_u_1=np.zeros((num_wl,npix))


cls=hp.read_cl(cl_file)
simul_cmb=hp.sphtfunc.synfast(cls,2048,fwhm=5.*np.pi/(180.*60.),new=1,pol=1);
simul_cmb=hp.reorder(simul_cmb,r2n=1);

alpha_radio=hp.read_map(radio_file,hdu='maps/phi');
alpha_radio=hp.ud_grade(alpha_radio,nside_out=2048,order_in='ring',order_out='nested')

npix1=hp.nside2npix(2048)
t2=time()

for i in range(num_wl):
	if i in range(3):
		npix1=hp.nside2npix(1024)
	if i==3:
		npix1=hp.nside2npix(2048)

	tmp_cmb=rotate_tqu(simul_cmb,wl_p[i],alpha_radio);
	tmp_out=hp.ud_grade(tmp_cmb[1],nside_out=nside,order_in='nested',order_out='nested');
Example #51
0
def main():
	##Parameters for Binning, Number of Runs
	##	Beam correction
	use_beam=0
	N_runs=100
	bins=[1,5,10,20,25,50]
	gal_cut=[0,5,10,20,30]
	bls=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)**2
	l=np.arange(3*nside_out)
	ll=l*(l+1)/(2*np.pi)

	map_prefix='/home/matt/Planck/data/faraday/simul_maps/'
	file_prefix=map_prefix+'prism_simulated_'
	alpha_file='/data/wmap/faraday_MW_realdata.fits'
	#wl=np.array([299792458./(band*1e9) for band in bands])
	cross1_array_in=[[],[],[]]
	cross2_array_in=[[],[],[]]
	Ndq_array_in=[[],[],[]]
	Ndu_array_in=[[],[],[]]
	Nau_array_in=[[],[],[]]
	Naq_array_in=[[],[],[]]
	noise1_array_in=[[],[],[]]
	noise2_array_in=[[],[],[]]
	theory1_array_in=[[],[],[]]
	theory2_array_in=[[],[],[]]
	

	#simulate_fields.main()
	for num, mask_file in enumerate(mask_array):
		print(Fore.WHITE+Back.RED+Style.BRIGHT+'Mask: '+mask_name[num]+Back.RESET+Fore.RESET+Style.RESET_ALL)
		count=0
		for i in [0,1,2]:
			for j in [3,4,5]:
				#for n in xrange(N_runs):
				for run in xrange(N_runs):	
					print(Fore.WHITE+Back.GREEN+Style.BRIGHT+'Correlation #{:03d}'.format(run+1)+Back.RESET+Fore.RESET+Style.RESET_ALL)
					print('Bands: {0:0>3.0f} and {1:0>3.0f}'.format(bands[i],bands[j]))
					ttmp1,ttmp2=correlate_theory(file_prefix+'{0:0>3.0f}.fits'.format(bands[i]),file_prefix+'{0:0>3.0f}.fits'.format(bands[j]),wl[i],wl[j],alpha_file,'{0:0>3.0f}x{1:0>3.0f}'.format(bands[i],bands[j]),beam=use_beam,mask_file=mask_file)
				#f=open('cl_noise_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
					theory1_array_in[count].append(ttmp1)
					theory2_array_in[count].append(ttmp2)
					tmp1,tmp2,n1,n2,n3,n4=correlate_signal(file_prefix+'{0:0>3.0f}.fits'.format(bands[i]),file_prefix+'{0:0>3.0f}.fits'.format(bands[j]),wl[i],wl[j],alpha_file,'{0:0>3.0f}x{1:0>3.0f}'.format(bands[i],bands[j]),beam=use_beam,mask_file=mask_file)
					ntmp1,ntmp2=correlate_noise(file_prefix+'{0:0>3.0f}.fits'.format(bands[i]),file_prefix+'{0:0>3.0f}.fits'.format(bands[j]),wl[i],wl[j],alpha_file,'{0:0>3.0f}x{1:0>3.0f}'.format(bands[i],bands[j]),beam=use_beam,mask_file=mask_file)
					cross1_array_in[count].append(tmp1)
					cross2_array_in[count].append(tmp2)
					Ndq_array_in[count].append(n1)
					Ndu_array_in[count].append(n2)
					Nau_array_in[count].append(n3)
					Naq_array_in[count].append(n4)
					noise1_array_in[count].append(ntmp1)
					noise2_array_in[count].append(ntmp2)
				count+=1
		np.savez('prism_simul_'+mask_name[num]+'.npz',the1_in=theory1_array_in,the2_in=theory2_array_in,c1_in=cross1_array_in,c2_in=cross2_array_in,ndq_in=Ndq_array_in,ndu_in=Ndu_array_in,nau_in=Nau_array_in,naq_in=Naq_array_in,n1_in=noise1_array_in,n2_in=noise2_array_in)
				#f=open('cl_theory_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(theory1_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_theory_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(theory2_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_array_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(cross1_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_array_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(cross2_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_noise_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(noise1_array_in).tolist(),f)
				#f.close()	
				#json.dump(np.array(noise2_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_Nau_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Nau_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_Ndq_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_QxaU.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Ndq_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_Naq_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Naq_array_in).tolist(),f)
				#f.close()	
				#f=open('cl_Ndu_FR_{0:0>3.0f}x{1:0>3.0f}_cut{2:0>2d}_UxaQ.json'.format(bands[i],bands[j],cut),'w')
				#json.dump(np.array(Ndu_array_in).tolist(),f)
				#f.close()	
			
				#fsky= 1. - np.sin(cut*np.pi/180.)
				#L=np.sqrt(fsky*4*np.pi)
				#dl_eff=2*np.pi/L

		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()
		
		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=128)
		
		mask_bool=~mask.astype(bool)
		
		fsky= 1. - np.sum(mask)/float(len(mask))	
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L

		theory1_array_in=np.array(theory1_array_in)/fsky
		theory2_array_in=np.array(theory2_array_in)/fsky
		cross1_array_in=np.array(cross1_array_in)/fsky
		cross2_array_in=np.array(cross2_array_in)/fsky
		Ndq_array_in=np.array(Ndq_array_in)/fsky
		Ndu_array_in=np.array(Ndu_array_in)/fsky
		Nau_array_in=np.array(Nau_array_in)/fsky
		Naq_array_in=np.array(Naq_array_in)/fsky
		noise1_array_in=np.array(noise1_array_in)/fsky
		noise2_array_in=np.array(noise2_array_in)/fsky


		for b in bins:
			N_dq=np.mean(Ndq_array_in,axis=1)
			N_au=np.mean(Nau_array_in,axis=1)
			delta1_in=np.sqrt(2.*abs((np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)**2+(np.mean(cross1_array_in,axis=1).T-np.mean(noise1_array_in,axis=1).T)/2.*(N_dq+N_au)+N_dq*N_au/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
		
			cosmic1_in=np.sqrt(2./((2.*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory1_array_in,axis=1)**2)

			N_du=np.mean(Ndu_array_in,axis=1)
			N_aq=np.mean(Naq_array_in,axis=1)
			delta2_in=np.sqrt(2.*abs((np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)**2+(np.mean(cross2_array_in,axis=1).T-np.mean(noise2_array_in,axis=1).T)/2.*(N_du+N_aq)+N_du*N_aq/2.).T/((2.*l+1.)*np.sqrt(b**2+dl_eff**2)*fsky))
			cosmic2_in=np.sqrt(2./((2*l+1)*np.sqrt(b**2+dl_eff**2)*fsky)*np.mean(theory2_array_in,axis=1)**2)

			cross1_array=[[],[],[]]
			cross2_array=[[],[],[]]
			Ndq_array=[[],[],[]]
			Ndu_array=[[],[],[]]
			Nau_array=[[],[],[]]
			Naq_array=[[],[],[]]
			noise1_array=[[],[],[]]
			noise2_array=[[],[],[]]
			theory1_array=[[],[],[]]
			theory2_array=[[],[],[]]
			cosmic1=[[],[],[]]
			cosmic2=[[],[],[]]
			delta1=[[],[],[]]
			delta2=[[],[],[]]
        		
			plot_l=[]
			if( b != 1):
				for m in xrange(len(cross1_array_in)):
					for n in xrange(len(cross1_array_in[0])):
						tmp_t1=bin_llcl.bin_llcl(ll*theory1_array_in[m][n]/bls,b)
						tmp_t2=bin_llcl.bin_llcl(ll*theory2_array_in[m][n]/bls,b)
						tmp_c1=bin_llcl.bin_llcl(ll*cross1_array_in[m][n]/bls,b)
						tmp_c2=bin_llcl.bin_llcl(ll*cross2_array_in[m][n]/bls,b)
						tmp_n1=bin_llcl.bin_llcl(ll*noise1_array_in[m][n]/bls,b)
						tmp_n2=bin_llcl.bin_llcl(ll*noise2_array_in[m][n]/bls,b)

						theory1_array[m].append(tmp_t1['llcl'])
						theory2_array[m].append(tmp_t2['llcl'])

						cross1_array[m].append(tmp_c1['llcl'])
						cross2_array[m].append(tmp_c2['llcl'])

						noise1_array[m].append(tmp_n1['llcl'])
						noise2_array[m].append(tmp_n2['llcl'])

						if n == len(cross1_array_in[0])-1:
							plot_l=tmp_c1['l_out']
					tmp_c1=bin_llcl.bin_llcl(ll*cosmic1_in[m]/bls,b)
					tmp_d1=bin_llcl.bin_llcl(ll*delta1_in[m]/bls,b)
					cosmic1[m]=tmp_c1['llcl']
					delta1[m]=tmp_d1['llcl']

					tmp_c2=bin_llcl.bin_llcl(ll*cosmic2_in[m]/bls,b)
					tmp_d2=bin_llcl.bin_llcl(ll*delta2_in[m]/bls,b)
					cosmic2[m]=tmp_c2['llcl']
					delta2[m]=tmp_d2['llcl']
					
			else:
				plot_l=l
				theory1_array=np.multiply(ll/bls,theory1_array_in)
				cross1_array=np.multiply(ll/bls,cross1_array_in)
				noise1_array=np.multiply(ll/bls,noise1_array_in)
				theory2_array=np.multiply(ll/bls,theory2_array_in)
				cross2_array=np.multiply(ll/bls,cross2_array_in)
				noise2_array=np.multiply(ll/bls,noise2_array_in)
				cosmic1=cosmic1_in*ll/bls
				cosmic2=cosmic2_in*ll/bls
				delta1=delta1_in*ll/bls
				delta2=delta2_in*ll/bls
			#noise1=np.mean(noise1_array,axis=1)
			#noise2=np.mean(noise2_array,axis=1)
			theory_array = np.add(theory1_array,theory2_array)
			theory=np.mean(theory_array,axis=1)
			dtheory=np.std(theory_array,axis=1,ddof=1)
			cross_array = np.add(np.subtract(cross1_array,noise1_array),np.subtract(cross2_array,noise2_array))
			cross=np.mean(cross_array,axis=1)
			dcross=np.std(cross_array,axis=1,ddof=1)
			cosmic=np.sqrt(np.array(cosmic1)**2+np.array(cosmic2)**2)
			delta=np.sqrt(np.array(delta1)**2+np.array(delta2)**2)

			cross=np.average(cross,weights=1./dcross**2,axis=0)
			theory=np.average(theory,weights=1./dcross**2,axis=0)
			dtheory=np.average(dtheory,weights=1./dcross**2,axis=0)
			cosmic=np.average(cosmic,weights=1./dcross**2,axis=0)
			delta=np.average(delta,weights=1./dcross**2,axis=0)
			dcross=np.sqrt(np.average(dcross**2,weights=1./dcross**2,axis=0))

			#theory1=np.mean(theory1_array,axis=0)
			#dtheory1=np.std(theory1_array,axis=0,ddof=1)
			#cross1=np.mean(cross1_array,axis=0)
			#dcross1=np.std(np.subtract(cross1_array,noise1),axis=0,ddof=1)
			#ipdb.set_trace()
			plot_binned.plotBinned((cross)*1e12,dcross*1e12,plot_l,b,'prism_FR_simulation',title='PRISM FR Correlator',theory=theory*1e12,dtheory=dtheory*1e12,delta=delta*1e12,cosmic=cosmic*1e12)

			#theory2=np.mean(theory2_array,axis=0)
			#dtheory2=np.std(theory2_array,axis=0,ddof=1)
			#cross2=np.mean(cross2_array,axis=0)
			##delta2=np.mean(delta2_array,axis=0)
			#dcross2=np.std(np.subtract(cross2_array,noise2),axis=0,ddof=1)
			##ipdb.set_trace()
			#plot_binned.plotBinned((cross2-noise2)*1e12,dcross2*1e12,plot_l,b,'Cross_43x95_FR_UxaQ', title='Cross 43x95 FR UxaQ',theory=theory2*1e12,dtheory=dtheory2*1e12,delta=delta2*1e12,cosmic=cosmic2*1e12)
			#ipdb.set_trace()
    
			if b == 25 :
				a_scales=np.linspace(-2,4,121)
				chi_array=[]
				for a in a_scales:
					chi_array.append(np.sum( (cross - a*theory)**2/(dcross)**2))
				ind = np.argmin(chi_array)
			#likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sqrt(2*np.pi)
				likelihood=np.exp(np.multiply(-1./2.,chi_array))/np.sum(np.exp(np.multiply(-1./2.,chi_array))*.05)

				Sig=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise=np.std(np.sum(cross_array/dcross**2,axis=1)/np.sum(1./dcross**2))
				Sig1=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise1=np.std(np.sum(cross_array*(theory/dcross)**2,axis=1)/np.sum((theory/dcross)**2))
				SNR=Sig/Noise
				SNR1=Sig1/Noise1
				
				Sig2=np.sum(cross/(dcross**2))/np.sum(1./dcross**2)
				Noise2=np.sqrt(1./np.sum(1./dcross**2))
				Sig3=np.sum(cross*(theory/dcross)**2)/np.sum((theory/dcross)**2)
				Noise3=np.sqrt(np.sum(theory**2)/np.sum(theory**2/dcross**2))
				SNR2=Sig2/Noise2
				SNR3=Sig3/Noise3
				
				#ipdb.set_trace()
				fig,ax1=plt.subplots(1,1)

				ax1.plot(a_scales,likelihood,'k.')
				ax1.set_title('Faraday Rotation Correlator')
				ax1.set_xlabel('Likelihood scalar')
				ax1.set_ylabel('Likelihood of Correlation')
				fig.savefig('FR_Correlation_Likelihood.png',format='png')
				fig.savefig('FR_Correlation_Likelihood.eps',format='eps')
				#ipdb.set_trace()
				f=open('Maximum_likelihood.txt','w')
				f.write('Maximum Likelihood: {0:2.5f}%  for scale factor {1:.2f} \n'.format(float(likelihood[ind]*100),float(a_scales[ind])))
				f.write('Probability of scale factor =1: {0:2.5f}% \n \n'.format(float(likelihood[np.where(a_scales ==1)])*100))
				f.write('Detection Levels using Standard Deviation \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR,Sig, Noise))
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n \n'.format(SNR1,Sig1,Noise1))
				f.write('Detection using Theoretical Noise \n')
				f.write('Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR2,Sig2, Noise2))
				f.write('Weighted Detection Level: {0:.4f} sigma, Signal= {1:.4e}, Noise= {2:.4e} \n'.format(SNR3,Sig3,Noise3))
				f.close()

			#if b == 1 :
			#	xbar= np.matrix(ll[1:]*(cross-np.mean(cross))[1:]).T
			#	vector=np.matrix(ll[1:]*cross[1:]).T
			#	mu=np.matrix(ll[1:]*theory[1:]).T
			#	fact=len(xbar)-1
			#	cov=(np.dot(xbar,xbar.T)/fact).squeeze()
			#	ipdb.set_trace()
			#	likelihood=np.exp(-np.dot(np.dot((vector-mu).T,lin.inv(cov)),(vector-mu))/2. )/(np.sqrt(2*np.pi*lin.det(cov)))
			#	print('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f=open('FR_likelihood.txt','w')
			#	f.write('Likelihood of fit is #{0:.5f}'.format(likelihood[0,0]))
			#	f.close()

				#subprocess.call('mv Maximum_likelihood.txt  gal_cut_{0:0>2d}/'.format(cut), shell=True)
				subprocess.call('mv *01*.png bin_01/', shell=True)
				subprocess.call('mv *05*.png bin_05/', shell=True)
				subprocess.call('mv *10*.png bin_10/', shell=True)
				subprocess.call('mv *20*.png bin_20/', shell=True)
				subprocess.call('mv *25*.png bin_25/', shell=True)
				subprocess.call('mv *50*.png bin_50/', shell=True)
				subprocess.call('mv *.eps eps/', shell=True)
Example #52
0
def correlate_noise(i_file,j_file,wl_i,wl_j,alpha_file,bands,beam=False,gal_cut=0.,mask_file=None):
	print "Computing Noise Correlation for Bands "+str(bands)


	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	delta_alpha_radio=hp.read_map(alpha_file,hdu='uncertainty/phi')
	#iqu_band_i=hdu_i['stokes iqu'].data
	#iqu_band_j=hdu_j['stokes iqu'].data
	nside_i=hdu_i['stokes iqu'].header['nside']
	nside_j=hdu_j['stokes iqu'].header['nside']
	hdu_i.close()
	hdu_j.close()
	

	ind_i=np.argwhere( wl == wl_i)[0][0]
	ind_j=np.argwhere( wl == wl_j)[0][0]

	npix_i=hp.nside2npix(nside_i)
	npix_j=hp.nside2npix(nside_j)
	iqu_band_i=np.zeros((3,npix_i))
	iqu_band_j=np.zeros((3,npix_j))
	
	sigma_i=[noise_const_pol[ind_i]*np.random.normal(0,1,npix_i),noise_const_pol[ind_i]*np.random.normal(0,1,npix_i)]
	sigma_j=[noise_const_pol[ind_j]*np.random.normal(0,1,npix_j),noise_const_pol[ind_j]*np.random.normal(0,1,npix_j)]
	
	iqu_band_i[1]=np.copy(sigma_i[0])
	iqu_band_i[2]=np.copy(sigma_i[1])
	iqu_band_j[1]=np.copy(sigma_j[0])
	iqu_band_j[2]=np.copy(sigma_j[1])
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_i])**2)*np.pi/(180.*60.),verbose=False)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_j])**2)*np.pi/(180.*60.),verbose=False)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=nside_out,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=nside_out,order_in='ring')
	
	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	
	Bl_factor=np.repeat(1.,3*nside_out)
	#ipdb.set_trace()
	if beam:
		Bl_factor=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)
	pix_area=hp.nside2pixarea(nside_out)
	#ipdb.set_trace()
	mask_bool=np.repeat(False,npix_out)

	if gal_cut > 0:
		pix=np.arange(hp.nside2npix(nside_out))
		x,y,z=hp.pix2vec(nside_out,pix,nest=0)
		mask_bool= np.abs(z)<= np.sin(gal_cut*np.pi/180.)
	#mask_bool1[np.where(np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)<.2e-6)]=True
	if not (mask_file is None):
		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()
		
		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=128)
		
		mask_bool=~mask.astype(bool)
		
		fsky= 1. - np.sum(mask)/float(len(mask))	
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L
	
	DQm.mask=mask_bool
	DUm.mask=mask_bool
	aQm.mask=mask_bool
	aUm.mask=mask_bool
	#ipdb.set_trace()
	cross1=hp.anafast(DQm,map2=aUm)/Bl_factor**2
	cross2=hp.anafast(DUm,map2=aQm)/Bl_factor**2
	#cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	#cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	#hp.write_cl('cl_'+bands+'_FR_noise_QxaU.fits',cross1)
	#hp.write_cl('cl_'+bands+'_FR_noise_UxaQ.fits',cross2)
	return (cross1,cross2)
Example #53
0
def main():	
	##Define Files used to make maps
	radio_file='/data/wmap/faraday_MW_realdata.fits'
	cl_file='/home/matt/wmap/simul_scalCls.fits.lens'
	output_prefix='/home/matt/Planck/data/faraday/simul_maps/'
	synchrotron_file='/data/Planck/COM_CompMap_SynchrotronPol-commander_0256_R2.00.fits'
	dust_file='/data/Planck/COM_CompMap_DustPol-commander_1024_R2.00.fits'
	gamma_dust=6.626e-34/(1.38e-23*21)
	
	##Define Parameters used to simulate Planck Fields
	bands=np.array([30.,44.,70.,100.,143.,217.,353.])
	#beam_fwhm=np.array([33.,24.,14.,10.,7.1,5.0,5.0])
	#noise_const_temp=np.array([2.0,2.7,4.7,2.5,2.2,4.8,14.7])*2.7255e-6
	#noise_const_pol=np.array([2.8,3.9,6.7,4.0,4.2,9.8,29.8])*2.7255e-6
	#beam_fwhm=np.array([33.,24.,14.,10.,7.1,5.0,5.0])
	beam_fwhm=np.array([32.29,27,13.21,9.67,7.26,4.96,4.93])
	

	pix_area_array=np.concatenate([np.repeat(hp.nside2pixarea(1024),3),np.repeat(hp.nside2pixarea(2048),4)])
	pix_area_array=np.sqrt(pix_area_array)*60*180./np.pi
	#beam_fwhm=np.array([33.,24.,14.,10.,7.1,5.0,5.0])
	#noise_const_temp=np.array([2.0,2.7,4.7,2.5,2.2,4.8,14.7])*2.7255e-6
	#noise_const_pol=np.array([2.8,3.9,6.7,4.0,4.2,9.8,29.8])*2.7255e-6
	noise_const_temp=np.array([2.5,2.7,3.5,1.29,.555,.78,2.56])/pix_area_array*60.e-6
	noise_const_pol=np.array([3.5,4.0,5.0,1.96,1.17,1.75,7.31])/pix_area_array*60.e-6
	krj_to_kcmb=np.array([1.0217,1.0517,np.mean([1.1360,1.1405,1.1348]), np.mean([1.3058,1.3057]),np.mean([1.6735,1.6727]),np.mean([3.2203,3.2336,3.2329,3.2161]),np.mean([14.261,14.106])])*1e-6
	sync_factor=krj_to_kcmb*np.array([20.*(.408/x)**2 for x in bands])
	dust_factor=krj_to_kcmb*np.array([163.e-6*(np.exp(gamma_dust*353e9)-1)/(np.exp(gamma_dust*x*1e9)-1)* (x/353)**2.54 for x in bands])
	nside=2048
	npix=hp.nside2npix(nside)
	pix_area=hp.nside2pixarea(nside)
	
	##Reverse order of arrays to simulate larger NSIDE maps first
	bands = bands[::-1]
	beam_fwhm= beam_fwhm[::-1]
	noise_const_temp = noise_const_temp[::-1]
	noise_const_pol = noise_const_pol[::-1]

	wl=np.array([299792458./(band*1e9) for band in bands])
	num_wl=len(wl)
	tqu_array=[]
	sigma_array=[]
	
	LFI=False
	LFI_IND=np.where(bands == 70)[0][0]

	cls=hp.read_cl(cl_file)
	print 'Generating Map'
	simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1);
	
	alpha_radio=hp.read_map(radio_file,hdu='maps/phi');
	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring')
	bl_40=hp.gauss_beam(40.*np.pi/(180.*60.),3*nside-1)
	hdu_sync=fits.open(synchrotron_file)
	sync_q=hdu_sync[1].data.field(0)
	sync_u=hdu_sync[1].data.field(1)
	
	sync_q=hp.reorder(sync_q,n2r=1)
	tmp_alm=hp.map2alm(sync_q)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_40)
	sync_q=hp.alm2map(tmp_alm,nside)
	#sync_q=hp.smoothing(sync_q,fwhm=40.*np.pi/(180.*60.),verbose=False,invert=True)
	sync_q=hp.ud_grade(sync_q,nside_out=nside)
	
	sync_u=hp.reorder(sync_u,n2r=1)
	tmp_alm=hp.map2alm(sync_u)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_40)
	sync_u=hp.alm2map(tmp_alm,nside)
	#sync_u=hp.smoothing(sync_u,fwhm=40.*np.pi/(180.*60.),verbose=False,invert=True)
	sync_u=hp.ud_grade(sync_u,nside_out=nside)
	hdu_sync.close()
	

	bl_10=hp.gauss_beam(10*np.pi/(180.*60.),3*nside-1)
	hdu_dust=fits.open(dust_file)
	dust_q=hdu_dust[1].data.field(0)
	dust_u=hdu_dust[1].data.field(1)
	hdu_dust.close()
	
	dust_q=hp.reorder(dust_q,n2r=1)
	tmp_alm=hp.map2alm(dust_q)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_10)
	dust_q=hp.alm2map(tmp_alm,nside)
	#dust_q=hp.smoothing(dust_q,fwhm=10.0*np.pi/(180.*60.),verbose=False,invert=True)
	dust_q=hp.ud_grade(dust_q,nside)
	
	dust_u=hp.reorder(dust_u,n2r=1)
	tmp_alm=hp.map2alm(dust_u)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_10)
	dust_u=hp.alm2map(tmp_alm,nside)
	#dust_q=hp.smoothing(dust_q,fwhm=10.0*np.pi/(180.*60.),verbose=False,invert=True)
	dust_u=hp.ud_grade(dust_u,nside)

	nside=2048
	pix_area=hp.nside2pixarea(nside)
	
	prim=fits.PrimaryHDU()
	prim.header['COMMENT']="Simulated Planck Data with Polarization"
	prim.header['COMMENT']="Created using CAMB"
	#ipdb.set_trace()
	for i in range(num_wl):
		if LFI:
			nside=1024
			npix=hp.nside2npix(1024)
			simul_cmb=hp.ud_grade(simul_cmb,nside)
			alpha_radio=hp.ud_grade(alpha_radio,nside)
			sync_q=hp.ud_grade(sync_q,nside)
			sync_u=hp.ud_grade(sync_u,nside)
			dust_q=hp.ud_grade(dust_q,nside)
			dust_u=hp.ud_grade(dust_u,nside)
			pix_area=hp.nside2pixarea(nside)
		
		tmp_cmb=rotate_tqu(simul_cmb,wl[i],alpha_radio);
		tmp_didqdu=np.array([np.random.normal(0,1,npix)*noise_const_temp[i], np.random.normal(0,1,npix)*noise_const_pol[i] , np.random.normal(0,1,npix)*noise_const_pol[i]])
		tmp_tqu=np.copy(tmp_cmb)
		
		#Add Polarized Foreground emission
		tmp_tqu[1]+= np.copy( dust_factor[i]*dust_q+sync_factor[i]*sync_q    )
		tmp_tqu[2]+= np.copy( dust_factor[i]*dust_u+sync_factor[i]*sync_u    )
	#	tmp_tqu[1]+= np.copy(sync_factor[i]*sync_q)
	#	tmp_tqu[2]+= np.copy(sync_factor[i]*sync_u)
		tmp_tqu=hp.sphtfunc.smoothing(tmp_tqu,fwhm=beam_fwhm[i]*np.pi/(180.*60.),pol=1)
	
		#Add Noise After smoothing
		#tmp_tqu+=tmp_didqdu 

		sig_hdu=fits.ImageHDU(tmp_tqu)
		sig_hdu.header['TFIELDS']=(len(tmp_tqu),'number of fields in each row')
		sig_hdu.header["TTYPE1"]=("STOKES I")
		sig_hdu.header["TTYPE2"]=("STOKES Q")
		sig_hdu.header["TTYPE3"]=("STOKES U")
		sig_hdu.header["TUNIT1"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		sig_hdu.header["TUNIT2"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		sig_hdu.header["TUNIT3"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		sig_hdu.header["TFORM1"]='E'
		sig_hdu.header["TFORM2"]='E'
		sig_hdu.header["TFORM3"]='E'
		
		sig_hdu.header["EXTNAME"]="STOKES IQU"
		sig_hdu.header['POLAR']= 'T'
		sig_hdu.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU')
		sig_hdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		sig_hdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		sig_hdu.header["NSIDE"]=(nside,'Healpix Resolution paramter')
		sig_hdu.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
		sig_hdu.header['OBS_NPIX']=(npix,'Number of pixels observed')
		sig_hdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT')
		sig_hdu.header["COORDSYS"]=('G','Pixelization coordinate system')
		


		err_hdu=fits.ImageHDU(tmp_didqdu)
		err_hdu.header['TFIELDS']=(len(tmp_didqdu),'number of fields in each row')
		err_hdu.header["TTYPE1"]=("UNCERTAINTY I")
		err_hdu.header["TTYPE2"]=("UNCERTAINTY Q")
		err_hdu.header["TTYPE3"]=("UNCERTAINTY U")
		err_hdu.header["TUNIT1"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		err_hdu.header["TUNIT2"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		err_hdu.header["TUNIT3"]=("K_{CMB} Thermodynamic", 'Physical Units of Map')
		err_hdu.header["TFORM1"]='E'
		err_hdu.header["TFORM2"]='E'
		err_hdu.header["TFORM3"]='E'
		
		err_hdu.header["EXTNAME"]="UNCERTAINTIES"
		err_hdu.header['POLAR']= 'T'
		err_hdu.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU')
		err_hdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		err_hdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		err_hdu.header["NSIDE"]=(nside,'Healpix Resolution paramter')
		err_hdu.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
		err_hdu.header['OBS_NPIX']=(npix,'Number of pixels observed')
		err_hdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT')
		err_hdu.header["COORDSYS"]=('G','Pixelization coordinate system')


	#	ipdb.set_trace()
		tblist=fits.HDUList([prim,sig_hdu,err_hdu])
		tblist.writeto(output_prefix+'planck_simulated_{0:0>3.0f}.fits'.format(bands[i]),clobber=True)
		print "planck_simulated_{:0>3.0f}.fits".format(bands[i])
		print "Nside = {:0>4d}".format(nside)
		if i+1 == LFI_IND:
			LFI=True
Example #54
0
def view_planck_likelihood_masks():
    tmp = fits.open(datadir+'COM_Mask_Likelihood_2048_R1.10.fits')[1].data
    for k in ['CL31','CL39','CL49']:
        hp.mollview(hp.reorder(tmp[k], n2r=True), title=k)
Example #55
0
hdu_free = fits.open(free_file)
free_EM = hdu_free[1].data.field('EM_ML')
free_T = hdu_free[1].data.field('TEMP_ML')
hdu_free.close()


hdu_sync = fits.open(sync_file)
sync_map = hdu_sync[1].data.field('I_ML') * 1e-6 -2.725  ##Convert K_RJ to K_CMB
hdu_sync.close()

hdu_radio = fits.open(radio_file)
radio_map = hdu_radio[1].data.field('TEMPERATURE') * 1e-3  -2.725#convert to  KCMB
counts = hdu_radio[1].data.field('SENSITIVITY')
hdu_radio.close()

sync_map = hp.reorder(sync_map,n2r=1)
dust_map = hp.reorder(dust_map,n2r=1)
free_EM = hp.reorder(free_EM,n2r=1)
free_T = hp.reorder(free_T,n2r=1)
radio_map = hp.reorder(radio_map,n2r=1)
counts = hp.reorder(counts,n2r=1)

cmb_mask = hp.ud_grade(cmb_mask,256)

##construct free-free intensity map
#
#gff = np.log( np.exp( 5.690 - np.sqrt(3.)/np.pi* np.log( freq * (free_T*1e-4)**(-1.5)) ) + np.e)
#tau = 0.05468 * (free_T)**(-1.5)*freq**(-2) * free_EM*gff
#free_map =  1e6*free_T*(1-np.exp(-tau))
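An executable version of the commented free-free construction above, transcribing those formulas as written (the units of `freq` and `free_T` follow whatever the original script assumed; they are not stated here):

import numpy as np

def freefree_intensity(free_EM, free_T, freq):
    # Gaunt factor and optical depth, copied from the commented lines above.
    gff = np.log(np.exp(5.690 - np.sqrt(3.)/np.pi
                        * np.log(freq*(free_T*1e-4)**(-1.5))) + np.e)
    tau = 0.05468 * free_T**(-1.5) * freq**(-2) * free_EM * gff
    return 1e6 * free_T * (1. - np.exp(-tau))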

Example #56
0
def main():	
	t1=time()
	radio_file='/data/wmap/faraday_MW_realdata.fits'
	cl_file='/home/matt/wmap/simul_scalCls.fits'
	output_prefix='/home/matt/quiet/quiet_maps/'
	nside=1024
	nside_in=1024
	npix=hp.nside2npix(nside)
	bands=[43.1,94.5]
	q_fwhm=[27.3,11.7]
	pix_area= np.sqrt(hp.nside2pixarea(1024))*60*180./np.pi
	noise_const_q=np.array([36./pix_area for f in q_fwhm])*1e-6
#	noise_const_q=np.array([36./fwhm for fwhm in q_fwhm])*1e-6
	centers=np.array([convertcenter([12,4],-39),convertcenter([5,12],-39),convertcenter([0,48],-48),convertcenter([22,44],-36)])
	wl=np.array([299792458./(band*1e9) for band in bands])
	
	synchrotron_file='/data/Planck/COM_CompMap_SynchrotronPol-commander_0256_R2.00.fits'
	dust_file='/data/Planck/COM_CompMap_DustPol-commander_1024_R2.00.fits'
	dust_t_file='/data/Planck/COM_CompMap_dust-commander_0256_R2.00.fits'
	dust_b_file='/data/Planck/COM_CompMap_ThermalDust-commander_2048_R2.00.fits'
	
	##Dust intensity scaling factor
	hdu_dust_t=fits.open(dust_t_file)
	dust_t=hdu_dust_t[1].data.field('TEMP_ML')
	hdu_dust_t.close()
	
	dust_t=hp.reorder(dust_t,n2r=1)
	dust_t=hp.ud_grade(dust_t,nside_in)
	
	hdu_dust_b=fits.open(dust_b_file)
	dust_beta=hdu_dust_b[1].data.field('BETA_ML_FULL')
	hdu_dust_b.close()
	
	dust_beta=hp.reorder(dust_beta,n2r=1)	
	dust_beta=hp.ud_grade(dust_beta,nside_in)
	
	gamma_dust=6.626e-34/(1.38e-23*dust_t)

	krj_to_kcmb=np.array([1.,1.])
	sync_factor=krj_to_kcmb*np.array([1e-6*(30./x)**2 for x in bands])
	dust_factor=np.array([krj_to_kcmb[i]*1e-6*(np.exp(gamma_dust*353e9)-1)/(np.exp(gamma_dust*x*1e9)-1)* (x/353.)**(1+dust_beta) for i,x in enumerate(bands)])

	print('Preparing Foregrounds')	
	bl_40=hp.gauss_beam(40.*np.pi/(180.*60.),3*1024-1)
	bl_10=hp.gauss_beam(10.*np.pi/(180.*60.),3*1024-1)

	hdu_sync=fits.open(synchrotron_file)
	sync_q=hdu_sync[1].data.field(0)
	sync_u=hdu_sync[1].data.field(1)

	sync_q=hp.reorder(sync_q,n2r=1)
	tmp_alm=hp.map2alm(sync_q)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_40)
	#simul_sync_q=hp.smoothing(sync_q,fwhm=40.*np.pi/(180.*60.),verbose=False,invert=True)
	sync_q=hp.alm2map(tmp_alm,nside_in,verbose=False)

	sync_u=hp.reorder(sync_u,n2r=1)
	tmp_alm=hp.map2alm(sync_u)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_40)
	#simul_sync_q=hp.smoothing(sync_q,fwhm=40.*np.pi/(180.*60.),verbose=False,invert=True)
	sync_u=hp.alm2map(tmp_alm,nside_in,verbose=False)
	hdu_sync.close()

	hdu_dust=fits.open(dust_file)
	dust_q=hdu_dust[1].data.field(0)
	dust_u=hdu_dust[1].data.field(1)
	hdu_dust.close()

	dust_q=hp.reorder(dust_q,n2r=1)
	tmp_alm=hp.map2alm(dust_q)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_10)
	#simul_dust_q=hp.smoothing(dust_q,fwhm=10.*np.pi/(180.*60.),verbose=False,invert=True)
	dust_q=hp.alm2map(tmp_alm,nside_in,verbose=False)
	dust_q_back=np.copy(dust_q)

	dust_u=hp.reorder(dust_u,n2r=1)
	tmp_alm=hp.map2alm(dust_u)
	tmp_alm=hp.almxfl(tmp_alm,1./bl_10)
	#simul_dust_q=hp.smoothing(dust_q,fwhm=10.*np.pi/(180.*60.),verbose=False,invert=True)
	dust_u=hp.alm2map(tmp_alm,nside_in,verbose=False)
	dust_u_back=np.copy(dust_u)
	
	print 'Generating Map'
	cls=hp.read_cl(cl_file)
	simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1);
	alpha_radio=hp.read_map(radio_file,hdu='maps/phi');
	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring')
	

	num_wl=len(wl)
	no_noise=[]
	t_array=np.zeros((num_wl,npix))	
	q_array=np.zeros((num_wl,npix))
	sigma_q=np.zeros((num_wl,npix))
	u_array=np.zeros((num_wl,npix))
	sigma_u=np.zeros((num_wl,npix))
	for i in range(num_wl):
		print('\tFrequency: {0:2.1f}'.format(bands[i]))
		tmp_cmb=rotate_tqu(simul_cmb,wl[i],alpha_radio);
		no_noise.append(hp.smoothing(np.copy(tmp_cmb), fwhm=q_fwhm[i]*np.pi/(180*60.),pol=1,verbose=False))
		sigma_q[i]=np.random.normal(0,1,npix)*noise_const_q[i]
		sigma_u[i]=np.random.normal(0,1,npix)*noise_const_q[i]
		tmp_cmb[1]+= np.copy( dust_factor[i]*dust_q+sync_factor[i]*sync_q    )
		tmp_cmb[2]+= np.copy( dust_factor[i]*dust_u+sync_factor[i]*sync_u    )
		tmp_out=hp.sphtfunc.smoothing(tmp_cmb,fwhm=q_fwhm[i]*np.pi/(180.*60.),pol=1,verbose=False)
		t_array[i],q_array[i],u_array[i]=tmp_out
		#sigma_q[i]=hp.sphtfunc.smoothing(tmp_q,fwhm=np.pi/180.)
		#sigma_u[i]=hp.sphtfunc.smoothing(tmp_u,fwhm=np.pi/180.)
	
	print "Time to Write Fields"
	dx=1./(60.)*3
	nx=np.int(15/dx)
	ny=nx
	all_pix=[]
	field_pix=[]
	square_pix=[]
	quiet_mask=np.zeros(npix)
	prim=fits.PrimaryHDU()
	prim.header['COMMENT']="Simulated Quiet Data"
	prim.header['COMMENT']="Created using CAMB"
	for p in xrange(len(centers)):
		coords=regioncoords(centers[p,0],centers[p,1],dx,nx,ny)
		coords_sky=SkyCoord(ra=coords[:,0],dec=coords[:,1],unit=u.degree,frame='fk5')
		phi=coords_sky.galactic.l.deg*np.pi/180.
		theta=(90-coords_sky.galactic.b.deg)*np.pi/180.
		pixels=hp.ang2pix(nside,theta,phi)
		quiet_mask[pixels]=1
		unique_pix=(np.unique(pixels).tolist())
		field_pix.append(unique_pix)
		square_pix.append(pixels)
		all_pix.extend(unique_pix)
		pix_col=fits.Column(name='PIXEL',format='1J',array=unique_pix)
		for f in xrange(num_wl):
			region_mask=np.zeros(npix)
			region_mask[pixels]=1
			region_map_t=np.array(t_array[f][pixels]).reshape((nx,ny))
			region_map_q=np.array(q_array[f][pixels]).reshape((nx,ny))
			region_map_u=np.array(u_array[f][pixels]).reshape((nx,ny))
			region_delta_q=np.array(sigma_q[f][pixels]).reshape((nx,ny))
			region_delta_u=np.array(sigma_u[f][pixels]).reshape((nx,ny))
			prim=fits.PrimaryHDU()
			q_head=fits.ImageHDU([region_map_t,region_map_q,region_map_u],name="STOKES IQU")
			q_head.header['TFIELDS']=(3,'number of fields in each row')
			q_head.header['TTYPE1']=('SIGNAL', "STOKES I, Temperature")
			q_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
			q_head.header['TTYPE2']='STOKES Q'
			q_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
			q_head.header['TTYPE3']='STOKES U'
			q_head.header['TUNIT3']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
			q_head.header['TFORM1']='E'
			q_head.header['TFORM2']='E'
			q_head.header['TFORM3']='E'
			q_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
			q_head.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
			q_head.header["COORDSYS"]=('G','Pixelization coordinate system')
			q_head.header['NSIDE']=(1024,'Healpix Resolution parameter')
			q_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
			q_head.header['INDXSCHM']=('EXPLICIT','indexing : IMPLICIT or EXPLICIT')
			err_head=fits.ImageHDU([region_delta_q,region_delta_u],name="Q/U UNCERTAINTIES")
			err_head.header['TFIELDS']=(2,'number of fields in each row')
			err_head.header['NSIDE']=1024
			err_head.header['ORDERING']='RING'
			err_head.header['TTYPE1']='SIGMA Q'
			err_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
			err_head.header['TTYPE2']='SIGMA U'
			err_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
			err_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
			err_head.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL')
			err_head.header['INDXSCHM']=('EXPLICIT','indexing : IMPLICIT or EXPLICIT')
			m_head=fits.ImageHDU(region_mask,name='MASK')	
			sqr_pix_col=fits.Column(name='PIXELS',format='1J',array=pixels)
			sqr_pix_cols=fits.ColDefs([sqr_pix_col])
			sqr_pix_head=fits.BinTableHDU.from_columns(sqr_pix_cols)
			hdulist=fits.HDUList([prim,q_head,err_head,m_head,sqr_pix_head])
			hdulist.writeto(output_prefix+"quiet_simulated_{:.1f}_cmb{:1d}.fits".format(bands[f],p+1),clobber=True)
			print '{:.1f}_cmb{:1d}.fits'.format(bands[f],p+1)
	
	
	mask_head=fits.ImageHDU(quiet_mask,name='MASK')
	pix_col=fits.Column(name='PIXEL',format='1J',array=all_pix)
	field_pix_col1=fits.Column(name='PIXELS FIELD 1',format='1J',array=field_pix[0])
	field_pix_col2=fits.Column(name='PIXELS FIELD 2',format='1J',array=field_pix[1])
	field_pix_col3=fits.Column(name='PIXELS FIELD 3',format='1J',array=field_pix[2])
	field_pix_col4=fits.Column(name='PIXELS FIELD 4',format='1J',array=field_pix[3])
	
	sqr_pix_col1=fits.Column(name='PIXELS FIELD 1',format='1J',array=square_pix[0])
	sqr_pix_col2=fits.Column(name='PIXELS FIELD 2',format='1J',array=square_pix[1])
	sqr_pix_col3=fits.Column(name='PIXELS FIELD 3',format='1J',array=square_pix[2])
	sqr_pix_col4=fits.Column(name='PIXELS FIELD 4',format='1J',array=square_pix[3])
	cols1=fits.ColDefs([sqr_pix_col1,sqr_pix_col2,sqr_pix_col3,sqr_pix_col4])
	tbhdu1=fits.BinTableHDU.from_columns(cols1)
	tbhdu1.header['TFIELDS']=(4,'number of fields in each row')
	tbhdu1.header["TTYPE1"]=("PIXELS CMB FIELD 1","SQUARE PIXEL NUMBER BY FIELD")
	tbhdu1.header["TTYPE2"]=("PIXELS CMB FIELD 2","SQUARE PIXEL NUMBER BY FIELD")
	tbhdu1.header["TTYPE3"]=("PIXELS CMB FIELD 3","SQUARE PIXEL NUMBER BY FIELD")
	tbhdu1.header["TTYPE4"]=("PIXELS CMB FIELD 4","SQUARE PIXEL NUMBER BY FIELD")
	tbhdu1.header["EXTNAME"]="SQUARE PIXELS"
	tbhdu1.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
	tbhdu1.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
	tbhdu1.header["NSIDE"]=(nside,'Healpix Resolution paramter')
	tbhdu1.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL')
	tbhdu1.header['OBS_NPIX']=(len(all_pix),'Number of pixels observed')
	tbhdu1.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT')
	tbhdu1.header["COORDSYS"]=('G','Pixelization coordinate system')
	for i in xrange(num_wl):
		cut_t,cut_q,cut_u=t_array[i][all_pix],q_array[i][all_pix],u_array[i][all_pix]
		cut_dq,cut_du=sigma_q[i][all_pix],sigma_u[i][all_pix]
		col_t=fits.Column(name='SIGNAL',format='1E',unit='K_{CMB}',array=cut_t)
		col_q=fits.Column(name='STOKES Q',format='1E',unit='K_{CMB}',array=cut_q)
		col_u=fits.Column(name='STOKES U',format='1E',unit='K_{CMB}',array=cut_u)
		col_dq=fits.Column(name='Q ERROR',format='1E',unit='K_{CMB}',array=cut_dq)
		col_du=fits.Column(name='U ERROR',format='1E',unit='K_{CMB}',array=cut_du)
		cols=fits.ColDefs([pix_col,col_q,col_u,col_dq,col_du])
		tbhdu=fits.BinTableHDU.from_columns(cols)
		tbhdu.header['TFIELDS']=(5,'number of fields in each row')
		tbhdu.header["TTYPE2"]=("SIGNAL","STOKES T")
		tbhdu.header["EXTNAME"]="SIGNAL"
		tbhdu.header['POLAR']= 'T'
		tbhdu.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU')
		tbhdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		tbhdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		tbhdu.header["NSIDE"]=(1024,'Healpix Resolution paramter')
		tbhdu.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL')
		tbhdu.header['OBS_NPIX']=(len(all_pix),'Number of pixels observed')
		tbhdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT of EXPLICIT')
		tbhdu.header["COORDSYS"]=('G','Pixelization coordinate system')
		tblist=fits.HDUList([prim,tbhdu])
		tblist.writeto(output_prefix+'quiet_partial_simulated_{:.1f}.fits'.format(bands[i]),clobber=True)
	
		q_head=fits.ImageHDU(np.array([t_array[i],q_array[i],u_array[i]]), name='STOKES IQU')
		q_head.header['TFIELDS']=(3,'number of fields in each row')
		q_head.header['TTYPE1']=('SIGNAL', "STOKES I, Temperature")
		q_head.header['TTYPE2']='STOKES Q'
		q_head.header['TTYPE3']='STOKES U'
		q_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		q_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		q_head.header['TUNIT3']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		q_head.header['TFORM1']='E'
		q_head.header['TFORM2']='E'
		q_head.header['TFORM3']='E'
		q_head.header['EXTNAME']='STOKES IQU'
		q_head.header['POLAR']= 'T'
		q_head.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU')
		q_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		q_head.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		q_head.header['NSIDE']=(1024,'Healpix Resolution parameter')
		q_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
		q_head.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT or EXPLICIT')
		q_head.header['BAD_DATA']=(hp.UNSEEN,'Sentinel value given to bad pixels')
		q_head.header["COORDSYS"]=('G','Pixelization coordinate system')
		#########################

		theo_head=fits.ImageHDU(no_noise[i], name='No Noise IQU')
		theo_head.header['TFIELDS']=(3,'number of fields in each row')
		theo_head.header['TTYPE1']=('SIGNAL', "STOKES I, Temperature")
		theo_head.header['TTYPE2']='STOKES Q'
		theo_head.header['TTYPE3']='STOKES U'
		theo_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		theo_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		theo_head.header['TUNIT3']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		theo_head.header['TFORM1']='E'
		theo_head.header['TFORM2']='E'
		theo_head.header['TFORM3']='E'
		theo_head.header['EXTNAME']='no noise iqu'
		theo_head.header['POLAR']= 'T'
		theo_head.header['POLCCONV']=('COSMO','Coord. Convention for polarisation COSMO/IAU')
		theo_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		theo_head.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		theo_head.header['NSIDE']=(1024,'Healpix Resolution parameter')
		theo_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
		theo_head.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT or EXPLICIT')
		theo_head.header['BAD_DATA']=(hp.UNSEEN,'Sentinel value given to bad pixels')
		theo_head.header["COORDSYS"]=('G','Pixelization coordinate system')
		

####################################
		#tblist=fits.HDUList([prim,tbhdu])
		err_head=fits.ImageHDU(np.array([sigma_q[i],sigma_u[i]]),name='Q/U UNCERTAINTIES')
		err_head.header['TFIELDS']=(2,'number of fields in each row')
		err_head.header['NSIDE']=1024
		err_head.header['ORDERING']='RING'
		err_head.header['TTYPE1']='SIGMA Q'
		err_head.header['TTYPE2']='SIGMA U'
		err_head.header['TUNIT1']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		err_head.header['TUNIT2']=('K_{CMB} Thermodynamic', 'Physical Units of Map')
		err_head.header['TFORM1']='E'
		err_head.header['TFORM2']='E'
		err_head.header['EXTNAME']='Q/U UNCERTAINTIES'
		err_head.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		err_head.header['OBJECT']=('FULLSKY','Sky coverage, either FULLSKY or PARTIAL')
		err_head.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT or EXPLICIT')
		err_head.header['BAD_DATA']=(hp.UNSEEN,'Sentinel value given to bad pixels')
		cols=fits.ColDefs([field_pix_col1,field_pix_col2,field_pix_col3,field_pix_col4])
		tbhdu=fits.BinTableHDU.from_columns(cols)
		tbhdu.header['TFIELDS']=(4,'number of fields in each row')
		tbhdu.header["TTYPE1"]=("PIXELS CMB FIELD 1","PIXEL NUMBER BY FIELD")
		tbhdu.header["TTYPE2"]=("PIXELS CMB FIELD 2","PIXEL NUMBER BY FIELD")
		tbhdu.header["TTYPE3"]=("PIXELS CMB FIELD 3","PIXEL NUMBER BY FIELD")
		tbhdu.header["TTYPE4"]=("PIXELS CMB FIELD 4","PIXEL NUMBER BY FIELD")
		tbhdu.header["EXTNAME"]="FIELD PIXELS"
		tbhdu.header['PIXTYPE']=("HEALPIX","HEALPIX pixelisation")
		tbhdu.header['ORDERING']=("RING","Pixel order scheme, either RING or NESTED")
		tbhdu.header["NSIDE"]=(nside,'Healpix Resolution paramter')
		tbhdu.header['OBJECT']=('PARTIAL','Sky coverage, either FULLSKY or PARTIAL')
		tbhdu.header['OBS_NPIX']=(len(all_pix),'Number of pixels observed')
		tbhdu.header['INDXSCHM']=('IMPLICIT','indexing : IMPLICIT or EXPLICIT')
		tbhdu.header["COORDSYS"]=('G','Pixelization coordinate system')
		hdulist=fits.HDUList([prim,q_head,err_head,mask_head,tbhdu,tbhdu1,theo_head])
		hdulist.writeto(output_prefix+"quiet_simulated_{:.1f}.fits".format(bands[i]),clobber=True)
		print "quiet_simulated_{:.1f}.fits".format(bands[i])
#theory_cls=hp.read_cl('/home/matt/Planck/data/faraday/correlation/fr_theory_cl.fits')


cmb_cls=hp.read_cl('/home/matt/wmap/simul_scalCls.fits.lens')

synchrotron_file='/data/Planck/COM_CompMap_SynchrotronPol-commander_0256_R2.00.fits'
dust_file='/data/Planck/COM_CompMap_DustPol-commander_1024_R2.00.fits'
dust_t_file='/data/Planck/COM_CompMap_dust-commander_0256_R2.00.fits'
dust_b_file='/data/Planck/COM_CompMap_ThermalDust-commander_2048_R2.00.fits'

##Dust intensity scaling factor
hdu_dust_t=fits.open(dust_t_file)
dust_t=hdu_dust_t[1].data.field('TEMP_ML')
hdu_dust_t.close()

dust_t=hp.reorder(dust_t,n2r=1)
dust_t=hp.ud_grade(dust_t,nside_in)
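## The Commander component maps ship in NESTED ordering, so hp.reorder(...,
## n2r=1) converts to RING first; hp.ud_grade and the rest of this script
## assume RING-ordered maps.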

hdu_dust_b=fits.open(dust_b_file)
dust_beta=hdu_dust_b[1].data.field('BETA_ML_FULL')
hdu_dust_b.close()

dust_beta=hp.reorder(dust_beta,n2r=1)	
dust_beta=hp.ud_grade(dust_beta,nside_in)

gamma_dust=6.626e-34/(1.38e-23*dust_t)
dust_factor=np.array([krj_to_kcmb[i]*1e-6*(np.exp(gamma_dust*353e9)-1)/(np.exp(gamma_dust*x*1e9)-1)* (x/353.)**(1+dust_beta) for i,x in enumerate(bands)])
df_sub=[ hp.ud_grade(df,nside_out) for  df in dust_factor]
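## gamma_dust is h/(k_B*T_d) (SI constants above), so each dust_factor entry is
## the modified-blackbody ratio that scales the 353 GHz dust template to band
## frequency nu:
##   factor(nu) = krj_to_kcmb * (exp(gamma*353e9)-1)/(exp(gamma*nu)-1) * (nu/353)**(1+beta)
## evaluated pixel-by-pixel with the Commander temperature and beta maps.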

##Testing for noise levels
##Create Field for analysis
Example #58
0
hdu_free = fits.open(free_file)
free_EM = hdu_free[1].data.field('EM_ML')
free_T = hdu_free[1].data.field('TEMP_ML')
hdu_free.close()


hdu_sync = fits.open(sync_file)
sync_map = hdu_sync[1].data.field('I_ML') * 1e-6 -2.725  ##Convert K_RJ to K_CMB
hdu_sync.close()

f=np.load(radio_file)
radio_map = f['image']
counts = f['counts']

sync_map = hp.reorder(sync_map,n2r=1)
dust_map = hp.reorder(dust_map,n2r=1)
free_EM = hp.reorder(free_EM,n2r=1)
free_T = hp.reorder(free_T,n2r=1)

cmb_mask = hp.ud_grade(cmb_mask,256)

##construct free-free intensity map
#
#gff = np.log( np.exp( 5.690 - np.sqrt(3.)/np.pi* np.log( freq * (free_T*1e-4)**(-1.5)) ) + np.e)
#tau = 0.05468 * (free_T)**(-1.5)*freq**(-2) * free_EM*gff
#free_map =  1e6*free_T*(1-np.exp(-tau))
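## A minimal sketch of the commented-out free-free model above, wrapped as a
## function for standalone testing (assumptions: freq in GHz, free_T in K,
## free_EM in cm^-6 pc; returns brightness temperature in micro-K):
def freefree_brightness(free_EM, free_T, freq):
	# Gaunt factor, optical depth, then T_b = T_e*(1 - exp(-tau))
	gff = np.log(np.exp(5.690 - np.sqrt(3.)/np.pi*np.log(freq*(free_T*1e-4)**(-1.5))) + np.e)
	tau = 0.05468*(free_T)**(-1.5)*freq**(-2)*free_EM*gff
	return 1e6*free_T*(1. - np.exp(-tau))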



radio_map[counts == 0] = hp.UNSEEN
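## hp.UNSEEN is healpy's bad-pixel sentinel; wrapping the map afterwards with
## hp.ma() gives a masked array whose UNSEEN pixels are excluded automatically:
#radio_masked = hp.ma(radio_map)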
Example #59
0
def correlate_signal(i_file,j_file,wl_i,wl_j,alpha_file,bands,beam=False,gal_cut=0.,mask_file=None):
	print "Computing Cross Correlations for Bands "+str(bands)


	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	delta_alpha_radio=hp.read_map(alpha_file,hdu='uncertainty/phi')
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	nside_i=hdu_i['stokes iqu'].header['nside']
	nside_j=hdu_j['stokes iqu'].header['nside']
	hdu_i.close()
	hdu_j.close()
	

	ind_i=np.argwhere( wl == wl_i)[0][0]
	ind_j=np.argwhere( wl == wl_j)[0][0]
	npix_i=hp.nside2npix(nside_i)	
	npix_j=hp.nside2npix(nside_j)	
	#ipdb.set_trace()
	if npix_i != iqu_band_i[1].shape[0]:
		print 'NSIDE parameter not equal to size of map for file I'
		print 'setting npix to larger parameter'
		npix_i=iqu_band_i[1].shape[0]
	
	if npix_j != iqu_band_j[1].shape[0]:
		print 'NSIDE parameter not equal to size of map for file J'
		print 'setting npix to larger parameter'
		npix_j=iqu_band_j[1].shape[0]

	sigma_i=[noise_const_pol[ind_i]*np.random.normal(0,1,npix_i),noise_const_pol[ind_i]*np.random.normal(0,1,npix_i)]
	sigma_j=[noise_const_pol[ind_j]*np.random.normal(0,1,npix_j),noise_const_pol[ind_j]*np.random.normal(0,1,npix_j)]
	
	iqu_band_i[1]+=sigma_i[0]
	iqu_band_i[2]+=sigma_i[1]
	iqu_band_j[1]+=sigma_j[0]
	iqu_band_j[2]+=sigma_j[1]
	
	sigma_q_i=hp.smoothing(sigma_i[0],fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_i])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_u_i=hp.smoothing(sigma_i[1],fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_i])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_q_j=hp.smoothing(sigma_j[0],fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_j])**2)*np.pi/(180.*60.),verbose=False)	
	sigma_u_j=hp.smoothing(sigma_j[1],fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_j])**2)*np.pi/(180.*60.),verbose=False)	
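	# Gaussian beams add in quadrature: smoothing a map with native beam
	# beam_fwhm[ind] by sqrt(smoothing_scale**2 - beam_fwhm[ind]**2) arcmin
	# (converted to radians above) brings both bands to the common resolution
	# smoothing_scale.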

	sigma_q_i=hp.ud_grade(sigma_q_i,nside_out)
	sigma_u_i=hp.ud_grade(sigma_u_i,nside_out)
	sigma_q_j=hp.ud_grade(sigma_q_j,nside_out)
	sigma_u_j=hp.ud_grade(sigma_u_j,nside_out)
		
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_i])**2)*np.pi/(180.*60.),verbose=False)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(beam_fwhm[ind_j])**2)*np.pi/(180.*60.),verbose=False)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	iqu_band_i=hp.ud_grade(iqu_band_i,nside_out=nside_out,order_in='ring')
	iqu_band_j=hp.ud_grade(iqu_band_j,nside_out=nside_out,order_in='ring')
	
	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]
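	# To first order in alpha*wl**2, Faraday rotation gives
	#   Q(wl) ~ Q - 2*alpha*wl**2*U  and  U(wl) ~ U + 2*alpha*wl**2*Q,
	# so Delta_Q estimates -alpha*U and Delta_U estimates alpha*Q; cross1 and
	# cross2 below test exactly these pairings against the radio template.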

	DQm=hp.ma(Delta_Q)
	DUm=hp.ma(Delta_U)
	aQm=hp.ma(alpha_q)
	aUm=hp.ma(alpha_u)
	sqi=hp.ma(sigma_q_i)
	sui=hp.ma(sigma_u_i)
	sqj=hp.ma(sigma_q_j)
	suj=hp.ma(sigma_u_j)
	salpha=hp.ma(delta_alpha_radio)
	alpham=hp.ma(alpha_radio)
	um=hp.ma(iqu_band_j[2])
	qm=hp.ma(iqu_band_j[1])
	
	Bl_factor=np.repeat(1.,3*nside_out)
	#ipdb.set_trace()
	if beam:
		Bl_factor=hp.gauss_beam(smoothing_scale*np.pi/(180.*60.),3*nside_out-1)
	pix_area=hp.nside2pixarea(nside_out)
	#ipdb.set_trace()
	mask_bool=np.repeat(False,npix_out)

	if gal_cut > 0:
		pix=np.arange(hp.nside2npix(nside_out))
		x,y,z=hp.pix2vec(nside_out,pix,nest=0)
		mask_bool= np.abs(z)<= np.sin(gal_cut*np.pi/180.)
	#mask_bool1[np.where(np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)<.2e-6)]=True
	if not (mask_file is None):
		mask_hdu=fits.open(mask_file)
		mask=mask_hdu[1].data.field(0)
		mask_hdu.close()
		
		mask=hp.reorder(mask,n2r=1)
		mask=hp.ud_grade(mask,nside_out=128)
		
		mask_bool=~mask.astype(bool)
		
		fsky= 1. - np.sum(mask)/float(len(mask))	
		L=np.sqrt(fsky*4*np.pi)
		dl_eff=2*np.pi/L
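		# fsky is the unmasked sky fraction; L is the linear extent of the
		# surviving patch, so dl_eff = 2*pi/L is the coarsest multipole
		# spacing the cut sky can resolve.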
	
	DQm.mask=mask_bool
	DUm.mask=mask_bool
	aQm.mask=mask_bool
	aUm.mask=mask_bool
	sqi.mask=mask_bool
	sui.mask=mask_bool
	sqj.mask=mask_bool
	suj.mask=mask_bool
	salpha.mask=mask_bool
	alpham.mask=mask_bool
	um.mask=mask_bool
	qm.mask=mask_bool
	#ipdb.set_trace()
	cross1=hp.anafast(DQm,map2=aUm)/Bl_factor**2
	cross2=hp.anafast(DUm,map2=aQm)/Bl_factor**2
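	# Each map carries one factor of the common beam window B_l, so dividing
	# the cross-spectra by Bl_factor**2 deconvolves the beam when beam=True.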
	

	##calculate theoretical variance for correlations
	N_dq=abs((sqi-sqj)**2).sum()*(pix_area/const)**2/(4.*np.pi)
	N_du=abs((sui-suj)**2).sum()*(pix_area/const)**2/(4.*np.pi)
	N_au=abs((salpha*um+alpham*suj+salpha*suj)**2).sum()*pix_area**2/(4.*np.pi)
	N_aq=abs((salpha*qm+alpham*sqj+salpha*sqj)**2).sum()*pix_area**2/(4.*np.pi)
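	# Analytic white-noise bias: for uncorrelated pixel noise of variance
	# sigma**2, N_l ~ sum(sigma**2)*Omega_pix**2/(4*pi); the 1/const**2 in
	# N_dq and N_du carries the band-difference normalisation through.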
	#ipdb.set_trace()

	return (cross1,cross2,N_dq,N_du,N_au,N_aq)
	map_prefix='/data/wmap/wmap_band_forered_iqumap_r9_9yr_'
	simul_prefix='/data/mwap/simul_fr_rotated_'
	
	wmap_files=[ map_prefix+name+'_v5.fits' for name in names]
	simul_files=[simul_prefix+str(band).zfill(3)+'.fits' for band in bands]
	
	noise_const_t=np.asarray([1.429,1.466,2.188,3.131,6.544])*1e-3
	noise_const_q=np.asarray([1.435,1.472,2.197,3.141,6.560])*1e-3
	
	q_array=np.zeros((num_wl,npix))
	u_array=np.zeros((num_wl,npix))
	sigma_q=np.zeros((num_wl,npix))
	sigma_u=np.zeros((num_wl,npix))
	for i in range(num_wl):
		wmap_counts=hp.read_map(wmap_files[i],nest=1,field=3);
		wmap_counts=hp.reorder(wmap_counts,n2r=1)
		tmp_cmb=rotate_tqu(simul_cmb,wl[i],alpha_radio);
		tmp_cmb=hp.smoothing(tmp_cmb,fwhm=fwhm_w[i]*np.pi/180.,pol=1)
		tmp_out=np.random.normal(0,1,npix1)*noise_const_q[i]/np.sqrt(wmap_counts)
		tmp_out	=hp.sphtfunc.smoothing(tmp_out,fwhm=np.pi/180.)
		sigma_q[i] =hp.ud_grade(tmp_out,nside_out=nside)
		tmp_out=np.random.normal(0,1,npix1)*noise_const_q[i]/np.sqrt(wmap_counts)
		tmp_out	=hp.sphtfunc.smoothing(tmp_out,fwhm=np.pi/180.)
		sigma_u[i] =hp.ud_grade(tmp_out,nside_out=nside)
		tmp_out=hp.sphtfunc.smoothing(tmp_out,fwhm=np.sqrt(1.-fwhm_w[i]**2)*np.pi/180.,pol=1)
		tmp_out=hp.ud_grade(tmp_cmb,nside_out=nside)
		q_array[i]=tmp_out[1]+sigma_q[i]
		u_array[i]=tmp_out[2]+sigma_u[i]
	#emcee code will go here
	
	t0=time()