Example #1
File: sauron.py Project: amaurea/enlib
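# Context (assumed, not shown in this snippet): sauron.py provides numpy as
# np, pixell's enmap, and the multimap, analysis and wavelet-transform (wt)
# helpers used below.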
def build_wiN_ivar(maps, ivars, wt, apod=None, smooth=5, tol=1e-2):
    # This is very ad-hoc, but it mostly works now. It's hard to
    # avoid things blowing up near the edge, but at least that's
    # only at the level of a few thousand fake sources at the edge now,
    # not infinitely bright spots with huge ringing around them.
    ncomp = len(maps)
    wnoise = wt.map2wave(maps)
    wivar = wt.map2wave(ivars, half=True)
    if apod is not None: wapod = wt.map2wave(apod, half=True)
    wiN = multimap.zeros(
        [geo.with_pre((ncomp, ncomp)) for geo in wnoise.geometries],
        dtype=maps.dtype)
    for i, (m, iv) in enumerate(zip(wnoise.maps, wivar.maps)):
        hiv = np.maximum(iv, 0)**0.5
        # We model the noise cov per wavelet scale as v**0.5 W v**0.5.
        # Since these are pixel-diagonal, that's just W v.
        # Measure W from whitened map. Let's still use ivar-weighting.
        weight = hiv[:, None] * hiv[None, :]
        srad = 2 * np.pi / wt.basis.lmaxs[i] * smooth
        rhs = enmap.smooth_gauss(
            m[:, None] * m[None, :] * hiv[:, None] * hiv[None, :] * weight,
            srad)
        div = enmap.smooth_gauss(weight, srad) * hiv[:, None] * hiv[None, :]
        norm = np.max(np.einsum("aa...->a...", div), (-2, -1))[:, None, None]**0.5
        div = np.maximum(div, norm[:, None] * norm[None, :] * tol)
        #enmap.write_map("rhs_%02d.fits" % i, rhs)
        #enmap.write_map("div_%02d.fits" % i, div)

        Nmap = rhs / div
        # Don't allow the noise to be lower than the white noise floor
        for j in range(len(iv)):
            Nmap[j, j] = np.maximum(Nmap[j, j], 1 / np.maximum(iv[j], 1e-20))
        del rhs, div
        # Too low regions should get no weight
        #enmap.write_map("Nmap_%02d.fits" % i, Nmap)
        wiN.maps[i] = analysis.safe_pow(Nmap, -1)
        if apod is not None:
            # Compute how much our result has been biased by apodized/missing regions.
            # I don't understand why it didn't work to bake this into the
            # equation above.
            norm = np.maximum(np.max(wapod.maps[i], (-2, -1)), 1)
            bias = enmap.smooth_gauss(wapod.maps[i] / norm[:, None, None],
                                      srad)
            bias *= bias > 0.1
            # Overcompensate for this
            wiN.maps[i] *= bias[:, None] * bias[None, :]
        #enmap.write_map("iNmap_%02d.fits" % i, wiN.maps[i])
        #enmap.write_map("wapod_%02d.fits" % i, wapod.maps[i]/np.max(wapod.maps[i]))
    return wiN
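
The core trick above (estimating a spatially varying noise level by Gaussian-smoothing the squared, whitened map) can be reproduced in a few lines with pixell alone. A minimal sketch; the geometry and smoothing radius are illustrative:

import numpy as np
from pixell import enmap

# Toy geometry: 256x256 pixels at 0.5 arcmin resolution
shape, wcs = enmap.geometry(pos=(0, 0), shape=(256, 256),
                            res=np.deg2rad(0.5/60), proj="car")
m    = enmap.rand_gauss(shape, wcs)   # unit-variance white noise
srad = np.deg2rad(10/60)              # 10 arcmin smoothing radius
# Smoothing m**2 averages neighbouring pixels, giving a local variance
# estimate; for this toy map it should be close to 1 away from the edges.
var = enmap.smooth_gauss(m**2, srad)
print(np.median(var))                 # ~1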
Example #2
def preprocess_P(imap, dust_removal=None, smooth=None, box=None):
    omap = np.sum(imap[1:]**2, axis=0)**0.5
    if dust_removal is not None:
        omap -= np.sum(dust_removal[1:]**2, axis=0)**0.5
    if smooth is not None:
        omap = enmap.smooth_gauss(omap, smooth * u.fwhm * u.arcmin)
    return enmap.submap(omap, box=box)
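
The smooth * u.fwhm * u.arcmin idiom recurs throughout these examples: enmap.smooth_gauss expects a Gaussian standard deviation in radians, while widths are quoted as a FWHM in arcmin. Assuming u is pixell.utils, where fwhm = 1/sqrt(8 ln 2) and arcmin is radians per arcminute, the conversion works out as:

import numpy as np
from pixell import utils as u

fwhm_arcmin = 5.0
# u.fwhm = 1/sqrt(8 ln 2) turns a FWHM into a sigma; u.arcmin is rad/arcmin
sigma_rad = fwhm_arcmin * u.fwhm * u.arcmin
assert np.isclose(sigma_rad,
                  fwhm_arcmin / np.sqrt(8*np.log(2)) * np.pi/(180*60))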
Example #3
File: sauron.py Project: amaurea/enlib
def build_wiN(maps, wt, smooth=5):
    ncomp = len(maps)
    wnoise = wt.map2wave(maps)
    wiN = multimap.zeros(
        [geo.with_pre((ncomp, ncomp)) for geo in wnoise.geometries],
        dtype=maps.dtype)
    for i, m in enumerate(wnoise.maps):
        srad = 2 * np.pi / wt.basis.lmaxs[i] * smooth
        Nmap = enmap.smooth_gauss(m[:, None] * m[None, :], srad)
        wiN.maps[i] = analysis.safe_pow(Nmap, -1)
    return wiN
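
The per-scale smoothing radius here is roughly smooth wavelengths of the band-limit multipole, since multipole l corresponds to an angular wavelength of about 2*pi/l. A quick worked example (the lmax value is illustrative):

import numpy as np
lmax, smooth = 3000, 5
srad = 2*np.pi/lmax * smooth    # ~0.0105 rad
print(np.degrees(srad)*60)      # ~36 arcmin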
Example #4
def process_map(imap, box=None, deconvolve=False, freq=None, smooth=None):
    if deconvolve:
        fwhm = fwhms[freq]
        l = imap.modlmap()
        bmap = np.exp(-0.5*l**2*(fwhm*u.fwhm*u.arcmin)**2)
        bmap = np.maximum(bmap, 1e-3)
        fmap = enmap.fft(imap)
        omap = enmap.ifft(fmap/bmap).real
    else:
        omap = imap
    if smooth is not None:
        omap = enmap.smooth_gauss(omap, smooth*u.fwhm*u.arcmin)
    if box is not None: omap = omap.submap(box)
    if args.comp == 0: return np.log10(omap[0])
    elif args.comp == 12: return np.sum(omap[1:]**2, axis=0)**0.5
    else: return omap[args.comp]
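
The deconvolution branch divides the map by a Gaussian beam transfer function in harmonic space, flooring the beam at 1e-3 so the division stays bounded where the beam has died off. A self-contained sketch of the same pattern, assuming only pixell; the 5 arcmin beam and geometry are illustrative:

import numpy as np
from pixell import enmap, utils as u

shape, wcs = enmap.geometry(pos=(0, 0), shape=(256, 256),
                            res=np.deg2rad(0.5/60), proj="car")
imap  = enmap.rand_gauss(shape, wcs)
sigma = 5.0 * u.fwhm * u.arcmin                # 5 arcmin FWHM beam, as sigma
l     = imap.modlmap()                         # multipole of each Fourier pixel
bmap  = np.maximum(np.exp(-0.5*l**2*sigma**2), 1e-3)  # floored beam transfer
omap  = enmap.ifft(enmap.fft(imap)/bmap).real  # beam-deconvolved map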
Example #5
    def update_mask(self,rand_sigma_arcmin=2.,rand_threshold=1e-3):
        if rand_sigma_arcmin>1.e-3:
            if self.verbose: print( "Smoothing...")
            if self.curved:
                smap = hp.smoothing(self.rand_map,sigma=rand_sigma_arcmin*np.pi/180./60.)
            else:
                smap = enmap.smooth_gauss(self.rand_map,rand_sigma_arcmin*np.pi/180./60.)
            if self.verbose: print( "Done smoothing...")
        else:
            # bug fix: smap must be assigned even when not verbose
            smap = self.rand_map

        self.mask = np.zeros(self.shape)
        self.mask[smap>rand_threshold] = 1
        if not self.curved:
            self.mask = enmap.enmap(self.mask,self.wcs)
        self._counts()
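
The smooth-then-threshold recipe used by update_mask (and again in example #9 below) is easy to reproduce on the flat sky. A minimal sketch, assuming only pixell; the threshold and scales are illustrative:

import numpy as np
from pixell import enmap

shape, wcs = enmap.geometry(pos=(0, 0), shape=(256, 256),
                            res=np.deg2rad(0.5/60), proj="car")
counts = enmap.zeros(shape, wcs)
counts[100:150, 100:150] = 1                       # toy hit region
smap = enmap.smooth_gauss(counts, np.deg2rad(2/60)) # 2 arcmin sigma
mask = (smap > 1e-3).astype(float)                 # binary 0/1 mask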
Example #6
def map_generate_final_map(numerics, cosmology, dndOmega, \
                           thetas, yprofiles, wcs):#{{{

    start_total = time.time()
    if numerics['map_include_bias']:
        f = np.load(path + '/bias.npz')
        bias = f['bias']

    # consistency checks#{{{
    if dndOmega.shape[0] != numerics['map_Npoints_M']:
        print('dndOmega mass problem')
        print('dndOmega has ' + str(dndOmega.shape[0]) + ' mass entries')
        print('while we have ' + str(numerics['map_Npoints_M']) +
              ' mass grid points')
        return
    if dndOmega.shape[1] != numerics['map_Npoints_z']:
        print('dndOmega redshift problem')
        print('dndOmega has ' + str(dndOmega.shape[1]) + ' redshift entries')
        print('while we have ' + str(numerics['map_Npoints_z']) +
              ' redshift grid points')
        return
    if len(yprofiles) != numerics['map_Npoints_M']:
        print('yprofiles mass problem')
        print('yprofiles has ' + str(len(yprofiles)) + ' mass entries')
        print('while we have ' + str(numerics['map_Npoints_M']) +
              ' mass grid points')
        return
    if len(yprofiles[0]) != numerics['map_Npoints_z']:
        print('yprofiles redshift problem')
        print('yprofiles has ' + str(len(yprofiles[0])) + ' redshift entries')
        print('while we have ' + str(numerics['map_Npoints_z']) +
              ' redshift grid points')
        return
    #}}}

    # start off with a map of the desired size
    final_map = enmap.enmap(np.zeros((numerics['map_height_pix'], \
                                      numerics['map_width_pix'])), \
                            wcs=wcs)

    # pad out to a square map, extended appropriately for tSZ code
    map_width_ext = int(numerics['map_width_pix'] / \
                        numerics['map_fraction'])
    map_height_ext = int(numerics['map_height_pix'] / \
                         numerics['map_fraction'])
    map_size_ext = max(map_width_ext, map_height_ext)
    spare_pix_hor = int((map_size_ext - \
                         numerics['map_width_pix']) / 2.0)
    spare_pix_ver = int((map_size_ext - \
                         numerics['map_height_pix']) / 2.0)
    ext_map = enmap.pad(final_map, [spare_pix_ver, spare_pix_hor])
    map_area = (map_size_ext * numerics['map_pixel_size'])**2
    ###print(final_map.shape)
    ###print(square_map.shape)
    ###print(map_size_ext)
    ###exit()

    # generate the tSZ signal
    for jj in range(numerics['map_Npoints_z']):
        if numerics['verbose']:
            print(str(jj))
        start = time.time()
        if numerics['map_include_bias']:
            # for each redshift bin, compute one random realization of the overdensity field
            delta_map = map_generate_linear_density_field(
                jj, numerics, cosmology, path)
            delta_map = delta_map.flatten()
        for ii in range(numerics['map_Npoints_M']):
            if numerics['map_Poisson']:
                cluster_number = np.random.poisson(dndOmega[ii, jj] * map_area)
            else:
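                # Stochastic rounding: take the ceiling with probability equal
                # to the fractional part, so the expected count equals middle.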
                middle = dndOmega[ii, jj] * map_area
                lower = np.floor(middle)
                upper = np.ceil(middle)
                if np.random.rand() < (middle - lower):
                    cluster_number = int(upper)
                else:
                    cluster_number = int(lower)
            if cluster_number > 0:
                if numerics['map_include_bias']:
                    #probabilities = 1. + bias[ii,jj] * delta_map
                    #probabilities[np.where(probabilities<0.)] = 0.
                    #probabilities /= sum(probabilities)
                    probabilities = map_get_probabilities(
                        bias[ii, jj], delta_map)
                    central_pixels = np.random.choice(len(delta_map),
                                                      p=probabilities,
                                                      replace=True,
                                                      size=cluster_number)
                    central_pixels_x = np.zeros(cluster_number, dtype=int)
                    central_pixels_y = np.zeros(cluster_number, dtype=int)
                    for kk in range(cluster_number):
                        # integer division recovers 2D indices from the flat
                        # index (ext_map is square here)
                        central_pixels_x[kk] = central_pixels[kk] // ext_map.shape[0]
                        central_pixels_y[kk] = central_pixels[kk] % ext_map.shape[0]
                else:
                    central_pixels_x = np.random.randint(
                        0, ext_map.shape[0], size=cluster_number)
                    central_pixels_y = np.random.randint(
                        0, ext_map.shape[1], size=cluster_number)
                random_offset_x = np.random.rand() - 0.5
                random_offset_y = np.random.rand() - 0.5
                t = thetas[ii][jj]
                y = yprofiles[ii][jj]
                y_interpolator = interp1d(t,
                                          y,
                                          kind='cubic',
                                          bounds_error=False,
                                          fill_value=(max(y), 0.))
                T_of_theta = lambda theta: T(cosmology, y_interpolator(theta))
                this_map = np.zeros(
                    (2 * int(max(t) / numerics['map_pixel_size']) + 5,
                     2 * int(max(t) / numerics['map_pixel_size']) +
                     5))  # want central pixel to be on center of the cluster
                pixel_indices_x = np.linspace(-(this_map.shape[0] - 1) / 2.,
                                              (this_map.shape[0] - 1) / 2.,
                                              num=this_map.shape[0])
                pixel_indices_y = np.linspace(-(this_map.shape[1] - 1) / 2.,
                                              (this_map.shape[1] - 1) / 2.,
                                              num=this_map.shape[1])
                # average over angles
                nn = 0
                for kk in range(-numerics['map_grid_per_pixel'],
                                numerics['map_grid_per_pixel'] + 1):
                    for ll in range(-numerics['map_grid_per_pixel'],
                                    numerics['map_grid_per_pixel'] + 1):
                        angles = numerics['map_pixel_size'] * np.sqrt(
                            np.add.outer(
                                (pixel_indices_x + random_offset_x + float(kk)
                                 / float(numerics['map_grid_per_pixel'] + 0.5))
                                **2.,
                                (pixel_indices_y + random_offset_y + float(ll)
                                 / float(numerics['map_grid_per_pixel'] + 0.5))
                                **2.))
                        this_map += T_of_theta(angles)
                        nn += 1
                this_map *= 1. / float(nn)
                ext_map = throw_clusters(cluster_number, ext_map, this_map,
                                         central_pixels_x, central_pixels_y)
        end = time.time()
        if numerics['verbose']:
            print(
                str((numerics['map_Npoints_z'] - jj) * (end - start) / 60.) +
                ' minutes remaining in map_generate_final_map')
            #print('I am in index = ' + str(index))
    '''
    # need to take a subset of the final map, since otherwise we're getting a bias (centres of clusters are currently always in the map)
    spare_pixels_horizontal = int((1.-numerics['map_fraction'])/2.*final_map.shape[0])
    spare_pixels_vertical   = int((1.-numerics['map_fraction'])/2.*final_map.shape[1])
    hist = map_get_histogram(final_map[spare_pixels_horizontal:-spare_pixels_horizontal-1,spare_pixels_vertical:-spare_pixels_vertical-1])
    np.savez(path + '/p_' + str(index) + '.npz', p = hist)
    #inal_map = final_map[spare_pixels_horizontal:-spare_pixels_horizontal-1,spare_pixels_vertical:-spare_pixels_vertical-1]

    # Now do the apodization to get the power spectrum
    final_map[:spare_pixels_horizontal, :] *= np.linspace(0.*np.ones(final_map.shape[1]), 1.*np.ones(final_map.shape[1]), num = spare_pixels_horizontal, axis = 0)
    final_map[-spare_pixels_horizontal:, :] *= np.linspace(0.*np.ones(final_map.shape[1]), 1.*np.ones(final_map.shape[1]), num = spare_pixels_horizontal, axis = 0)[::-1, :]
    final_map[:, :spare_pixels_vertical] *= np.linspace(0.*np.ones(final_map.shape[0]), 1.*np.ones(final_map.shape[0]), num = spare_pixels_vertical, axis = 1)
    final_map[:, -spare_pixels_vertical:] *= np.linspace(0.*np.ones(final_map.shape[0]), 1.*np.ones(final_map.shape[0]), num = spare_pixels_vertical, axis = 1)[:, ::-1]
    #plt.matshow(final_map)
    #plt.show()
    #np.savez(path + '/final_map_' + str(index) + '.npz', final_map = final_map)
    ell, Cell = map_get_powerspectrum(numerics, final_map)
    np.savez(path + '/PS_' + str(index) + '.npz', ell = ell, Cell = Cell)
    end_total = time.time()
    if numerics['verbose'] :
        print 'used ' + str((end_total - start_total)/60.) + ' minutes in total'
    #plt.loglog(ell, Cell)
    #plt.savefig('tSZ_power_spectrum.pdf')
    #plt.show()
    '''

    # @TODO: add apodization scale to numerical_parameters? or
    #        always use multiple sigma?
    # now smooth with instrumental beam. first, trim to a map of
    # the desired size plus a small buffer for apodization to
    # minimize ringing from harmonic-space smoothing
    map_width_apod = numerics['map_width_pix'] + 100
    map_height_apod = numerics['map_height_pix'] + 100
    spare_pix_hor = int((map_size_ext - map_width_apod) / 2.0)
    spare_pix_ver = int((map_size_ext - map_height_apod) / 2.0)
    apod_map = ext_map[spare_pix_ver: spare_pix_ver + map_height_apod, \
                       spare_pix_hor: spare_pix_hor + map_width_apod]
    apod_map = enmap.apod(apod_map, 25)
    beam_sigma = cosmology['beam_fwhm_arcmin'] * \
                 np.pi / 180.0 / 60.0 / \
                 np.sqrt(8.0 * np.log(2.0))
    apod_map = enmap.smooth_gauss(apod_map, beam_sigma)

    # finally, trim off the apodization padding
    spare_pix_hor = int((map_width_apod - \
                         numerics['map_width_pix']) / 2.0)
    spare_pix_ver = int((map_height_apod - \
                         numerics['map_height_pix']) / 2.0)
    final_map = apod_map[spare_pix_ver: \
                         spare_pix_ver + numerics['map_height_pix'], \
                         spare_pix_hor: \
                         spare_pix_hor + numerics['map_width_pix']]
    end_total = time.time()
    if numerics['verbose']:
        print('used ' + str((end_total - start_total) / 60.) +
              ' minutes in total')
    return final_map
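
The closing steps above are a common recipe for harmonic-space smoothing of a cut-out: keep a pixel buffer, apodize the edge, smooth, then trim the buffer off to suppress ringing. Condensed to its essentials (buffer, taper and beam widths are illustrative):

import numpy as np
from pixell import enmap

shape, wcs = enmap.geometry(pos=(0, 0), shape=(356, 356),
                            res=np.deg2rad(0.5/60), proj="car")
ext_map = enmap.rand_gauss(shape, wcs)
buf = 50                                       # buffer kept for apodization
apod_map = enmap.apod(ext_map, 25)             # cosine taper, 25 px wide
beam_sigma = 1.4 * np.pi/180/60 / np.sqrt(8*np.log(2))  # 1.4' FWHM -> sigma
smoothed = enmap.smooth_gauss(apod_map, beam_sigma)
final = smoothed[buf:-buf, buf:-buf]           # trim off the apodized buffer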
Example #7
#sys.exit()

imap = reproject.ivar_hp_to_cyl(mask,
                                shape,
                                wcs,
                                rot=False,
                                do_mask=False,
                                extensive=False)

fname = f'{opath}/car_mask_lmax_{lmax}.fits'
enmap.write_map(fname, imap)

deg = 2.0

smoothed = enmap.smooth_gauss(imap, np.deg2rad(deg))

fname = f'{opath}/car_mask_lmax_{lmax}_smoothed_{deg:.1f}_deg.fits'
enmap.write_map(fname, smoothed)

# fname = f'{opath}/car_mask_lmax_{lmax}_smoothed_{deg:.1f}_deg_south.fits'
# enmap.write_map(fname,smoothed*nmask)

# fname = f'{opath}/car_mask_lmax_{lmax}_smoothed_{deg:.1f}_deg_north.fits'
# enmap.write_map(fname,smoothed*smask)

# io.plot_img(smoothed,'sm_low_carmask.png')
# io.plot_img(smoothed*nmask,'sm_low_south_carmask.png')
# io.plot_img(smoothed*smask,'sm_low_north_carmask.png')

Example #8
    ax.tick_params(axis='y', colors='white', which='both', labelcolor='black')
    ax.set_aspect('equal')
    for side in ['left','right','top','bottom']:
        ax.spines[side].set_visible(True)
        ax.spines[side].set_color('white')
    plotstyle.setup_axis(ax, nticks=[10,5])
    ax.set_ylabel("$b$")
    ax.set_xlabel('$l$')
    plt.tight_layout(h_pad=0.1)

# polarization angle plot
# reload imap to get the original resolution
# seed = enmap.rand_gauss(imap[0].shape, imap.wcs)
# seed = enmap.smooth_gauss(seed, 0.5*u.arcmin*u.fwhm)
seed = None
imap = enmap.smooth_gauss(imap, 5*u.arcmin*u.fwhm)
P    = np.sum(imap[1:]**2,axis=0)**0.5
if not op.exists(args.texture) or args.force:
    theta = lib.Bangle(imap[1], imap[2], toIAU=True)
    # no need to add pi/2 for the LIC
    texture = lib.LIC_texture(theta, length=0.1, seed=seed, contrast=True)
    np.save(args.texture, texture)
else:
    texture = np.load(args.texture)
# boost contrast
curve = lambda x: 1/(1+np.exp(-(x-0.5)))
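# (with unit slope this logistic maps [0,1] into roughly [0.38, 0.62], so it
#  compresses; a steeper slope such as 1/(1+np.exp(-10*(x-0.5))) would boost)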
# texture = curve(texture)  # option to adjust contrast of lic texture
alpha = np.min([np.ones_like(texture), 1.2*(P/P.max())**0.7],axis=0)
textures = np.stack([np.ones_like(texture)*alpha]*3+[0.6*texture], axis=2)
ax.imshow(tmap, **popts)
ax.imshow(textures, origin='lower')
Example #9
"""
Loads a catalog
Maps it
Smooths it
Thresholds it
Projects it onto ACT
This gives a mask of 1s and 0s from which a random catalog can be made
"""


paths = cutils.paths
#cat_type = "wise_panstarrs"
#cat_type = "madcows_photz"
cat_type = args.sys[1]
meanfield = False

# cat_type = "sdss_redmapper"
# meanfield = True

shape,wcs = enmap.fullsky_geometry(res=1 * utils.degree)
ras,decs,_ = cutils.catalog_interface(cat_type,is_meanfield=meanfield)
cmapper = catalogs.CatMapper(ras,decs,shape=shape,wcs=wcs)
cmap = maps.binary_mask(enmap.smooth_gauss(cmapper.counts,2 * utils.degree),1e-3)
io.hplot(cmap,'counts')


shape,wcs = enmap.read_map_geometry(paths.coadd_data + f"act_planck_s08_s18_cmb_f150_daynight_srcfree_map.fits")
omap = enmap.project(cmap,shape,wcs,order=0)
io.plot_img(omap,'pcounts')
enmap.write_map(f'{paths.scratch}{cat_type}_mask.fits',omap)
Example #10
else:
    rmap_f150 = lib.beam_match(imap_f150, 'f090', 'f150')
    rmap_f220 = lib.beam_match(imap_f220, 'f090', 'f220')

# decide whether to plot total intensity (imap[0]) or the polarization
# intensity: (imap[1]**2 + imap[2]**2)**0.5
if not args.pol: fun = lambda x: x[0]
else: fun = lambda x: np.sum(x[1:]**2, axis=0)**0.5

rmap_f090 = fun(rmap_f090)
rmap_f150 = fun(rmap_f150)
rmap_f220 = fun(rmap_f220)

# optionally apply a filter
if args.smooth > 0:
    rmap_f090 = enmap.smooth_gauss(rmap_f090, args.smooth * u.fwhm * u.arcmin)
    rmap_f150 = enmap.smooth_gauss(rmap_f150, args.smooth * u.fwhm * u.arcmin)
    rmap_f220 = enmap.smooth_gauss(rmap_f220, args.smooth * u.fwhm * u.arcmin)
    # get s/n factors after smoothing, but it depends on whether we have
    # matched our beam to a particular size
    if not args.beam_match:
        s_f090 = sfactor('f090', args.smooth)
        s_f150 = sfactor('f150', args.smooth)
        s_f220 = sfactor('f220', args.smooth)
    else:
        s_f090 = s_f150 = s_f220 = sfactor('f090', args.smooth)
    snr_f090 = enmap.smooth_gauss(snr_f090,
                                  args.smooth * u.fwhm * u.arcmin) * s_f090
    snr_f150 = enmap.smooth_gauss(snr_f150,
                                  args.smooth * u.fwhm * u.arcmin) * s_f150
    snr_f220 = enmap.smooth_gauss(snr_f220,
                                  args.smooth * u.fwhm * u.arcmin) * s_f220
Example #11
                              os.environ['WORK'] + "/" + y,
                              ticks=5,
                              tick_unit='arcmin',
                              grid=True,
                              colorbar=True,
                              color='gray',
                              upgrade=4,
                              quantile=1e-3)

print("Starting deep56")
rank, ystack1, cstack1, i1 = do(ymap1, cmap1, mask1, ras1, decs1, wt1)
if rank == 0:
    print(i1)
    hplot(ystack1, "fig_all_qso_ystack_%s_%s" % (cversion, 'deep56'))
    hplot(cstack1, "fig_all_qso_cstack_%s_%s" % (cversion, 'deep56'))

print("Starting boss")
rank, ystack2, cstack2, i2 = do(ymap2, cmap2, mask2, ras2, decs2, wt2)
if rank == 0:
    print(i2)
    hplot(ystack2, "fig_all_qso_ystack_%s_%s" % (cversion, 'boss'))
    hplot(cstack2, "fig_all_qso_cstack_%s_%s" % (cversion, 'boss'))

    ystack = (ystack1 + ystack2) / (i1 + i2)
    cstack = (cstack1 + cstack2) / (i1 + i2)

    hplot(enmap.smooth_gauss(ystack, np.deg2rad(2 / 60.)),
          "fig_all_qso_ystack_%s_%s" % (cversion, 'both'))
    hplot(enmap.smooth_gauss(cstack, np.deg2rad(2 / 60.)),
          "fig_all_qso_cstack_%s_%s" % (cversion, 'both'))
Example #12
import argparse, os, os.path as op
from common import *
import lib
from matplotlib import pyplot as plt
from pixell import enmap
import plotstyle

# parser defined in common
parser.add_argument("--freq", default='f090')
parser.add_argument("--smooth", type=float, default=None)
args = parser.parse_args()
if not op.exists(args.odir): os.makedirs(args.odir)

box = boxes[args.area]
imap = load_map(filedb[args.freq]['coadd'], box=box, fcode=args.freq) / 1e9
if args.smooth:
    imap = enmap.smooth_gauss(imap, args.smooth * u.fwhm *
                              u.arcmin)  # u defined in common
# calculate polarization angle
Pangle = 90 - lib.Bangle(imap[1], imap[2], toIAU=True) / np.pi * 180
print(f"Pangle = {np.median(Pangle)} +- {np.std(Pangle, ddof=1)}")
plt.hist(np.ravel(Pangle), bins=100)
plt.xlabel('Tilt w.r.t. Galactic plane [deg]')
ofile = op.join(args.odir, args.oname)
print("Writing:", ofile)
plt.savefig(ofile, bbox_inches='tight')