Example #1
def GeneratePowerSpectrum(lightcone_fg,redshift_lc,frequency_lc,lc_slice_sel,Ngrid,inputdatadir,outputdatadir,lcgridfilename,lcdesiredfields):

	print("Generating the power spectrum")
	start = time.time()

	#Read the lightcone from the meraxes grid data
	gridata = ReadGridData(inputdatadir,lcgridfilename,lcdesiredfields)

	#Keep only parts that are within the given frequency range
	lightcone_eor = gridata["LightconeBox"][:,:,lc_slice_sel]

	#The number of frequencies
	nfreq = len(frequency_lc)

	# the lightcone cube that we fill out
	fg_lightcone = np.zeros([Ngrid, Ngrid,nfreq])

	# convert foreground to brightness temperature and fill up a cube with its evolution at each frequency
	for jj in range(nfreq):
		fg_lightcone[:,:,jj] = lightcone_fg[:,:,jj] / (mpctoRadian(500, redshift_lc[jj]) / Ngrid)**2 / mKtoJy_per_sr(frequency_lc[jj])

	# sort out the coords of box in Mpc
	Lz_Mpc = cosmo.comoving_transverse_distance(redshift_lc[-1]).value-cosmo.comoving_transverse_distance(redshift_lc[0]).value + np.diff(cosmo.comoving_transverse_distance(redshift_lc).value)[0]	

	coords_xy = np.linspace(0, 500, Ngrid+1)
	coords_xy = coords_xy[1:] - np.diff(coords_xy)[0]/2
	coords_z = cosmo.comoving_transverse_distance(redshift_lc).value

	new_coords_z = np.linspace(coords_z.min(), coords_z.max(), 150)

	xx, yy, zz = np.meshgrid(coords_xy, coords_xy, new_coords_z, indexing='ij')

	# interpolate the foreground across frequency to smoothen its evolution
	f_lc_fg = scipy.interpolate.RegularGridInterpolator([coords_xy, coords_xy, coords_z], fg_lightcone)
	highres_lightcone_fg = f_lc_fg(np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T).reshape(Ngrid,Ngrid,len(new_coords_z))

	# interpolate the eor across frequency to make sure we have the same dimension as foregrounds
	f_lc_eor = scipy.interpolate.RegularGridInterpolator([coords_xy, coords_xy, coords_z], lightcone_eor)
	highres_lightcone_eor = f_lc_eor(np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T).reshape(Ngrid,Ngrid,len(new_coords_z))

	# Find the PS of foregrounds plus eor
	P_both, kperp, kparal = get_power((highres_lightcone_fg + highres_lightcone_eor) * signal.blackmanharris(len(new_coords_z)), [500/cosmo.h, 500/cosmo.h, Lz_Mpc/cosmo.h], bins = 50, res_ndim =2, bin_ave=False, get_variance=False)

	# Find the PS of foregrounds only
	P_fg = get_power(highres_lightcone_fg * signal.blackmanharris(len(new_coords_z)), [500/cosmo.h, 500/cosmo.h, Lz_Mpc/cosmo.h], bins = 50, res_ndim =2, bin_ave=False, get_variance=False)[0]

	# Find the PS of eor only
	P_eor = get_power(highres_lightcone_eor * signal.blackmanharris(len(new_coords_z)), [500/cosmo.h, 500/cosmo.h, Lz_Mpc/cosmo.h], bins = 50, res_ndim =2, bin_ave=False, get_variance=False)[0]

	#Save the power spectrum
	np.savez_compressed(outputdatadir + "power_spectrum.npz", power_spectrum_both = P_both, power_spectrum_eor = P_eor, power_spectrum_fg = P_fg, kperp = kperp, kparal = kparal)

	print("Done generating the power spectrum in",time.time()-start)
Example #2
def gen_wedge_psf(nx, ny, nf, dx, dy, df, z, out, threads=None):
    u = fftshift(fftfreq(nx, dx * np.pi / 180))
    v = fftshift(fftfreq(ny, dy * np.pi / 180))
    e = fftshift(fftfreq(nf, df))

    E = np.sqrt(Cosmo.Om0 * (1 + z) ** 3 +
                Cosmo.Ok0 * (1 + z) ** 2 + Cosmo.Ode0)
    D = Cosmo.comoving_transverse_distance(z).value
    H0 = Cosmo.H0.value * 1e3
    c = const.c.value
    print(E, D, H0)
    kx = u * 2 * np.pi / D
    ky = v * 2 * np.pi / D
    k_perp = np.sqrt(kx ** 2 + ky[np.newaxis, ...].T ** 2)
    k_par = e * 2 * np.pi * H0 * f21 * E / (c * (1 + z) ** 2)
    arr = np.ones((nf, nx, ny), dtype='complex128')
    for i in range(nf):
        mask = (k_perp > np.abs(k_par[i]) * c * (1 + z) / (H0 * E * D))
        arr[i][mask] = 0
    np.save('kx.npy', kx)
    np.save('ky.npy', ky)
    np.save('kpar.npy', k_par)
    np.save('wedge_window.npy', arr.real)
    fft_arr = fftshift(fftn(ifftshift(arr))).real
    hdu = fits.PrimaryHDU(data=fft_arr)
    hdr_dict = dict(cdelt1=dx, cdelt2=dy, cdelt3=df,
                    crpix1=nx/2, crpix2=ny/2, crpix3=nf/2,
                    crval1=0, crval2=0, crval3=0,
                    ctype1='RA---SIN', ctype2='DEC--SIN', ctype3='FREQ',
                    cunit1='deg', cunit2='deg', cunit3='Hz')
    for k, v in hdr_dict.items():
        hdu.header[k] = v
    hdu.writeto(out, overwrite=True)  # 'overwrite' replaces the removed 'clobber' keyword
Example #3
def _configure(infiles, z, df):
    conf.infiles = infiles
    img_hdr = fits.getheader(conf.infiles[0])
    conf.nx = img_hdr['naxis1']
    conf.ny = img_hdr['naxis2']
    conf.nf = MWA_FREQ_EOR_ALL_80KHZ.size
    conf.dx = np.abs(img_hdr['cdelt1']) * np.pi / 180
    conf.dy = np.abs(img_hdr['cdelt2']) * np.pi / 180
    conf.df = df
    conf.du = 1 / (conf.nx * conf.dx)
    conf.dv = 1 / (conf.ny * conf.dy)
    conf.deta = 1 / (conf.nf * conf.df)
    conf.freq = MWA_FREQ_EOR_ALL_80KHZ
    conf.u = fftshift(fftfreq(conf.nx, conf.dx))
    conf.v = fftshift(fftfreq(conf.ny, conf.dy))
    conf.eta = fftshift(fftfreq(conf.nf, conf.df))
    conf.z = z
    conf.cosmo_d = Cosmo.comoving_transverse_distance(conf.z).value
    conf.cosmo_e = np.sqrt(Cosmo.Om0 * (1 + conf.z) ** 3 + Cosmo.Ok0 *
                           (1 + conf.z) ** 2 + Cosmo.Ode0)
    conf.cosmo_h0 = Cosmo.H0.value * 1e3
    conf.cosmo_c = const.si.c.value
    conf.kx = conf.u * 2 * np.pi / conf.cosmo_d
    conf.ky = conf.v * 2 * np.pi / conf.cosmo_d
    conf.dkx = conf.du * 2 * np.pi / conf.cosmo_d
    conf.dky = conf.dv * 2 * np.pi / conf.cosmo_d
    conf.k_perp = np.sqrt(conf.kx ** 2 + conf.ky[np.newaxis, ...].T ** 2)
    conf.k_par = conf.eta * 2 * np.pi * conf.cosmo_h0 * _F21 * \
        conf.cosmo_e / (conf.cosmo_c * (1 + conf.z) ** 2)
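Examples #2 and #3 share the same (u, v, eta) to (k_perp, k_par) mapping through D_M(z) and E(z). As an illustration of what the mask in gen_wedge_psf encodes, here is a hedged sketch; horizon_wedge_slope is a hypothetical helper, not part of either snippet:

import numpy as np
from astropy import constants as const
from astropy.cosmology import Planck15 as Cosmo


def horizon_wedge_slope(z):
    # Slope of the horizon wedge: modes with k_par <= slope * k_perp are the
    # ones zeroed by the mask in gen_wedge_psf above.
    d_m = Cosmo.comoving_transverse_distance(z).value   # Mpc
    e_z = np.sqrt(Cosmo.Om0 * (1 + z) ** 3 + Cosmo.Ok0 * (1 + z) ** 2 + Cosmo.Ode0)
    h0 = Cosmo.H0.value * 1e3                           # m / s / Mpc
    c = const.c.value                                   # m / s
    return h0 * e_z * d_m / (c * (1 + z))

# e.g. wedge_mask = k_par[np.newaxis, :] <= horizon_wedge_slope(z) * k_perp[:, np.newaxis]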
Example #4
def uv2kpr(blmag, cen_fq):
    """
    Compute k_perpendicular from the magnitude of the baseline vector and the
    central frequency of observation.
    
    blmag is an np.array of baseline vector magnitudes, units of length
    cen_fq is the central frequency.
    
    Returns k_perpendicular in units of h/Mpc
    """
    z = f2z(cen_fq)
    lam = c.c / (cen_fq)
    uvmag = blmag / lam
    kpr = 2 * np.pi * uvmag / (cosmo.comoving_transverse_distance(z) * cosmo.h)
    return kpr.to(1. / u.Mpc)
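A hypothetical usage sketch for uv2kpr, assuming c, u and cosmo refer to astropy.constants, astropy.units and an astropy cosmology as in the snippet, and that f2z maps the observing frequency to the 21 cm redshift; the baseline lengths and frequency are illustrative only:

import numpy as np
import astropy.units as u

# Two MWA-like baseline lengths at a 150 MHz centre frequency.
blmag = np.array([14.0, 28.0]) * u.m
kpr = uv2kpr(blmag, 150 * u.MHz)   # k_perpendicular in h/Mpc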
Example #5
    def rescale(self, flux, errs, redshift):
        """
		Fixes the public flux to be at a common distance to the Factory data
		as in Sam's IDRTools.py file (fixes them inplace).

		:flux: (float array) flux values
		:errs: (float array) error values, possibly containing None elements
		:redshift: (float) redshift

		:returns: (float tuple) corrected values in (flux,errs) form
		"""
        dl = (1 +
              redshift) * cosmo.comoving_transverse_distance(redshift).value
        dlref = cosmo.luminosity_distance(0.05).value
        flux = np.divide(flux, (1 + redshift) / (1 + 0.05) * (dl / dlref)**2)
        errs = np.divide(errs, (1 + redshift) / (1 + 0.05) * (dl / dlref)**2)
        return (flux, errs)
Example #6
def get_cosmo(z):
    """Return transverse comoving distance and 3-parameter density term 
     (Omega_m, Omega_k, Lambda) for the given redshifts.

    Parameters
    ----------
    z : float
        Redshift

    Returns
    -------
    out : tuple
        (transverse comoving distance in Mpc, dimensionless expansion rate E(z))
    
    """
    cosmo_d = Cosmo.comoving_transverse_distance(z).value
    cosmo_e = np.sqrt(Cosmo.Om0 * (1 + z)**3 + Cosmo.Ok0 * (1 + z)**2 +
                      Cosmo.Ode0)
    return cosmo_d, cosmo_e
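A short illustrative usage (the redshift is arbitrary), showing how the two returned quantities enter the k-space conversions used in the other examples:

# cosmo_d is D_M(z) in Mpc, cosmo_e is E(z).
cosmo_d, cosmo_e = get_cosmo(8.0)

# These feed the conversions used above:
#   k_perp = 2 * pi * u / cosmo_d
#   k_par  = 2 * pi * eta * H0 * f21 * cosmo_e / (c * (1 + z) ** 2)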
Example #7
def physical_grid_lf(input_array,
                     refinement=1,
                     pad=2,
                     order=0,
                     feedback=1,
                     mode='constant'):
    r"""Project from freq, ra, dec into physical coordinates

    Parameters
    ----------
    input_array: np.ndarray
        The freq, ra, dec map

    Returns
    -------
    cube: np.ndarray
        The cube projected back into physical coordinates

    """
    if not hasattr(pad, '__iter__'):
        pad = [pad, pad, pad]
    pad = np.array(pad)

    freq_axis = input_array.get_axis('freq')  #/ 1.e6
    ra_axis = input_array.get_axis('ra')
    dec_axis = input_array.get_axis('dec')

    freq_axis = np.pad(freq_axis, 1, mode='edge')
    ra_axis = np.pad(ra_axis, 1, mode='edge')
    dec_axis = np.pad(dec_axis, 1, mode='edge')
    freq_axis[0] -= input_array.info['freq_delta']
    freq_axis[-1] += input_array.info['freq_delta']
    ra_axis[0] -= input_array.info['ra_delta']
    ra_axis[-1] += input_array.info['ra_delta']
    dec_axis[0] -= input_array.info['dec_delta']
    dec_axis[-1] += input_array.info['dec_delta']
    input_array = np.pad(input_array, 1, mode='constant')

    _dec, _ra = np.meshgrid(dec_axis, ra_axis)
    _ra, _dec = centering_to_fieldcenter(_ra, _dec)

    # convert the freq, ra and dec axis to physical distance
    z_axis = __nu21__ / freq_axis - 1.0
    d_axis = (cosmology.comoving_transverse_distance(z_axis) *
              cosmology.h).value
    c_axis = (cosmology.comoving_distance(z_axis) * cosmology.h).value

    d_axis = d_axis[:, None, None]
    c_axis = c_axis[:, None, None]
    _ra = _ra[None, :, :]
    _dec = _dec[None, :, :]

    xx = d_axis * np.cos(np.deg2rad(_ra)) * np.cos(np.deg2rad(_dec))
    yy = d_axis * np.sin(np.deg2rad(_ra)) * np.cos(np.deg2rad(_dec))
    zz = c_axis * np.sin(np.deg2rad(_dec))

    xx = xx.flatten()[:, None]
    yy = yy.flatten()[:, None]
    zz = zz.flatten()[:, None]
    dd = input_array.flatten()[:, None]
    coord = np.concatenate([xx, yy, zz], axis=1)
    #input_array_f = NearestNDInterpolator(coord, input_array.flatten())
    #input_array_f = Rbf(xx, yy, zz, input_array.flatten()[:, None], function='linear')

    (numz, numx, numy) = input_array.shape

    c1, c2 = zz.min(), zz.max()
    c_center = 0.5 * (c1 + c2)

    phys_dim = np.array([c2 - c1, xx.max() - xx.min(), yy.max() - yy.min()])

    n = np.array([numz, numx, numy])

    # Enlarge cube size by `pad` in each dimension, so raytraced cube
    # sits exactly within the gridded points.
    phys_dim = phys_dim * (n + pad).astype(float) / n.astype(float)
    c1 = c_center - (c_center - c1) * (n[0] + pad[0]) / float(n[0])
    c2 = c_center + (c2 - c_center) * (n[0] + pad[0]) / float(n[0])
    n = n + pad
    # now multiply by scaling for a finer sub-grid
    n = (refinement * n).astype('int')

    if feedback > 0:
        msg = "converting from obs. to physical coord\n"\
               "refinement=%s, pad=(%s, %s, %s)\n "\
               "(%d, %d, %d)->(%f to %f) x %f x %f\n "\
               "(%d, %d, %d) (h^-1 cMpc)^3\n" % \
                       ((refinement, ) +  tuple(pad) + (
                        numz, numx, numy, c1, c2,
                        phys_dim[1], phys_dim[2],
                        n[0], n[1], n[2]))
        msg += "dx = %f, dy = %f, dz = %f" % (
            abs(phys_dim[1]) / float(n[1] - 1),
            abs(phys_dim[2]) / float(n[2] - 1), abs(c2 - c1) / float(n[0] - 1))
        logger.debug(msg)
        print(msg)

    # this is wasteful in memory, but numpy can be pickled
    phys_map = algebra.make_vect(np.zeros(n), axis_names=('freq', 'ra', 'dec'))

    # TODO: should this be more sophisticated? N-1 or N?
    info = {}
    info['axes'] = ('freq', 'ra', 'dec')
    info['type'] = 'vect'
    info['freq_delta'] = abs(c2 - c1) / float(n[0] - 1)
    info['freq_centre'] = c1 + info['freq_delta'] * float(n[0] // 2)
    info['ra_delta'] = abs(phys_dim[1]) / float(n[1] - 1)
    info['ra_centre'] = 0.5 * (xx.max() + xx.min())
    info['dec_delta'] = abs(phys_dim[2]) / float(n[2] - 1)
    info['dec_centre'] = 0.5 * (yy.max() + yy.min())
    phys_map.info = info

    # same as np.linspace(c1, c2, n[0], endpoint=True)
    radius_axis = phys_map.get_axis("freq")
    x_axis = phys_map.get_axis("ra")
    y_axis = phys_map.get_axis("dec")

    _yy, _xx = np.meshgrid(y_axis, x_axis)
    #dd_f = NearestNDInterpolator(coord, dd)

    _pp = 0
    for i in range(radius_axis.shape[0]):
        if int(10 * i / float(radius_axis.shape[0])) > _pp:
            print('.', end='', flush=True)
            _pp = int(10 * i / float(radius_axis.shape[0]))
        #print '%3d '%i,
        _zz = radius_axis[i] * np.ones(_yy.shape)
        _sel = zz[:, 0] < radius_axis[i] + 1 * info['freq_delta']
        _sel *= zz[:, 0] > radius_axis[i] - 1 * info['freq_delta']
        if np.any(_sel):
            #dd_f = Rbf(xx[_sel], yy[_sel], zz[_sel], dd[_sel], function='linear')
            dd_f = NearestNDInterpolator(coord[_sel], dd[_sel])
            phys_map[i] = dd_f(_xx, _yy, _zz)[:, :, 0]
        #phys_map[i] = dd_f(_xx, _yy, _zz)[:, :, 0]
    print('. Done')

    #phys_map_npy = algebra.make_vect(phys_map_npy, axis_names=('freq', 'ra', 'dec'))
    #phys_map_npy.info = info
    return phys_map, info
Example #8
def physical_grid(input_array,
                  refinement=1,
                  pad=2,
                  order=0,
                  feedback=1,
                  mode='constant'):
    r"""Project from freq, ra, dec into physical coordinates

    Parameters
    ----------
    input_array: np.ndarray
        The freq, ra, dec map

    Returns
    -------
    cube: np.ndarray
        The cube projected back into physical coordinates

    """
    if not hasattr(pad, '__iter__'):
        pad = [pad, pad, pad]
    pad = np.array(pad)

    freq_axis = input_array.get_axis('freq')  #/ 1.e6
    ra_axis = input_array.get_axis('ra')
    dec_axis = input_array.get_axis('dec')

    nu_lower, nu_upper = freq_axis.min(), freq_axis.max()
    ra_fact = np.cos(np.pi * input_array.info['dec_centre'] / 180.0)
    thetax, thetay = np.ptp(ra_axis), np.ptp(dec_axis)
    thetax *= ra_fact
    (numz, numx, numy) = input_array.shape

    z1 = __nu21__ / nu_upper - 1.0
    z2 = __nu21__ / nu_lower - 1.0
    d1 = (cosmology.comoving_transverse_distance(z1) * cosmology.h).value
    d2 = (cosmology.comoving_transverse_distance(z2) * cosmology.h).value
    c1 = (cosmology.comoving_distance(z1) * cosmology.h).value
    c2 = (cosmology.comoving_distance(z2) * cosmology.h).value
    c1 = np.sqrt(c1**2. - (((0.5 * thetax * u.deg).to(u.rad)).value * d1)**2)
    c_center = (c1 + c2) / 2.

    # Make cube pixelisation finer, such that angular cube will
    # have sufficient resolution on the closest face.
    phys_dim = np.array([
        c2 - c1, ((thetax * u.deg).to(u.rad)).value * d2,
        ((thetay * u.deg).to(u.rad)).value * d2
    ])

    # Note that the ratio of deltas in Ra, Dec in degrees may
    # be different than the Ra, Dec in physical coordinates due to
    # rounding onto this grid
    #n = np.array([numz, int(d2 / d1 * numx), int(d2 / d1 * numy)])
    n = np.array([numz, numx, numy])

    # Enlarge cube size by `pad` in each dimension, so raytraced cube
    # sits exactly within the gridded points.
    phys_dim = phys_dim * (n + pad).astype(float) / n.astype(float)
    c1 = c_center - (c_center - c1) * (n[0] + pad[0]) / float(n[0])
    c2 = c_center + (c2 - c_center) * (n[0] + pad[0]) / float(n[0])
    n = n + pad
    # now multiply by scaling for a finer sub-grid
    n = (refinement * n).astype('int')

    if feedback > 0:
        msg = "converting from obs. to physical coord\n"\
               "refinement=%s, pad=(%s, %s, %s)\n "\
               "(%d, %d, %d)->(%f to %f) x %f x %f\n "\
               "(%d, %d, %d) (h^-1 cMpc)^3\n" % \
                       ((refinement, ) +  tuple(pad) + (
                        numz, numx, numy, c1, c2,
                        phys_dim[1], phys_dim[2],
                        n[0], n[1], n[2]))
        msg += "dx = %f, dy = %f, dz = %f" % (
            abs(phys_dim[1]) / float(n[1] - 1),
            abs(phys_dim[2]) / float(n[2] - 1), abs(c2 - c1) / float(n[0] - 1))
        logger.debug(msg)
        print(msg)

    # this is wasteful in memory, but numpy can be pickled
    phys_map_npy = np.zeros(n)
    phys_map = algebra.make_vect(phys_map_npy,
                                 axis_names=('freq', 'ra', 'dec'))
    #mask = np.ones_like(phys_map)
    mask = np.ones_like(phys_map_npy)

    # TODO: should this be more sophisticated? N-1 or N?
    info = {}
    info['axes'] = ('freq', 'ra', 'dec')
    info['type'] = 'vect'

    #info = {'freq_delta': abs(phys_dim[0])/float(n[0]),
    #        'freq_centre': abs(c2+c1)/2.,
    info['freq_delta'] = abs(c2 - c1) / float(n[0] - 1)
    info['freq_centre'] = c1 + info['freq_delta'] * float(n[0] // 2)

    info['ra_delta'] = abs(phys_dim[1]) / float(n[1] - 1)
    #info['ra_centre'] = info['ra_delta'] * float(n[1] // 2)
    info['ra_centre'] = 0.

    info['dec_delta'] = abs(phys_dim[2]) / float(n[2] - 1)
    #info['dec_centre'] = info['dec_delta'] * float(n[2] // 2)
    info['dec_centre'] = 0.

    phys_map.info = info
    #print info

    # same as np.linspace(c1, c2, n[0], endpoint=True)
    radius_axis = phys_map.get_axis("freq")
    x_axis = phys_map.get_axis("ra")
    y_axis = phys_map.get_axis("dec")

    # Construct an array of the redshifts on each slice of the cube.
    #comoving_inv = cosmo.inverse_approx(cosmology.comoving_distance, z1 * 0.9, z2 * 1.1)
    #za = comoving_inv(radius_axis)  # redshifts on the constant-D spacing
    _xp = np.linspace(z1 * 0.9, z2 * 1.1, 500)
    _fp = (cosmology.comoving_distance(_xp) * cosmology.h).value
    #comoving_inv = interp1d(_fp, _xp)
    #za = comoving_inv(radius_axis)  # redshifts on the constant-D spacing
    za = np.interp(radius_axis, _fp, _xp)
    nua = __nu21__ / (1. + za)

    gridy, gridx = np.meshgrid(y_axis, x_axis)
    interpol_grid = np.zeros((3, n[1], n[2]))

    for i in range(n[0]):
        # nua[0] = nu_upper, nua[1] = nu_lower
        #print nua[i], freq_axis[0], freq_axis[-1], (nua[i] - freq_axis[0]) / \
        #                                (freq_axis[-1] - freq_axis[0]) * numz
        #_radius_axis = np.sqrt(radius_axis[i]**2 + gridy**2 + gridx**2)
        #_radius_axis = np.sqrt(radius_axis[i]**2 + gridx**2)
        #za = np.interp(_radius_axis, _fp, _xp)
        #nua = __nu21__ / (1. + za)

        interpol_grid[0, :, :] = (nua[i] - freq_axis[0]) / \
                                 (freq_axis[-1] - freq_axis[0]) * numz
        proper_z = cosmology.comoving_transverse_distance(za[i]) * cosmology.h
        proper_z = proper_z.value

        angscale = ((proper_z * u.deg).to(u.rad)).value
        interpol_grid[1, :, :] = gridx / angscale / thetax * numx + numx / 2
        interpol_grid[2, :, :] = gridy / angscale / thetay * numy + numy / 2

        phys_map_npy[i, :, :] = sp.ndimage.map_coordinates(input_array,
                                                           interpol_grid,
                                                           order=order,
                                                           mode=mode)

        interpol_grid[1, :, :] = np.logical_or(interpol_grid[1, :, :] >= numx,
                                               interpol_grid[1, :, :] < 0)
        interpol_grid[2, :, :] = np.logical_or(interpol_grid[2, :, :] >= numy,
                                               interpol_grid[2, :, :] < 0)
        mask = np.logical_not(
            np.logical_or(interpol_grid[1, :, :], interpol_grid[2, :, :]))
        phys_map_npy *= mask

    phys_map_npy = algebra.make_vect(phys_map_npy,
                                     axis_names=('freq', 'ra', 'dec'))
    phys_map_npy.info = info
    return phys_map_npy, info
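Both physical_grid_lf and physical_grid rely on the same frequency-to-distance mapping: z = __nu21__ / nu - 1, followed by comoving distances scaled to h^-1 Mpc. A minimal standalone sketch of that mapping, assuming the frequency axis and __nu21__ are in Hz and that `cosmology` is an astropy cosmology:

import numpy as np
from astropy.cosmology import Planck15 as cosmology

__nu21__ = 1420.405751786e6  # 21 cm rest frequency, assuming a Hz convention


def freq_to_comoving(freq):
    # Redshift of each channel, then transverse / radial comoving distances in
    # h^-1 Mpc, mirroring the z_axis / d_axis / c_axis construction above.
    z = __nu21__ / np.asarray(freq) - 1.0
    d_transverse = (cosmology.comoving_transverse_distance(z) * cosmology.h).value
    d_radial = (cosmology.comoving_distance(z) * cosmology.h).value
    return z, d_transverse, d_radial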
Example #9
#The number of frequencies
nfreq = len(frequency_lc)

# the lightcone cube that we fill out with foregrounds
fg_lightcone = np.zeros([Ngrid, Ngrid, nfreq])

# convert foreground to brightness temperature and fill up a cube with its evolution at each frequency
for jj in range(nfreq):
    fg_lightcone[:, :, jj] = lightcone_fg[:, :, jj] / (mpctoRadian(
        redshift_lc[jj], 500) / Ngrid) / mKtoJy_per_sr(frequency_lc[jj])

# sort out the coords of box in Mpc
Lz_Mpc = cosmo.comoving_transverse_distance(
    redshift_lc[-1]).value - cosmo.comoving_transverse_distance(
        redshift_lc[0]).value + np.diff(
            cosmo.comoving_transverse_distance(redshift_lc).value)[0]

coords_xy = np.linspace(0, 500, Ngrid + 1)
coords_xy = coords_xy[1:] - np.diff(coords_xy)[0] / 2
coords_z = cosmo.comoving_transverse_distance(redshift_lc).value

new_coords_z = np.linspace(coords_z.min(), coords_z.max(), 100)

xx, yy, zz = np.meshgrid(coords_xy, coords_xy, new_coords_z, indexing='ij')

# interpolate the foreground across frequency to smoothen its evolution
f_lc_fg = scipy.interpolate.RegularGridInterpolator(
    [coords_xy, coords_xy, coords_z], fg_lightcone)
highres_lightcone_fg = f_lc_fg(
    np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T).reshape(
        Ngrid, Ngrid, len(new_coords_z))
Example #10
def main(args):
    print("Variation %s" % args.variation)
    print("FOV %d" % args.fov)

    boxsize_Mpc = 100
    N = 2400
    slicewidth = 0.05

    # Save redshift slices in 0.1 slices
    nslices = int(np.ceil(args.z_max / slicewidth))

    # Create catalogue image
    catalogue_image = np.zeros((
        nslices + 1,
        (args.fov * 3600) // args.catalogue_resolution,
        (args.fov * 3600) // args.catalogue_resolution,
    ))
    # ...and a halo image to compare visually
    halo_image = np.zeros((
        nslices + 1,
        (args.fov * 3600) // args.halo_resolution,
        (args.fov * 3600) // args.halo_resolution,
    ))
    halo_catalogue = np.zeros((0, 6))

    np.random.seed(args.variation)

    sim_dict = create_sim_dict()

    halos_dict = dict()
    halos_dict[0.025] = np.load("halosCM-z-0.025-m-0.004-c-0.035.npy")
    halos_dict[0.203] = np.load("halosCM-z-0.203-m-0.004-c-0.035.npy")
    halos_dict[0.309] = np.load("halosCM-z-0.309-m-0.004-c-0.035.npy")
    halos_dict[0.6] = np.load("halosCM-z-0.6-m-0.004-c-0.035.npy")

    box_count = -1
    while True:
        box_count += 1

        # Break loop if we've exceeded z_max
        z = z_at_value(Planck15.comoving_distance,
                       Quantity((box_count + 1) * boxsize_Mpc, "Mpc"))
        if z < args.z_min:
            continue
        if z > args.z_max:
            break

        z_snapshot = nearest_snapshot(z)
        print("Using snapshot:", z_snapshot)

        # Calculate ntiles here rather than in 25 Mpc loop, or else we might retile the 25 Mpc slices
        # differently
        fov_Mpc = np.radians(args.fov) * Planck15.comoving_transverse_distance(
            z).to_value("Mpc")
        ntiles = int(np.ceil(fov_Mpc / boxsize_Mpc))
        print("fov_Mpc:", fov_Mpc)
        print("ntiles:", ntiles)

        # Random offset
        x_offset, y_offset = np.random.randint(0, N, size=2)
        x_offset_Mpc, y_offset_Mpc = (x_offset / N) * boxsize_Mpc, (
            y_offset / N) * boxsize_Mpc
        print("x_offset:", x_offset, "x_offset_Mpc:", x_offset_Mpc)
        print("y_offset:", y_offset, "y_offset_Mpc:", y_offset_Mpc)

        # Process 25 Mpc slices for flux
        for slice_idx, offset_Mpc in enumerate([12.5, 37.5, 62.5, 87.5]):
            # if slice_idx != 0: continue ## REMOVE

            DC_Mpc = box_count * boxsize_Mpc + offset_Mpc  # radial comoving distance
            z = z_at_value(Planck15.comoving_distance, Quantity(DC_Mpc, "Mpc"))

            print("Redshift z", z, " DC_Mpc", DC_Mpc)

            lums_154MHz, alphas = sim_dict[z_snapshot][slice_idx]
            fluxes = (lums_154MHz * (1 + z)**(1 + alphas)) / (
                4 * np.pi * Planck15.luminosity_distance(z).to_value('m')**2)
            fluxes *= 1E26  # [W /m^2 /Hz] -> [Jy]

            # Apply offset
            fluxes = np.roll(fluxes, (y_offset, x_offset), (0, 1))

            if ntiles > 1:
                _fluxes = np.zeros((N * ntiles, N * ntiles))
                for nx in range(ntiles):
                    for ny in range(ntiles):
                        _fluxes[ny * N:(ny + 1) * N,
                                nx * N:(nx + 1) * N] = fluxes

                fluxes = _fluxes

            print("Fluxes map has shape:", fluxes.shape)

            comoving_transverse_distance_1deg_Mpc = np.radians(
                1) * Planck15.comoving_transverse_distance(z).to_value("Mpc")
            angular_width_of_box = boxsize_Mpc / comoving_transverse_distance_1deg_Mpc
            print("Angular width of box: ", angular_width_of_box)
            fluxres = angular_width_of_box / N

            painter(catalogue_image[int(z / slicewidth) + 1],
                    args.catalogue_resolution / 3600, fluxes, fluxres)
            painter(catalogue_image[0], args.catalogue_resolution / 3600,
                    fluxes, fluxres)

        # Now process halos
        halos = np.copy(
            halos_dict[z_snapshot])  # [mass, x, y, z, something, other]

        # Offset each
        halos[:, 1] += x_offset_Mpc
        halos[:, 1] %= boxsize_Mpc
        halos[:, 2] += y_offset_Mpc
        halos[:, 2] %= boxsize_Mpc
        halos[:, 3] += box_count * boxsize_Mpc

        # Retile
        if ntiles > 1:
            nhalos = halos.shape[0]
            _halos = np.zeros((nhalos * ntiles**2, halos.shape[1]))

            noffset = 0
            for nx in range(ntiles):
                for ny in range(ntiles):
                    _halos[noffset:noffset + nhalos, :] = halos
                    _halos[noffset:noffset + nhalos, 1] += nx * boxsize_Mpc
                    _halos[noffset:noffset + nhalos, 2] += ny * boxsize_Mpc
                    noffset += nhalos

            assert (noffset == len(_halos))
            halos = _halos

        # Center box
        halos[:, 1] -= (ntiles * boxsize_Mpc) / 2
        halos[:, 2] -= (ntiles * boxsize_Mpc) / 2
        print("Min/max x (Mpc):", halos[:, 1].min(), halos[:, 1].max())
        print("Min/max y (Mpc):", halos[:, 2].min(), halos[:, 2].max())
        print("Min/max z (Mpc):", halos[:, 3].min(), halos[:, 3].max())

        # Halos must be at least 1 Mpc away (z_at_value breaks for extremely close values)
        halos = halos[halos[:, 3] > 1]

        # Round DC value to aid computation
        halos[:, 3] = np.around(halos[:, 3], decimals=1)

        # Calculate angular position
        for DC_Mpc in np.unique(halos[:, 3]):

            z = z_at_value(Planck15.comoving_distance, Quantity(DC_Mpc, "Mpc"))

            comoving_transverse_distance_1deg_Mpc = np.radians(
                1) * Planck15.comoving_transverse_distance(z).to_value("Mpc")
            idxs = halos[:, 3] == DC_Mpc
            halos[idxs, 3] = z
            halos[idxs,
                  1] /= comoving_transverse_distance_1deg_Mpc  # -> degrees
            halos[idxs, 2] /= comoving_transverse_distance_1deg_Mpc

            # Now use column 4 to put in a pseudoflux
            halos[idxs,
                  4] = 1 / Planck15.luminosity_distance(z).to_value('Mpc')**2

        print("Min/max x (deg):", halos[:, 1].min(), halos[:, 1].max())
        print("Min/max y (deg):", halos[:, 2].min(), halos[:, 2].max())

        # Filter out values out of the FOV
        idx = np.all([
            halos[:, 1] >= -args.fov / 2, halos[:, 1] <= args.fov / 2,
            halos[:, 2] >= -args.fov / 2, halos[:, 2] <= args.fov / 2
        ],
                     axis=0)
        halos = halos[idx]

        halopainter(halo_image[0], args.halo_resolution / 3600, halos)
        for i, z in enumerate(np.arange(0, args.z_max, slicewidth)):
            idxs = np.all([halos[:, 3] > z, halos[:, 3] < z + slicewidth],
                          axis=0)
            if np.any(idxs):  # paint only slices that actually contain halos
                halopainter(halo_image[i + 1], args.halo_resolution / 3600,
                            halos[idxs])

        halo_catalogue = np.concatenate([halo_catalogue, halos], axis=0)

    np.save("halos-%d.npy" % args.variation, halo_catalogue)
    halo_catalogue = halo_catalogue[np.argsort(
        halo_catalogue[:, 0])]  # Order by mass
    try:
        os.mkdir("cones-%d" % args.variation)
    except OSError:
        pass
    zs = [0.01, 0.02] + list(np.arange(0.05, 1.06, 0.05))
    for i, z in enumerate(zs[:-1]):
        z_min = (zs[i - 1] + z) / 2
        z_max = (zs[i + 1] + z) / 2

        # Special case for z = 0.01
        if i == 0:
            z_min = 0

        idx = np.all(
            [halo_catalogue[:, 3] >= z_min, halo_catalogue[:, 3] < z_max],
            axis=0)
        # [mass, redshift, latitude, longitude]
        np.savetxt("cones-%d/cone_5X5_z%.02f.txt_sort" % (args.variation, z),
                   halo_catalogue[idx][:, [0, 3, 1, 2]],
                   header="mass redshift latitude longitude")

    for data, name, res in [(catalogue_image, 'web',
                             args.catalogue_resolution),
                            (halo_image, 'myhalos', args.halo_resolution)]:
        hdu = fits.PrimaryHDU(data=data)
        hdu.header["BUNIT"] = "JY/PIXEL"
        hdu.header["CTYPE1"] = "RA---SIN"
        hdu.header["CRPIX1"] = 0
        hdu.header["CRVAL1"] = 0
        hdu.header["CDELT1"] = -res / 3600
        hdu.header["CUNIT1"] = "deg"
        hdu.header["CTYPE2"] = "DEC--SIN"
        hdu.header["CRPIX2"] = 0
        hdu.header["CRVAL2"] = 0
        hdu.header["CDELT2"] = res / 3600
        hdu.header["CUNIT2"] = "deg"
        hdu.writeto("%s-%d.fits" % (name, args.variation), overwrite=True)
Example #11
def dL_dth(z):
    """
    Comoving transverse distance per radian in Mpc
    [cMpc]/radian
    """
    return cosmo.comoving_transverse_distance(z).value
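A short usage sketch, converting an angular extent to a transverse comoving length; the angle and redshift are illustrative:

import numpy as np

# Transverse comoving length [cMpc] subtended by 2 degrees on the sky at z = 1.
L_cMpc = dL_dth(1.0) * np.radians(2.0)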