def load_geometries(qids):
    imap = enmap.zeros(pshape, pwcs)
    imap2 = enmap.pad(imap, 900)
    shape, wcs = imap2.shape, imap2.wcs
    gs = {}
    for qid in qids:
        gs[qid] = shape, wcs
    return gs
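# A minimal usage sketch for load_geometries, assuming pshape/pwcs are module-level
# globals holding the unpadded parent geometry; the qid strings are placeholders.
import numpy as np
from pixell import enmap

pshape, pwcs = enmap.geometry(pos=np.deg2rad([[-5, -5], [5, 5]]),
                              res=np.deg2rad(0.5 / 60.0))
geoms = load_geometries(['d56_01', 'd56_02'])
shape, wcs = geoms['d56_01']  # every qid maps to the same padded geometry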
def map_generate_final_map(numerics, cosmology, dndOmega,
                           thetas, yprofiles, wcs):#{{{
    start_total = time.time()
    if numerics['map_include_bias']:
        f = np.load(path + '/bias.npz')
        bias = f['bias']
    # consistency checks#{{{
    if dndOmega.shape[0] != numerics['map_Npoints_M']:
        print('dndOmega mass problem')
        print('dndOmega has ' + str(dndOmega.shape[0]) + ' mass entries')
        print('while we have ' + str(numerics['map_Npoints_M']) + ' mass grid points')
        return
    if dndOmega.shape[1] != numerics['map_Npoints_z']:
        print('dndOmega redshift problem')
        print('dndOmega has ' + str(dndOmega.shape[1]) + ' redshift entries')
        print('while we have ' + str(numerics['map_Npoints_z']) + ' redshift grid points')
        return
    if len(yprofiles) != numerics['map_Npoints_M']:
        print('yprofiles mass problem')
        print('yprofiles has ' + str(len(yprofiles)) + ' mass entries')
        print('while we have ' + str(numerics['map_Npoints_M']) + ' mass grid points')
        return
    if len(yprofiles[0]) != numerics['map_Npoints_z']:
        print('yprofiles redshift problem')
        print('yprofiles has ' + str(len(yprofiles[0])) + ' redshift entries')
        print('while we have ' + str(numerics['map_Npoints_z']) + ' redshift grid points')
        return
    #}}}

    # start off with a map of the desired size
    final_map = enmap.enmap(np.zeros((numerics['map_height_pix'],
                                      numerics['map_width_pix'])),
                            wcs=wcs)
    # pad out to a square map, extended appropriately for tSZ code
    map_width_ext = int(numerics['map_width_pix'] / numerics['map_fraction'])
    map_height_ext = int(numerics['map_height_pix'] / numerics['map_fraction'])
    map_size_ext = max(map_width_ext, map_height_ext)
    spare_pix_hor = int((map_size_ext - numerics['map_width_pix']) / 2.0)
    spare_pix_ver = int((map_size_ext - numerics['map_height_pix']) / 2.0)
    ext_map = enmap.pad(final_map, [spare_pix_ver, spare_pix_hor])
    map_area = (map_size_ext * numerics['map_pixel_size'])**2
    ###print(final_map.shape)
    ###print(square_map.shape)
    ###print(map_size_ext)
    ###exit()

    # generate the tSZ signal
    for jj in range(numerics['map_Npoints_z']):
        if numerics['verbose']:
            print(str(jj))
        start = time.time()
        if numerics['map_include_bias']:
            # for each redshift bin, compute one random realization of the overdensity field
            delta_map = map_generate_linear_density_field(jj, numerics, cosmology, path)
            delta_map = delta_map.flatten()
        for ii in range(numerics['map_Npoints_M']):
            if numerics['map_Poisson']:
                cluster_number = np.random.poisson(dndOmega[ii, jj] * map_area)
            else:
                middle = dndOmega[ii, jj] * map_area
                lower = np.floor(middle)
                upper = np.ceil(middle)
                if np.random.rand() < (middle - lower):
                    cluster_number = int(upper)
                else:
                    cluster_number = int(lower)
            if cluster_number > 0:
                if numerics['map_include_bias']:
                    #probabilities = 1. + bias[ii,jj] * delta_map
                    #probabilities[np.where(probabilities<0.)] = 0.
                    #probabilities /= sum(probabilities)
                    probabilities = map_get_probabilities(bias[ii, jj], delta_map)
                    central_pixels = np.random.choice(len(delta_map),
                                                      p=probabilities,
                                                      replace=True,
                                                      size=cluster_number)
                    central_pixels_x = np.zeros(cluster_number, dtype=int)
                    central_pixels_y = np.zeros(cluster_number, dtype=int)
                    for kk in range(cluster_number):
                        central_pixels_x[kk] = central_pixels[kk] // ext_map.shape[0]
                        central_pixels_y[kk] = central_pixels[kk] % ext_map.shape[0]
                else:
                    # np.random.random_integers is deprecated; randint's upper bound is exclusive
                    central_pixels_x = np.random.randint(0, ext_map.shape[0],
                                                         size=cluster_number)
                    central_pixels_y = np.random.randint(0, ext_map.shape[1],
                                                         size=cluster_number)
                random_offset_x = np.random.rand() - 0.5
                random_offset_y = np.random.rand() - 0.5
                t = thetas[ii][jj]
                y = yprofiles[ii][jj]
                y_interpolator = interp1d(t, y, kind='cubic', bounds_error=False,
                                          fill_value=(max(y), 0.))
                T_of_theta = lambda theta: T(cosmology, y_interpolator(theta))
                this_map = np.zeros((2 * int(max(t) / numerics['map_pixel_size']) + 5,
                                     2 * int(max(t) / numerics['map_pixel_size']) + 5))
                # want central pixel to be on center of the cluster
                pixel_indices_x = np.linspace(-(this_map.shape[0] - 1) / 2.,
                                              (this_map.shape[0] - 1) / 2.,
                                              num=this_map.shape[0])
                pixel_indices_y = np.linspace(-(this_map.shape[1] - 1) / 2.,
                                              (this_map.shape[1] - 1) / 2.,
                                              num=this_map.shape[1])
                # average over angles
                nn = 0
                for kk in range(-numerics['map_grid_per_pixel'],
                                numerics['map_grid_per_pixel'] + 1):
                    for ll in range(-numerics['map_grid_per_pixel'],
                                    numerics['map_grid_per_pixel'] + 1):
                        angles = numerics['map_pixel_size'] * np.sqrt(np.add.outer(
                            (pixel_indices_x + random_offset_x
                             + float(kk) / float(numerics['map_grid_per_pixel'] + 0.5))**2.,
                            (pixel_indices_y + random_offset_y
                             + float(ll) / float(numerics['map_grid_per_pixel'] + 0.5))**2.))
                        this_map += T_of_theta(angles)
                        nn += 1
                this_map *= 1. / float(nn)
                ext_map = throw_clusters(cluster_number, ext_map, this_map,
                                         central_pixels_x, central_pixels_y)
        end = time.time()
        if numerics['verbose']:
            print(str((numerics['map_Npoints_z'] - jj) * (end - start) / 60.)
                  + ' minutes remaining in map_generate_final_map')
    #print('I am in index = ' + str(index))
    '''
    # need to take a subset of the final map, since otherwise we're getting a bias (centres of clusters are currently always in the map)
    spare_pixels_horizontal = int((1.-numerics['map_fraction'])/2.*final_map.shape[0])
    spare_pixels_vertical = int((1.-numerics['map_fraction'])/2.*final_map.shape[1])
    hist = map_get_histogram(final_map[spare_pixels_horizontal:-spare_pixels_horizontal-1,spare_pixels_vertical:-spare_pixels_vertical-1])
    np.savez(path + '/p_' + str(index) + '.npz', p = hist)
    #inal_map = final_map[spare_pixels_horizontal:-spare_pixels_horizontal-1,spare_pixels_vertical:-spare_pixels_vertical-1]
    # Now do the apodization to get the power spectrum
    final_map[:spare_pixels_horizontal, :] *= np.linspace(0.*np.ones(final_map.shape[1]), 1.*np.ones(final_map.shape[1]), num = spare_pixels_horizontal, axis = 0)
    final_map[-spare_pixels_horizontal:, :] *= np.linspace(0.*np.ones(final_map.shape[1]), 1.*np.ones(final_map.shape[1]), num = spare_pixels_horizontal, axis = 0)[::-1, :]
    final_map[:, :spare_pixels_vertical] *= np.linspace(0.*np.ones(final_map.shape[0]), 1.*np.ones(final_map.shape[0]), num = spare_pixels_vertical, axis = 1)
    final_map[:, -spare_pixels_vertical:] *= np.linspace(0.*np.ones(final_map.shape[0]), 1.*np.ones(final_map.shape[0]), num = spare_pixels_vertical, axis = 1)[:, ::-1]
    #plt.matshow(final_map)
    #plt.show()
    #np.savez(path + '/final_map_' + str(index) + '.npz', final_map = final_map)
    ell, Cell = map_get_powerspectrum(numerics, final_map)
    np.savez(path + '/PS_' + str(index) + '.npz', ell = ell, Cell = Cell)
    end_total = time.time()
    if numerics['verbose']:
        print('used ' + str((end_total - start_total)/60.) + ' minutes in total')
    #plt.loglog(ell, Cell)
    #plt.savefig('tSZ_power_spectrum.pdf')
    #plt.show()
    '''
    # @TODO: add apodization scale to numerical_parameters? or
    # always use multiple sigma?

    # now smooth with instrumental beam. first, trim to a map of
    # the desired size plus a small buffer for apodization to
    # minimize ringing from harmonic-space smoothing
    map_width_apod = numerics['map_width_pix'] + 100
    map_height_apod = numerics['map_height_pix'] + 100
    spare_pix_hor = int((map_size_ext - map_width_apod) / 2.0)
    spare_pix_ver = int((map_size_ext - map_height_apod) / 2.0)
    apod_map = ext_map[spare_pix_ver: spare_pix_ver + map_height_apod,
                       spare_pix_hor: spare_pix_hor + map_width_apod]
    apod_map = enmap.apod(apod_map, 25)
    beam_sigma = (cosmology['beam_fwhm_arcmin'] * np.pi / 180.0 / 60.0 /
                  np.sqrt(8.0 * np.log(2.0)))
    apod_map = enmap.smooth_gauss(apod_map, beam_sigma)
    # finally, trim off the apodization padding
    spare_pix_hor = int((map_width_apod - numerics['map_width_pix']) / 2.0)
    spare_pix_ver = int((map_height_apod - numerics['map_height_pix']) / 2.0)
    final_map = apod_map[spare_pix_ver: spare_pix_ver + numerics['map_height_pix'],
                         spare_pix_hor: spare_pix_hor + numerics['map_width_pix']]
    end_total = time.time()
    if numerics['verbose']:
        print('used ' + str((end_total - start_total) / 60.) + ' minutes in total')
    return final_map
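# The beam-smoothing step above reduces to converting a FWHM in arcminutes to a
# Gaussian sigma in radians and calling enmap.smooth_gauss. A minimal standalone
# sketch, with an illustrative 1.4 arcmin FWHM and a made-up test geometry:
import numpy as np
from pixell import enmap

fwhm_arcmin = 1.4  # illustrative value, not the pipeline's actual beam
beam_sigma = np.deg2rad(fwhm_arcmin / 60.0) / np.sqrt(8.0 * np.log(2.0))

shape, wcs = enmap.geometry(pos=np.deg2rad([[-2, -2], [2, 2]]),
                            res=np.deg2rad(0.5 / 60.0))
imap = enmap.rand_gauss(shape, wcs)              # stand-in for the tSZ map
smoothed = enmap.smooth_gauss(imap, beam_sigma)  # harmonic-space Gaussian smoothing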
def combine_tiles(ipathfmt, opathfmt, combine=2, downsample=2,
                  itile1=(None, None), itile2=(None, None),
                  tyflip=False, txflip=False, pad_to=None,
                  comm=None, verbose=False):
    """Given a set of tiles on disk at location ipathfmt % {"y":...,"x":...},
    combine them into larger tiles, downsample and write the result to
    opathfmt % {"y":...,"x":...}. x and y must be contiguous and start at 0.

    reftile[2] indicates the tile coordinates of the first valid input tile.
    This needs to be specified if not all tiles of the logical tiling are
    physically present. tyflip and txflip indicate if the tile coordinate
    system is reversed relative to the pixel coordinates or not.
    """
    # Expand combine and downsample to 2d
    combine = np.zeros(2, int) + combine
    downsample = np.zeros(2, int) + downsample
    if pad_to is not None:
        pad_to = np.zeros(2, int) + pad_to
    # Handle optional mpi
    rank, size = (comm.rank, comm.size) if comm is not None else (0, 1)
    # Find the range of input tiles
    itile1, itile2 = find_tile_range(ipathfmt, itile1, itile2)
    # Read the first tile to get its size information
    ibase = enmap.read_map(ipathfmt % {"y": itile1[0], "x": itile1[1]}) * 0
    # Find the set of output tiles we need to consider
    otile1 = itile1 // combine
    otile2 = (itile2 - 1) // combine + 1
    # And loop over them
    oyx = [(oy, ox) for oy in range(otile1[0], otile2[0])
           for ox in range(otile1[1], otile2[1])]
    for i in range(rank, len(oyx), size):
        oy, ox = oyx[i]
        # Read in all associated tiles into a list of lists
        rows = []
        for dy in range(combine[0]):
            iy = oy * combine[0] + dy
            if iy >= itile2[0]:
                continue
            cols = []
            for dx in range(combine[1]):
                ix = ox * combine[1] + dx
                if ix >= itile2[1]:
                    continue
                if iy < itile1[0] or ix < itile1[1]:
                    # The first tiles are missing on disk, but are
                    # logically a part of the tiling. Use ibase,
                    # which has been zeroed out.
                    cols.append(ibase)
                else:
                    itname = ipathfmt % {"y": iy, "x": ix}
                    cols.append(enmap.read_map(itname))
            if txflip:
                cols = cols[::-1]
            rows.append(cols)
        # Stack them next to each other into a big tile
        if tyflip:
            rows = rows[::-1]
        omap = enmap.tile_maps(rows)
        # Downgrade if necessary
        if np.any(downsample > 1):
            omap = enmap.downgrade(omap, downsample)
        if pad_to is not None:
            # Padding happens towards the end of the tiling,
            # which depends on the flip status
            padding = np.array([[0, 0],
                                [pad_to[0] - omap.shape[-2],
                                 pad_to[1] - omap.shape[-1]]])
            if tyflip:
                padding[:, 0] = padding[::-1, 0]
            if txflip:
                padding[:, 1] = padding[::-1, 1]
            omap = enmap.pad(omap, padding)
        # And output
        otname = opathfmt % {"y": oy, "x": ox}
        utils.mkdir(os.path.dirname(otname))
        enmap.write_map(otname, omap)
        if verbose:
            print(otname)
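# A hedged usage sketch for combine_tiles. The path templates are placeholders and
# find_tile_range is assumed to be defined alongside this function; output tiles
# are distributed over MPI ranks when a communicator is passed.
from mpi4py import MPI

combine_tiles("intiles/tile_%(y)03d_%(x)03d.fits",
              "outtiles/tile_%(y)03d_%(x)03d.fits",
              combine=2, downsample=2,
              comm=MPI.COMM_WORLD, verbose=True)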
def compute_map(self,oshape,owcs,qid,pixwin_taper_deg=0.3,pixwin_pad_deg=0.3,
                include_cmb=True,include_tsz=True,include_fgres=True,sht_beam=True):
    """
    1. get total alm
    2. apply beam, and pixel window if Planck
    3. ISHT
    4. if ACT, apply a small taper and apply pixel window in Fourier space
    """
    # pad to a slightly larger geometry
    tot_pad_deg = pixwin_taper_deg + pixwin_pad_deg
    res = maps.resolution(oshape,owcs)
    pix = np.deg2rad(tot_pad_deg)/res
    omap = enmap.pad(enmap.zeros((3,)+oshape,owcs),pix)
    ishape,iwcs = omap.shape[-2:],omap.wcs

    # get data model
    dm = sints.models[sints.arrays(qid,'data_model')](region_shape=ishape,region_wcs=iwcs,calibrated=True)

    # 1. get total alm
    array_index = self.qids.index(qid)
    tot_alm = int(include_cmb)*self.alms['cmb']

    if include_tsz:
        try:
            assert self.tsz_fnu.ndim==2
            tot_alm[0] = tot_alm[0] + hp.almxfl(self.alms['comptony'][0],self.tsz_fnu[array_index])
        except:
            tot_alm[0] = tot_alm[0] + self.alms['comptony'][0] * self.tsz_fnu[array_index]

    if self.cfgres is not None:
        tot_alm[0] = tot_alm[0] + int(include_fgres)*self.alms['fgres'][array_index]
    assert tot_alm.ndim==2
    assert tot_alm.shape[0]==3
    ells = np.arange(self.lmax+1)

    # 2. get beam, and pixel window for Planck
    if sht_beam:
        beam = tutils.get_kbeam(qid,ells,sanitize=False,planck_pixwin=False)  # NEVER SANITIZE THE BEAM IN A SIMULATION!!!
        for i in range(3):
            tot_alm[i] = hp.almxfl(tot_alm[i],beam)
        if dm.name=='planck_hybrid':
            pixwint,pixwinp = hp.pixwin(nside=tutils.get_nside(qid),lmax=self.lmax,pol=True)
            tot_alm[0] = hp.almxfl(tot_alm[0],pixwint)
            tot_alm[1] = hp.almxfl(tot_alm[1],pixwinp)
            tot_alm[2] = hp.almxfl(tot_alm[2],pixwinp)

    # 3. ISHT
    omap = curvedsky.alm2map(np.complex128(tot_alm),omap,spin=[0,2])
    assert omap.ndim==3
    assert omap.shape[0]==3

    if not(sht_beam):
        taper,_ = maps.get_taper_deg(ishape,iwcs,taper_width_degrees=pixwin_taper_deg,pad_width_degrees=pixwin_pad_deg)
        modlmap = omap.modlmap()
        beam = tutils.get_kbeam(qid,modlmap,sanitize=False,planck_pixwin=True)
        kmap = enmap.fft(omap*taper,normalize='phys')
        kmap = kmap * beam

    # 4. if ACT, apply a small taper and apply pixel window in Fourier space
    if dm.name=='act_mr3':
        if sht_beam:
            taper,_ = maps.get_taper_deg(ishape,iwcs,taper_width_degrees=pixwin_taper_deg,pad_width_degrees=pixwin_pad_deg)
        pwin = tutils.get_pixwin(ishape[-2:])
        if sht_beam:
            omap = maps.filter_map(omap*taper,pwin)
        else:
            kmap = kmap * pwin

    if not(sht_beam):
        omap = enmap.ifft(kmap,normalize='phys').real

    return enmap.extract(omap,(3,)+oshape[-2:],owcs)
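# A sketch of how this method might be called, assuming an already-constructed
# simulation object jsim (hypothetical name) that exposes compute_map, and using a
# placeholder qid; the output geometry here is just an example.
import numpy as np
from pixell import enmap

oshape, owcs = enmap.fullsky_geometry(res=np.deg2rad(2.0 / 60.0))
sim_map = jsim.compute_map(oshape, owcs, 'p04',
                           include_cmb=True, include_tsz=True,
                           include_fgres=False, sht_beam=True)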
from soapack import interfaces as sints
from pixell import enmap, utils as putils, bunch
from tilec import tiling, kspace, ilc, pipeline, fg as tfg
from orphics import mpi, io, maps, catalogs, cosmology
from enlib import bench
from enlib.pointsrcs import sim_srcs
from tilec.utils import coadd, is_planck, apodize_zero, get_splits, get_splits_ivar, robust_ref, filter_div, get_kbeam, load_geometries, get_specs
from szar import foregrounds as szfg

b = bunch.Bunch
seed = 0

parent_qid = 'd56_01'  # qid of array whose geometry will be used for the full map
ishape, iwcs = load_geometries([parent_qid])[parent_qid]
imap = enmap.zeros(ishape, iwcs)
imap2 = enmap.pad(imap, 900)
shape, wcs = imap2.shape, imap2.wcs
nsplits = 2

"""
We will make 1 sim of:
1. Unlensed CMB
2. SZ Point sources
3. a Planck-143 like 2 split system
4. a Planck-100 like 2 split system
5. an S16 pa2 like 2 split system
6. an S16 pa3 like 2 split system with uncorrelated 90 and 150
"""

# Make the unlensed CMB realization
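# A minimal sketch of the "unlensed CMB realization" step using pixell's curvedsky
# tools; the spectrum filename is a placeholder for whatever CAMB-format unlensed
# Cls file the pipeline actually uses, and lmax is illustrative.
from pixell import curvedsky, powspec

lmax = 6000
ps = powspec.read_spectrum("unlensed_scalar_cls.dat")   # placeholder path
cmb_alm = curvedsky.rand_alm(ps, lmax=lmax, seed=seed)  # (T, E, B) alms
cmb_map = curvedsky.alm2map(cmb_alm, enmap.zeros((3,) + shape, wcs), spin=[0, 2])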
                    comm=comm, width_deg=4., pix_arcmin=0.5)
for solution in solutions:
    ta.initialize_output(name=solution)
down = lambda x, n=2: enmap.downgrade(x, n)
if args.dtiles is not None:
    dtiles = [int(x) for x in args.dtiles.split(',')]
else:
    dtiles = []
if comm.rank == 0:
    enmap.write_map(
        os.environ['WORK'] + "/sim_tiling/ivar_ones.fits",
        enmap.pad(enmap.ones((2,) + pshape[-2:], pwcs), 900) * 0 + 1)
comm.Barrier()
for i, extracter, inserter, eshape, ewcs in ta.tiles(from_file=True):  # this is an MPI loop
    # What is the shape and wcs of the tile? is this needed?
    aids = []
    kdiffs = []
    ksplits = []
    kcoadds = []
    masks = []
    lmins = []
    lmaxs = []
    do_radial_fit = []
    hybrids = []
    friends = {}
version = 'v4.0_mask_version_mr3c_20190215_pickupsub_190301'
season, array, patch, freq = ('s13', 'pa1', 'deep1', 'f150')
shape, wcs = simgen.get_default_geometry(version, season, patch, array, freq)

SG = signal.SignalGen()
emaps = SG.get_signal_sim(season, patch, array, freq, 0, 0, oshape=shape, owcs=wcs)
cmbmaps = SG.get_cmb_sim(season, patch, array, freq, 0, 0, oshape=shape, owcs=wcs)
fgmaps = SG.get_fg_sim(season, patch, array, freq, 0, 0, oshape=shape, owcs=wcs)

make_plots('cmb', cmbmaps)
make_plots('cmb_fg', emaps)
make_plots('fg', fgmaps)
make_plots('diff_fg', emaps - cmbmaps)

template = cmbmaps[0]
template = enmap.pad(template, 100)
shape, wcs = template.shape, template.wcs

SG = signal.SignalGen(extract_region_shape=shape, extract_region_wcs=wcs)
cmbmaps = SG.get_cmb_sim('s15', 'pa3', 'deep56', 'f090', 0, 0)
make_plots('cmb_ext1', cmbmaps)

extract_region = enmap.zeros(shape, wcs)
SG = signal.SignalGen(extract_region=extract_region)
cmbmaps = SG.get_cmb_sim('s15', 'pa3', 'deep56', 'f090', 0, 0)
make_plots('cmb_ext2', cmbmaps)
'''
def sim_srcs(shape, wcs, srcs, beam, omap=None, dtype=None, nsigma=5, rmax=None, smul=1,
             return_padded=False, pixwin=False, op=np.add, wrap="auto", verbose=False, cache=None):
    """Simulate a point source map in the geometry given by shape, wcs
    for the given srcs[nsrc,{dec,ra,T...}], using the beam[{r,val},npoint],
    which must be equispaced. If omap is specified, the sources will be added
    to it in place. All angles are in radians. The beam is only evaluated up to
    the point where it reaches exp(-0.5*nsigma**2) unless rmax is specified, in
    which case this gives the maximum radius. smul gives a factor to multiply
    the resulting source model by. This is mostly useful in conjunction with omap.
    The source simulation is sped up by using a source lookup grid.
    """
    if omap is None:
        omap = enmap.zeros(shape, wcs, dtype)
    ishape = omap.shape
    omap = omap.preflat
    ncomp = omap.shape[0]
    # Set up wrapping
    if wrap == "auto":
        wrap = [0, utils.nint(360. / wcs.wcs.cdelt[0])]
    # In keeping with the rest of the functions here, srcs is [nsrc,{dec,ra,T,Q,U}].
    # The beam parameters are ignored - the beam argument is used instead
    amps = srcs[:, 2:2 + ncomp]
    poss = srcs[:, :2].copy()
    # Rewind positions to let us use flat-sky approximation for distance calculations
    ref = np.mean(enmap.box(shape, wcs, corner=False)[:, 1])
    poss[:, 1] = utils.rewind(poss[:, 1], ref)
    beam = expand_beam(beam, nsigma, rmax)
    rmax = nsigma2rmax(beam, nsigma)
    # Pad our map by rmax, so we get the contribution from sources just outside
    # our area. We will later split our map into cells of size cres. Let's
    # adjust the padding so we have a whole number of cells
    minshape = np.min(omap[..., 5:-5:10, 5:-5:10].pixshapemap() / 10, (-2, -1))
    cres = np.maximum(1, utils.nint(rmax / minshape))
    epix = cres - (omap.shape[-2:] + 2 * cres) % cres
    padding = [cres, cres + epix]
    wmap, wslice = enmap.pad(omap, padding, return_slice=True)
    # Overall we will have this many grid cells
    cshape = wmap.shape[-2:] / cres
    # Find out which sources matter for which cells
    srcpix = wmap.sky2pix(poss.T).T
    pixbox = np.array([[0, 0], wmap.shape[-2:]], int)
    nhit, cell_srcs = build_src_cells(pixbox, srcpix, cres, wrap=wrap)
    # Optionally cache the posmap
    if cache is None or cache[0] is None:
        posmap = wmap.posmap()
    else:
        posmap = cache[0]
    if cache is not None:
        cache[0] = posmap
    model = eval_srcs_loop(posmap, poss, amps, beam, cres, nhit, cell_srcs,
                           dtype=wmap.dtype, op=op, verbose=verbose)
    del posmap
    if pixwin:
        model = enmap.apply_window(model)
    # Update our work map, through our view
    if smul != 1:
        model *= smul
    wmap = op(wmap, model, wmap)
    if not return_padded:
        # Copy out
        omap[:] = wmap[wslice]
        # Restore shape
        omap = omap.reshape(ishape)
        return omap
    else:
        return wmap.reshape(ishape[:-2] + wmap.shape[-2:]), wslice
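# A hedged example of driving sim_srcs with a Gaussian beam profile; the helpers it
# relies on (expand_beam, nsigma2rmax, build_src_cells, eval_srcs_loop) are assumed
# to be defined in the same module, and the sources/beam here are illustrative only.
import numpy as np
from pixell import enmap, utils

shape, wcs = enmap.geometry(pos=np.deg2rad([[-5, -5], [5, 5]]),
                            res=np.deg2rad(0.5 / 60.0))
sigma = 1.4 * utils.arcmin / (2 * np.sqrt(2 * np.log(2)))  # 1.4' FWHM -> sigma
r = np.linspace(0, 10 * sigma, 1000)
beam = np.array([r, np.exp(-0.5 * (r / sigma)**2)])  # [{r,val},npoint], equispaced

# srcs is [nsrc, {dec, ra, T}]: two sources with 100 and 50 uK amplitudes
srcs = np.array([[0.0, 0.0, 100.0],
                 [np.deg2rad(1.0), np.deg2rad(-2.0), 50.0]])
omap = sim_srcs(shape, wcs, srcs, beam, dtype=np.float64)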
mask = sints.get_act_mr3_crosslinked_mask(
    mpatch,
    version=in_versions[survey],
    kind='binary_apod',
    season="s16" if survey == 'advact' else None,
    array=None,
    pad=pad)
print(survey, patch)

# FFT friendliness
Ny, Nx = mask.shape[-2:]
dNy = fft.fft_len(Ny, "above")
dNx = fft.fft_len(Nx, "above")
pny = dNy - Ny
pnx = dNx - Nx
pady1 = pny // 2
pady2 = pny - pady1
padx1 = pnx // 2
padx2 = pnx - padx1
mask = enmap.pad(mask, ((pady1, padx1), (pady2, padx2)))
assert mask.shape[-2] == dNy
assert mask.shape[-1] == dNx

enmap.write_map(out_path + "%s.fits" % patch, mask)
io.hplot(enmap.downgrade(mask, 8), out_path + "%s" % patch)

sN = get_smooth_N(patch)
smoothed = mask_smoothing(mask, sN)
enmap.write_map(out_path + "%s_smoothed.fits" % patch, smoothed)
io.hplot(enmap.downgrade(smoothed, 8), out_path + "%s_smoothed" % patch)
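# For reference, fft_len just rounds a length to a nearby FFT-friendly size
# (a product of small prime factors); a quick check of the behavior assumed above:
from pixell import fft

n = 3001  # illustrative length
print(fft.fft_len(n, "above"))  # smallest FFT-friendly length >= n
print(fft.fft_len(n, "below"))  # largest FFT-friendly length <= n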