def write(fname, data, splits=None, type="auto", maxmaps=1000, **kwargs):
    import numpy as np
    from pixell import enmap, utils
    if type == "auto": type = infer_type(fname)
    if type == "zip":
        work, flexopen = zipfile.ZipFile(fname, "w"), zip_flexopen
    elif type == "dir":
        utils.mkdir(fname)
        work, flexopen = fname, dir_flexopen
    else:
        raise ValueError("Unrecognized type '%s'" % str(type))
    with flexopen(work, "info.txt", "w") as f:
        write_info(f, data)
    with flexopen(work, "beam.txt", "w") as f:
        np.savetxt(f, np.array([np.arange(len(data.beam)), data.beam]).T, fmt="%5.0f %15.7e")
    for i, m in enumerate(data.maps):
        with flexopen(work, "map%d.fits" % (i+1), "w") as f:
            enmap.write_fits(f, m)
    for i, m in enumerate(data.ivars):
        with flexopen(work, "ivar%d.fits" % (i+1), "w") as f:
            enmap.write_fits(f, m)
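
# Usage sketch (hedged): `data` is expected to be a bunch-like object with at
# least .beam (a 1d beam transfer function), .maps and .ivars (lists of enmaps);
# write_info() may also require metadata fields not shown here. A typical call,
# with a made-up file name, would be
#
#   write("dataset_f090.zip", data, type="zip")   # or type="dir" / "auto"
#
# where type="auto" picks zip vs. directory output from the file name.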
def write_results(odir, res, padding=0, tag=None):
    def unpad(map):
        if padding == 0: return map
        else: return map[..., padding:-padding, padding:-padding]
    def fix(map):
        return unpad(enmap.apply_window(map)) / res.fconvs[:, None, None, None]
    utils.mkdir(odir)
    suffix = "" if tag is None else "_" + tag
    enmap.write_map("%s/map%s.fits"   % (odir, suffix), fix(res.maps))
    enmap.write_map("%s/model%s.fits" % (odir, suffix), fix(res.model))
    enmap.write_map("%s/resid%s.fits" % (odir, suffix), fix(res.maps - res.model))
    if res.snr is not None:
        enmap.write_map("%s/map_snr%s.fits" % (odir, suffix), unpad(res.snr))
    if res.resid_snr is not None:
        enmap.write_map("%s/resid_snr%s.fits" % (odir, suffix), unpad(res.resid_snr))
    # If we have indices of the catalog objects into a predefined catalog, then
    # append that as a new field, so we can use it in merge_results later
    cat = recfunctions.append_fields(res.cat, "inds", res.inds) if res.inds is not None else res.cat
    pointsrcs.write_sauron("%s/cat%s.fits" % (odir, suffix), cat)
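
# Usage sketch (hedged): `res` is expected to be a bunch-like result with at
# least .maps, .model, .snr, .resid_snr, .fconvs, .cat and .inds, as produced
# by the search step. A typical call, with a made-up output directory, is
#
#   write_results("output/run1", res, padding=150, tag="003_007")
#
# padding must match the padding used when the tile was cut out, so that the
# written maps line up with the unpadded tile geometry.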
"Pixel condition number below which polarization is dropped to make total intensity more stable. Should be a high value for single-tod maps to avoid thin stripes with really high noise" ) config.default( "map_sys", "equ", "The coordinate system of the maps. Can be eg. 'hor', 'equ' or 'gal'.") config.default("map_dist", False, "Whether to use distributed maps") parser = config.ArgumentParser() parser.add_argument("sel", help="TOD selction query") parser.add_argument("area", help="Geometry to map") parser.add_argument("odir", help="Output directory") parser.add_argument("prefix", nargs="?", help="Output file name prefix") parser.add_argument("--dets", type=str, default=0, help="Detector slice") args = parser.parse_args() utils.mkdir(args.odir) comm = mpi.COMM_WORLD dtype = np.float32 if config.get("map_bits") == 32 else np.float64 ncomp = 3 tsize = 720 root = args.odir + "/" + (args.prefix + "_" if args.prefix else "") down = config.get("downsample") # Set up logging utils.mkdir(root + ".log") logfile = root + ".log/log%03d.txt" % comm.rank log_level = log.verbosity2level(config.get("verbosity")) L = log.init(level=log_level, file=logfile, rank=comm.rank, shared=True) # Set up our geometry shape, wcs = enmap.read_map_geometry(args.area) shape = (ncomp, ) + shape[-2:] msys = config.get("map_sys")
#     wmap = enmap.ifft(enmap.fft(wmap)*matched_filter).real**2
#     lim  = max(np.median(wmap)*tol1**2, np.max(wmap)*tol2**2)
#     mask |= wmap > lim
#     return mask

comm   = mpi.COMM_WORLD
beam1d = get_beam(args.beam)
ifiles = sorted(sum([glob.glob(ifile) for ifile in args.ifiles], []))

for ind in range(comm.rank, len(ifiles), comm.size):
    ifile = ifiles[ind]
    if args.verbose: print(ifile)
    ofile = args.odir + "/" + ifile
    imap  = enmap.read_map(ifile)
    if args.mask is not None: mask = imap == args.mask
    if args.apodize:
        imap = imap.apod(args.apodize)
    # We will apply a semi-matched-filter to T
    l      = np.maximum(1, imap.modlmap())
    beam2d = enmap.samewcs(np.interp(l, np.arange(len(beam1d)), beam1d), imap)
    matched_filter = (1 + (l/args.lknee)**args.alpha)**-1 * beam2d
    fmap = enmap.map2harm(imap, iau=True)
    fmap[0] *= matched_filter
    omap = enmap.ifft(fmap).real
    if args.mask is not None:
        omap[mask] = 0
        del mask
    utils.mkdir(os.path.dirname(ofile))
    enmap.write_map(ofile, omap)
    del omap
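
# Minimal, self-contained sketch of the semi-matched filter applied above, on a
# synthetic map. The beam width, lknee and alpha values are made up for
# illustration; the real script interpolates a tabulated 1d beam from
# get_beam(args.beam) and takes lknee/alpha from the command line.
def _matched_filter_demo():
    import numpy as np
    from pixell import enmap
    shape, wcs = enmap.geometry(pos=np.radians([[-1, -1], [1, 1]]), res=np.radians(0.5/60), proj="car")
    imap = enmap.rand_gauss((3,) + shape[-2:], wcs)
    l    = np.maximum(1, imap.modlmap())
    # Gaussian beam transfer function as a stand-in for the tabulated beam
    bsigma = np.radians(1.4/60) / (8*np.log(2))**0.5
    beam2d = np.exp(-0.5*(l*bsigma)**2)
    lknee, alpha = 2000, -3
    filt = (1 + (l/lknee)**alpha)**-1 * beam2d
    # Same structure as above: filter only the T component in harmonic space
    fmap = enmap.map2harm(imap, iau=True)
    fmap[0] *= filt
    return enmap.ifft(fmap).real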
def combine_tiles(ipathfmt, opathfmt, combine=2, downsample=2,
                  itile1=(None, None), itile2=(None, None), tyflip=False, txflip=False,
                  pad_to=None, comm=None, verbose=False):
    """Given a set of tiles on disk at location ipathfmt % {"y":...,"x":...},
    combine them into larger tiles, downsample and write the result to
    opathfmt % {"y":...,"x":...}. x and y must be contiguous and start at 0.

    reftile[2] indicates the tile coordinates of the first valid input tile.
    This needs to be specified if not all tiles of the logical tiling are
    physically present. tyflip and txflip indicate whether the tile coordinate
    system is reversed relative to the pixel coordinates or not."""
    # Expand combine and downsample to 2d
    combine    = np.zeros(2, int) + combine
    downsample = np.zeros(2, int) + downsample
    if pad_to is not None:
        pad_to = np.zeros(2, int) + pad_to
    # Handle optional mpi
    rank, size = (comm.rank, comm.size) if comm is not None else (0, 1)
    # Find the range of input tiles
    itile1, itile2 = find_tile_range(ipathfmt, itile1, itile2)
    # Read the first tile to get its size information
    ibase = enmap.read_map(ipathfmt % {"y": itile1[0], "x": itile1[1]}) * 0
    # Find the set of output tiles we need to consider
    otile1 = itile1 // combine
    otile2 = (itile2 - 1) // combine + 1
    # And loop over them
    oyx = [(oy, ox) for oy in range(otile1[0], otile2[0]) for ox in range(otile1[1], otile2[1])]
    for i in range(rank, len(oyx), size):
        oy, ox = oyx[i]
        # Read in all associated tiles into a list of lists
        rows = []
        for dy in range(combine[0]):
            iy = oy * combine[0] + dy
            if iy >= itile2[0]: continue
            cols = []
            for dx in range(combine[1]):
                ix = ox * combine[1] + dx
                if ix >= itile2[1]: continue
                if iy < itile1[0] or ix < itile1[1]:
                    # The first tiles are missing on disk, but are
                    # logically a part of the tiling. Use ibase,
                    # which has been zeroed out.
                    cols.append(ibase)
                else:
                    itname = ipathfmt % {"y": iy, "x": ix}
                    cols.append(enmap.read_map(itname))
            if txflip: cols = cols[::-1]
            rows.append(cols)
        # Stack them next to each other into a big tile
        if tyflip: rows = rows[::-1]
        omap = enmap.tile_maps(rows)
        # Downgrade if necessary
        if np.any(downsample > 1):
            omap = enmap.downgrade(omap, downsample)
        if pad_to is not None:
            # Padding happens towards the end of the tiling,
            # which depends on the flip status
            padding = np.array([[0, 0], [pad_to[0] - omap.shape[-2], pad_to[1] - omap.shape[-1]]])
            if tyflip: padding[:, 0] = padding[::-1, 0]
            if txflip: padding[:, 1] = padding[::-1, 1]
            omap = enmap.pad(omap, padding)
        # And output
        otname = opathfmt % {"y": oy, "x": ox}
        utils.mkdir(os.path.dirname(otname))
        enmap.write_map(otname, omap)
        if verbose: print(otname)
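
# Hypothetical usage sketch for combine_tiles: build a toy 2x2 grid of tiles on
# disk, then merge them into a single downsampled tile. The directory name,
# tile naming and sizes are made up; the path formats just need the "y"/"x"
# placeholders described in the docstring.
def _combine_tiles_demo(workdir="tiles_demo"):
    import numpy as np
    from pixell import enmap, utils
    utils.mkdir(workdir)
    shape, wcs = enmap.geometry(pos=np.radians([[-1, -1], [1, 1]]), shape=(200, 200), proj="car")
    full = enmap.zeros(shape, wcs)
    for ty in range(2):
        for tx in range(2):
            # Slicing keeps the wcs of each tile consistent with its location
            tile = full[ty*100:(ty+1)*100, tx*100:(tx+1)*100] + (ty*2 + tx)
            enmap.write_map("%s/tile%d_%d.fits" % (workdir, ty, tx), tile)
    combine_tiles(workdir + "/tile%(y)d_%(x)d.fits",
                  workdir + "/big%(y)d_%(x)d.fits", combine=2, downsample=2)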
def retile(ipathfmt, opathfmt, itile1=(None, None), itile2=(None, None),
           otileoff=(0, 0), otilenum=(None, None), ocorner=(-np.pi / 2, -np.pi),
           otilesize=(675, 675), comm=None, verbose=False, slice=None, wrap=True):
    """Given a set of tiles on disk with locations ipathfmt % {"y":...,"x":...},
    retile them into a new tiling and write the result to
    opathfmt % {"y":...,"x":...}. The new tiling will have tile size given by
    otilesize[2]. Negative size means the tiling will go down/left instead of
    up/right. The corner of the tiling will be at sky coordinates ocorner[2]
    in radians. The new tiling will be pixel-compatible with the input
    tiling - e.g. the wcs will only differ by crpix.

    The output tiling will logically cover the whole sky, but only output
    tiles that overlap with input tiles will actually be written. This can be
    modified by using otileoff[2] and otilenum[2]. otileoff gives the tile
    indices of the corner tile, while otilenum indicates the number of tiles
    to write."""
    # Set up mpi
    rank, size = (comm.rank, comm.size) if comm is not None else (0, 1)
    # Expand any scalars
    if otilesize is None: otilesize = (675, 675)
    otilesize = np.zeros(2, int) + otilesize
    otileoff  = np.zeros(2, int) + otileoff
    # Find the range of input tiles
    itile1, itile2 = find_tile_range(ipathfmt, itile1, itile2)
    # To fill in the rest of the information we need to know more
    # about the input tiling, so read the first tile
    ibase = enmap.read_map(ipathfmt % {"y": itile1[0], "x": itile1[1]})
    if slice: ibase = eval("ibase" + slice)
    itilesize = ibase.shape[-2:]
    ixres = ibase.wcs.wcs.cdelt[0]
    nphi  = utils.nint(360 / np.abs(ixres))
    ntile_wrap = nphi // otilesize[1]
    # Find the pixel position of our output corners according to the wcs.
    # This is the last place we need to do a coordinate transformation.
    # All the rest can be done in pure pixel logic.
    pixoff = np.round(ibase.sky2pix(ocorner)).astype(int)
    # Find the range of output tiles
    def pix2otile(pix, ioff, osize):
        return (pix - ioff) // osize
    otile1 = pix2otile(itile1 * itilesize, pixoff, otilesize)
    otile2 = pix2otile(itile2 * itilesize - 1, pixoff, otilesize)
    otile1, otile2 = np.minimum(otile1, otile2), np.maximum(otile1, otile2)
    otile2 += 1
    # We can now loop over output tiles
    cache = [None, None, None]
    oyx = [(oy, ox) for oy in range(otile1[0], otile2[0]) for ox in range(otile1[1], otile2[1])]
    for i in range(rank, len(oyx), size):
        otile = np.array(oyx[i])
        # Find out which input tiles overlap with this output tile.
        # Our tile stretches from opix1:opix2 relative to the global input pixels
        opix1 = otile * otilesize + pixoff
        opix2 = (otile + 1) * otilesize + pixoff
        # output tiles and input tiles may increase in opposite directions
        opix1, opix2 = np.minimum(opix1, opix2), np.maximum(opix1, opix2)
        try:
            omap = read_area(ipathfmt, [opix1, opix2], itile1=itile1, itile2=itile2,
                             cache=cache, slice=slice)
        except (IOError, OSError):
            continue
        x = otile[1] + otileoff[1]
        if wrap: x %= ntile_wrap
        oname = opathfmt % {"y": otile[0] + otileoff[0], "x": x}
        utils.mkdir(os.path.dirname(oname))
        enmap.write_map(oname, omap)
        if verbose: print(oname)
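
# Hypothetical usage sketch for retile: re-chunk an existing 675x675 tiling into
# 1440x1440 tiles whose corner sits at (dec, ra) = (-pi/2, -pi). Paths and tile
# naming are made up; comm just needs .rank and .size attributes (e.g. an
# mpi4py communicator), or can be left as None for serial use.
#
#   retile("input/tile%(y)03d_%(x)03d.fits", "output/tile%(y)03d_%(x)03d.fits",
#          otilesize=(1440, 1440), ocorner=(-np.pi/2, -np.pi), comm=None)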
    return np.polynomial.legendre.legval(np.cos(theta), prefactor * cl_full)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("sdir", help='Spectra directory')
    parser.add_argument("odir", help='Output directory')
    parser.add_argument("--min-bin", type=int, default=0,
                        help='Specify the index of first bin to be plotted')
    args = parser.parse_args()

    if rank == 0:
        utils.mkdir(args.odir)

    subdirs = get_subdirs(args.sdir)
    for subdir in subdirs[rank::comm.Get_size()]:
        print('[rank {:03d}]: plotting spectra for {}'.format(rank, subdir))
        outdir = opj(args.odir, subdir)
        utils.mkdir(outdir)

        ells  = np.load(opj(args.sdir, subdir, 'ell.npy'))
        theta = np.radians(np.logspace(-3, np.log10(179), num=4000))

        # Cross spectra.
        cl = np.load(opj(args.sdir, subdir, 'cb.npy'))
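
        # The Legendre transform in the function above converts a power spectrum
        # C_l into the angular correlation function
        # C(theta) = sum_l (2l+1)/(4pi) C_l P_l(cos(theta)).
        # A minimal stand-alone version of that step (the (2l+1)/(4pi) factor is
        # the standard convention; whether it matches `prefactor` above depends
        # on this script's conventions) would be:
        #
        #   coeff = (2*ells + 1) / (4*np.pi) * cl_full
        #   corr  = np.polynomial.legendre.legval(np.cos(theta), coeff)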
    if args.verbose: print("Writing %s" % ohit)
    enmap.write_map(ohit, w)

# Two cases: Normal enmaps or dmaps
if not os.path.isdir(imaps[0]):
    # Normal monolithic map
    coadd_maps(imaps, ihits, args.omap, args.ohit, cont=args.cont, ncomp=args.ncomp)
else:
    # Dmap. Each name is actually a directory, but they
    # all have compatible tile names.
    tilenames = get_tilenames(imaps[0])
    utils.mkdir(args.omap)
    utils.mkdir(args.ohit)
    for tilename in tilenames[comm.rank::comm.size]:
        timaps = ["%s/%s" % (imap, tilename) for imap in imaps]
        tihits = ["%s/%s" % (ihit, tilename) for ihit in ihits]
        print("%3d %s" % (comm.rank, tilename))
        coadd_maps(timaps, tihits, args.omap + "/" + tilename, args.ohit + "/" + tilename,
                   cont=args.cont, ncomp=args.ncomp)
if args.verbose: print("Done")
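
# Usage note (hedged): in the dmap branch above, each entry of imaps/ihits is a
# directory of per-tile FITS files with matching names across all inputs
# (whatever names get_tilenames() returns, e.g. something like tile000_000.fits).
# Each MPI rank then coadds its share of tiles independently, so the output
# directories end up with the same tile layout as the inputs.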
def search_maps_tiled(ifiles, odir, tshape=(1000, 1000), margin=100, padding=150,
                      mode="find", icat=None, box=None, pixbox=None, sel=None, mask=None,
                      templates=default_templates, cl_cmb=None, freq0=98.0,
                      nmat1="constcorr", nmat2="constcorr", snr1=5, snr2=4, comps="TQU",
                      dtype=np.float32, comm=None, cont=False, sim_cat=None,
                      sim_noise=False, verbose=False):
    wdir = odir + "/work"
    utils.mkdir(wdir)
    if comm is None:
        comm = bunch.Bunch(rank=0, size=1, Barrier=lambda: None)
    tshape = np.zeros(2, int) + tshape
    meta   = mapdata.read_meta(ifiles[0])
    # Allow us to slice the map that will be tiled
    geo = enmap.Geometry(*meta.map_geometry)
    if pixbox is not None or box is not None:
        geo = geo.submap(pixbox=pixbox, box=box)
    if sel is not None: geo = geo[sel]
    shape = np.array(geo.shape[-2:])
    ny, nx = (shape + tshape - 1) // tshape
    def is_done(ty, tx):
        return os.path.isfile("%s/cat_%03d_%03d.fits" % (wdir, ty, tx))
    tyxs = [(ty, tx) for ty in range(ny) for tx in range(nx)
            if (not cont or not is_done(ty, tx))]
    for ind in range(comm.rank, len(tyxs), comm.size):
        # Get basic area of this tile
        tyx = np.array(tyxs[ind])
        if verbose:
            print("%2d Processing tile %2d %2d of %2d %2d" % (comm.rank, tyx[0], tyx[1], ny, nx))
        yx1 = tyx * tshape
        yx2 = np.minimum((tyx + 1) * tshape, shape)
        # Apply padding
        wyx1 = yx1 - margin - padding
        wyx2 = yx2 + margin + padding
        # Transform from box-relative pixbox to global pixbox
        off = enmap.pixbox_of(meta.map_geometry[1], *geo)[0]
        wyx1 += off
        wyx2 += off
        # Process this tile
        res = search_maps(ifiles, mode=mode, icat=icat, pixbox=[wyx1, wyx2],
                          templates=templates, mask=mask, cl_cmb=cl_cmb, freq0=freq0,
                          nmat1=nmat1, nmat2=nmat2, snr1=snr1, snr2=snr2, comps=comps,
                          dtype=dtype, sim_cat=sim_cat, sim_noise=sim_noise, verbose=verbose)
        # Write tile results to work directory. We do this to avoid using too much memory,
        # and to allow us to continue
        write_results(wdir, res, padding=padding, tag="%03d_%03d" % tuple(tyx))
    comm.Barrier()
    # When everything's done, merge things into single files
    if comm.rank == 0:
        merge_results(wdir, odir, geo, tshape=tshape, margin=margin, verbose=verbose)
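
# Hypothetical usage sketch: run the tiled search over a set of input map files,
# splitting 1000x1000-pixel tiles over MPI and resuming any tiles already done.
# File names are made up; comm needs .rank, .size and .Barrier(), e.g. an
# mpi4py communicator, or None for serial use.
#
#   search_maps_tiled(["f090.zip", "f150.zip"], "output/run1", tshape=(1000, 1000),
#                     mode="find", snr1=5, snr2=4, comm=None, cont=True, verbose=True)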