Example #1
def read_meta(fname, type="auto", maxmaps=1000, **kwargs):
    """Read metadata for the given mapdata file. Returns a bunch of
	nmap, map_geometry, ivar_geometry"""
    import numpy as np
    from pixell import enmap
    if type == "auto": type = infer_type(fname)
    if type == "zip":
        work, flexget, has = zipfile.ZipFile(fname, "r"), zip_flexopen, zip_has
    elif type == "dir":
        work, flexget, has = fname, dir_flexopen, dir_has
    elif type == "mapinfo":
        work, flexget, has = fname, mapinfo_flexget, mapinfo_has
    else:
        raise ValueError("Unrecognized type '%s'" % str(type))
    meta = bunch.Bunch(nmap=0, map_geometry=None, ivar_geometry=None)
    with flexget(work, "map1.fits") as f:
        meta["map_geometry"] = enmap.read_fits_geometry(f, **kwargs)
    with flexget(work, "ivar1.fits") as f:
        meta["ivar_geometry"] = enmap.read_fits_geometry(f, **kwargs)
    for i in range(maxmaps):
        if has(work, "map%d.fits" % (i + 1)): meta.nmap = i + 1
        else: break
    with flexget(work, "beam.txt") as f:
        meta.beam = read_beam(f)
    with flexget(work, "info.txt") as f:
        read_info(f, meta)
    if type == "zip": work.close()
    return meta
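
A minimal usage sketch, assuming the helpers above are importable and that "act_f090.zip" is a hypothetical mapdata archive containing map1.fits, ivar1.fits, beam.txt and info.txt:

# Hypothetical example: inspect a mapdata file without reading the full maps.
meta = read_meta("act_f090.zip")   # type inferred from the file name
print(meta.nmap)                   # number of map/ivar pairs present
print(meta.map_geometry)           # (shape, wcs) from map1.fits
print(meta.freq, meta.gain)        # set by read_info, if present in info.txt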
Example #2
def read_mapinfo(fname, type="auto", maxmaps=1000):
    """Reads the filenames (+info) from a link-type mapdata file, returning a dictionary
	{maps:[fname,...], ivars:[fname,...], beam:fname, info:{beam:num, freq:num}}"""
    if type == "auto": type = infer_type(fname)
    res = bunch.Bunch(maps=[], ivars=[], beam=None, gain=None, freq=None)
    if type == "zip":
        with zipfile.ZipFile(fname, "r") as ifile:
            res["beam"] = zip_readlink(ifile, "beam.txt")
            with ifile.open("info.txt", "r") as f:
                read_info(f, res)
            for i in range(0, maxmaps):
                try:
                    mapfile = zip_readlink(ifile, "map%d.fits" % (i + 1))
                    ivarfile = zip_readlink(ifile, "ivar%d.fits" % (i + 1))
                except KeyError:
                    break
                res["maps"].append(mapfile)
                res["ivars"].append(ivarfile)
            return res
    elif type == "dir":
        res["beam"] = getlink(fname + "/beam.txt")
        with open(fname + "/info.txt", "rb") as f:
            read_info(f, res)
        for i in range(0, maxmaps):
            try:
                mapfile = getlink(fname + "/map%d.fits" % (i + 1))
                ivarfile = getlink(fname + "/ivar%d.fits" % (i + 1))
            except FileNotFoundError:
                break
            res["maps"].append(mapfile)
            res["ivars"].append(ivarfile)
        return res
    else:
        raise ValueError("Unrecognized type '%s'" % str(type))
Example #3
def build_mapinfo(mapfiles=None,
                  ivarfiles=None,
                  beamfile=None,
                  infofile=None,
                  gain=None,
                  freq=None,
                  mapdatafile=None):
    """Build a mapinfo dictionary based on a combination of:
	mapdatafile: Path to an existing symbolic-link mapdata file. Any information not specified in
	  the other arguments will be taken from here
	mapfiles[], ivarfiles[], beamfile, infofile: Paths (or lists of paths for mapfiles or ivarfiles)
	  to the maps, ivars, beam and info that make up the mapdata file.
	gain: The gain correction to apply to these maps, as a real number. Overrides the mapdatafile and infofile values.
	freq: As gain, but for the central frequency in GHz.
	"""
    data = bunch.Bunch(maps=[], ivars=[], beam=None, gain=None, freq=None)
    if mapdatafile is not None: data = read_mapinfo(mapdatafile)
    if mapfiles is not None:
        data["maps"] = [os.path.realpath(fname) for fname in mapfiles]
    if ivarfiles is not None:
        data["ivars"] = [os.path.realpath(fname) for fname in ivarfiles]
    if beamfile is not None: data["beam"] = os.path.realpath(beamfile)
    if infofile is not None:
        with open(infofile, "rb") as f:
            read_info(f, data)
    if gain is not None: data["gain"] = gain
    if freq is not None: data["freq"] = freq
    if data["gain"] is None: data["gain"] = 1.0
    return data
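
A hedged sketch of combining an existing mapdata file with overrides; both paths are hypothetical:

# Take maps/ivars/freq from an existing file, but swap in a new beam and gain.
info = build_mapinfo(mapdatafile="old_mapdata.zip",
                     beamfile="new_beam.txt",
                     gain=1.02)
print(info["beam"])  # absolute path, resolved with os.path.realpath
print(info["gain"])  # 1.02, overriding any value from the mapdata file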
Example #4
def solve(self, maxiter=500, maxerr=1e-6):
    self.prepare()
    rhs = self.dof.zip(self.map_rhs, self.junk_rhs)
    solver = utils.CG(self.A, rhs, M=self.M, dot=self.dof.dot)
    # The loop condition used to be "while True or ...", which never terminated;
    # the intended stop criterion is the iteration and error limits below
    while solver.i < maxiter and solver.err > maxerr:
        solver.step()
        yield bunch.Bunch(i=solver.i,
                          err=solver.err,
                          x=self.dof.unzip(solver.x)[0])
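
Because solve is a generator that yields one bunch per CG step, the caller controls logging and early stopping. A minimal sketch, assuming mapmaker is an instance of the surrounding class:

# Run the CG solver, printing the residual at each step.
for step in mapmaker.solve(maxiter=500, maxerr=1e-6):
    print("step %4d err %15.7e" % (step.i, step.err))
final_map = step.x  # map estimate from the last completed step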
Example #5
def websky_decode(data, cosmology, mass_interp):
    """Go from a raw websky catalog to pos, z and m200"""
    chi = np.sum(data.T[:3]**2, 0)**0.5  # comoving Mpc
    a = pyccl.scale_factor_of_chi(cosmology, chi)
    z = 1 / a - 1
    R = data.T[6].astype(float) * 1e6 * utils.pc  # m. This is *not* r200!
    rho_m = calc_rho_c(0, cosmology) * cosmology["Omega_m"]
    m200m = 4 / 3 * np.pi * rho_m * R**3
    m200 = mass_interp(m200m, z)
    ra, dec = utils.rect2ang(data.T[:3])
    return bunch.Bunch(z=z, ra=ra, dec=dec, m200=m200)
Example #6
def read_tileset_geometry(ipathfmt, itile1=(None, None), itile2=(None, None)):
    itile1, itile2 = find_tile_range(ipathfmt, itile1, itile2)
    mfile1 = ipathfmt % {"y": itile1[0], "x": itile1[1]}
    mfile2 = ipathfmt % {"y": itile2[0] - 1, "x": itile2[1] - 1}
    m1 = enmap.read_map(mfile1)
    m2 = m1 if mfile1 == mfile2 else enmap.read_map(mfile2)
    wy, wx = m1.shape[-2:]
    oshape = tuple(
        np.array(m1.shape[-2:]) * (itile2 - itile1 - 1) +
        np.array(m2.shape[-2:]))
    return bunch.Bunch(shape=m1.shape[:-2] + oshape,
                       wcs=m1.wcs,
                       dtype=m1.dtype,
                       tshape=m1.shape[-2:])
Example #7
def get_params_battaglia(m200, z, cosmology):
    """Return a bunch of xc, alpha, beta, gamma for a cluster with
	the given m200 in SI units."""
    # First get the gnfw parameters. utils.gnfw has the opposite sign for
    # beta and gamma as nemo, but otherwise the same convention
    z1 = z + 1
    m = m200 / (1e14 * utils.M_sun)
    P0 = 18.1 * m**0.154 * z1**-0.758
    xc = 0.497 * m**-0.00865 * z1**0.731
    beta = 4.35 * m**0.0393 * z1**0.415
    alpha = 1
    gamma = -0.3
    # Go from battaglia convention to standard gnfw
    beta = gamma - alpha * beta
    return bunch.Bunch(xc=xc, alpha=alpha, beta=beta, gamma=gamma, P0=P0)
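
For concreteness, a hypothetical call for a 2e14 solar-mass cluster at z = 0.5; utils.M_sun is assumed to be the solar mass in kg (matching its use above), and cosmology a pyccl cosmology object as in websky_decode:

params = get_params_battaglia(2e14 * utils.M_sun, z=0.5, cosmology=cosmology)
print(params.P0, params.xc, params.beta)  # amplitude, core scale, outer slope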
Example #8
def read_info(fileobj, out=None):
    if out is None: out = bunch.Bunch()
    try:
        out.beam = fileobj.beam
        out.freq = fileobj.freq
    except AttributeError:
        for line in fileobj:
            line = line.decode()
            toks = line.split(":")
            if len(toks) == 0 or line.startswith("#"): continue
            if toks[0] == "gain": out["gain"] = float(toks[1])
            elif toks[0] == "freq": out["freq"] = float(toks[1])
            else:
                raise IOError("Unrecognized key '%s' in info ifle" % (toks[0]))
    return out
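
read_info accepts either an object exposing beam/freq attributes or a binary file of colon-separated key: value lines. A small round-trip sketch, with an in-memory file standing in for a hypothetical info.txt:

import io
# Equivalent to an info.txt containing "gain: 1.0" and "freq: 98.0".
info = read_info(io.BytesIO(b"gain: 1.0\nfreq: 98.0\n"))
print(info["gain"], info["freq"])  # 1.0 98.0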
Example #9
def parse_args(args, noglob=False):
    if isinstance(args, str):
        args = shlex.split(args)
    res, unknown = arg_parser.parse_known_args(args)
    res = bunch.Bunch(**res.__dict__)
    # Glob expansion
    if not noglob:
        ifiles = []
        for pattern in res.ifiles:
            matches = glob.glob(pattern)
            if len(matches) > 0:
                ifiles += matches
            else:
                ifiles.append(pattern)
        res.ifiles = ifiles
    return res
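
A brief sketch, assuming the module-level arg_parser defines an ifiles positional argument; the pattern is hypothetical:

args = parse_args("maps/act_*.zip")  # the string is shlex-split, then globbed
print(args.ifiles)  # glob matches, or the literal pattern if nothing matched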
Example #10
def build_case_tsz(data, size=1 * utils.arcmin, scaling=None):
    if scaling is None:
        scaling = utils.tsz_spectrum(data.freqs * 1e9) / np.abs(
            utils.tsz_spectrum(data.freq0 * 1e9))
    # Get the Fourier-space profile shapes
    lprofs = (utils.tsz_tform(size, data.l) * data.beams).astype(
        data.maps.dtype)
    lprofs /= np.max(lprofs, (-2, -1))[:, None, None]
    # Get the real-space templates for the model
    profs1d = []
    for i in range(data.n):
        lprof1d = utils.tsz_tform(size, np.arange(len(
            data.bls[i]))) * data.bls[i]
        lprof1d /= np.max(lprof1d)
        br = data.beam_profiles[i][0]
        profs1d.append(np.array([br, curvedsky.harm2profile(lprof1d, br)]))
    modeller = analysis.ModellerScaled(data.maps.shape, data.maps.wcs, profs1d,
                                       scaling)
    return bunch.Bunch(profile=lprofs, scaling=scaling, modeller=modeller)
Example #11
def read(fname, splits=None, type="auto", maxmaps=1000, **kwargs):
    """Read the maps, ivars, beam, gain and fequency from a mapdata file, and
	return them as a bunch of maps:[map,...], ivars:[ivar,...], beam:[:],
	gain:num, freq:num. All maps are read by default. Use the splits argument
	to read only a subset, e.g. splits=[0] to read only the first map for
	each of maps and ivars.

	All unrecognized arguments are forwarded to enmap.read_fits and used when
	reading the maps, allowing for subset reading etc."""
    import numpy as np
    from pixell import enmap
    if type == "auto": type = infer_type(fname)
    if type == "zip": work, flexget = zipfile.ZipFile(fname, "r"), zip_flexopen
    elif type == "dir": work, flexget = fname, dir_flexopen
    elif type == "mapinfo": work, flexget = fname, mapinfo_flexget
    else: raise ValueError("Unrecognized type '%s'" % str(type))
    data = bunch.Bunch(maps=[], ivars=[], beam=None, gain=None, freq=None)
    with flexget(work, "info.txt") as f:
        read_info(f, data)
    with flexget(work, "beam.txt") as f:
        # This supports the formats [l,b,...] and [b]. The beam is assumed to
        # start at l=0 and have a step of 1
        data.beam = read_beam(f)
    if splits is None:
        for i in range(0, maxmaps):
            try:
                with flexget(work, "map%d.fits" % (i + 1)) as f:
                    mapfile = enmap.read_fits(f, **kwargs)
                with flexget(work, "ivar%d.fits" % (i + 1)) as f:
                    ivarfile = enmap.read_fits(f, **kwargs)
            except FileNotFoundError:
                break
            data["maps"].append(mapfile)
            data["ivars"].append(ivarfile)
    else:
        for i in splits:  # splits is a list of map indices (e.g. [0]), not a count
            with flexget(work, "map%d.fits" % (i + 1)) as f:
                data["maps"].append(enmap.read_fits(f, **kwargs))
            with flexget(work, "ivar%d.fits" % (i + 1)) as f:
                data["ivars"].append(enmap.read_fits(f, **kwargs))
    if type == "zip": work.close()
    return data
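
A usage sketch tying this to read_meta above; the archive name is hypothetical:

# Read only the first split of each map/ivar pair, as the docstring suggests.
d = read("act_f090.zip", splits=[0])
print(len(d.maps), len(d.ivars))  # 1 1
print(d.freq)                     # central frequency from info.txt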
Example #12
def read_hdf(ifile):
    res = bunch.Bunch()
    with h5py.File(ifile, "r") as hfile:
        for key in hfile:
            res[key] = hfile[key][()]
    return res
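
A short round-trip sketch; h5py is already assumed by the function itself, and the file name is hypothetical:

import h5py, numpy as np
with h5py.File("example.h5", "w") as hfile:
    hfile["x"] = np.arange(3)
print(read_hdf("example.h5").x)  # [0 1 2]; Bunch supports attribute access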
Example #13
def search_maps_tiled(ifiles,
                      odir,
                      tshape=(1000, 1000),
                      margin=100,
                      padding=150,
                      mode="find",
                      icat=None,
                      box=None,
                      pixbox=None,
                      sel=None,
                      mask=None,
                      templates=default_templates,
                      cl_cmb=None,
                      freq0=98.0,
                      nmat1="constcorr",
                      nmat2="constcorr",
                      snr1=5,
                      snr2=4,
                      comps="TQU",
                      dtype=np.float32,
                      comm=None,
                      cont=False,
                      sim_cat=None,
                      sim_noise=False,
                      verbose=False):
    wdir = odir + "/work"
    utils.mkdir(wdir)
    # Stand-in communicator for the serial case. It needs a Barrier stub,
    # since comm.Barrier() is called unconditionally below.
    if comm is None: comm = bunch.Bunch(rank=0, size=1, Barrier=lambda: None)
    tshape = np.zeros(2, int) + tshape
    meta = mapdata.read_meta(ifiles[0])
    # Allow us to slice the map that will be tiled
    geo = enmap.Geometry(*meta.map_geometry)
    if pixbox is not None or box is not None:
        geo = geo.submap(pixbox=pixbox, box=box)
    if sel is not None: geo = geo[sel]
    shape = np.array(geo.shape[-2:])
    ny, nx = (shape + tshape - 1) // tshape

    def is_done(ty, tx):
        return os.path.isfile("%s/cat_%03d_%03d.fits" % (wdir, ty, tx))

    tyxs = [(ty, tx) for ty in range(ny) for tx in range(nx)
            if (not cont or not is_done(ty, tx))]
    for ind in range(comm.rank, len(tyxs), comm.size):
        # Get basic area of this tile
        tyx = np.array(tyxs[ind])
        if verbose:
            print("%2d Processing tile %2d %2d of %2d %2d" %
                  (comm.rank, tyx[0], tyx[1], ny, nx))
        yx1 = tyx * tshape
        yx2 = np.minimum((tyx + 1) * tshape, shape)
        # Apply padding
        wyx1 = yx1 - margin - padding
        wyx2 = yx2 + margin + padding
        # Transform from box-relative pixbox to global pixbox
        off = enmap.pixbox_of(meta.map_geometry[1], *geo)[0]
        wyx1 += off
        wyx2 += off
        # Process this tile
        res = search_maps(ifiles,
                          mode=mode,
                          icat=icat,
                          pixbox=[wyx1, wyx2],
                          templates=templates,
                          mask=mask,
                          cl_cmb=cl_cmb,
                          freq0=freq0,
                          nmat1=nmat1,
                          nmat2=nmat2,
                          snr1=snr1,
                          snr2=snr2,
                          comps=comps,
                          dtype=dtype,
                          sim_cat=sim_cat,
                          sim_noise=sim_noise,
                          verbose=verbose)
        # Write tile results to work directory. We do this to avoid using too much memory,
        # and to allow us to continue
        write_results(wdir, res, padding=padding, tag="%03d_%03d" % tuple(tyx))
    comm.Barrier()
    # When everything's done, merge things into single files
    if comm.rank == 0:
        merge_results(wdir,
                      odir,
                      geo,
                      tshape=tshape,
                      margin=margin,
                      verbose=verbose)
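
A hedged sketch of driving the tiled search over MPI; mpi4py is an assumption here, and the input and output paths are hypothetical:

from mpi4py import MPI
search_maps_tiled(["act_f090.zip", "act_f150.zip"], "search_out",
                  tshape=(1000, 1000), mode="find",
                  comm=MPI.COMM_WORLD, cont=True, verbose=True)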
Example #14
def search_maps(ifiles,
                mode="find",
                icat=None,
                sel=None,
                pixbox=None,
                box=None,
                templates=default_templates,
                cl_cmb=None,
                freq0=98.0,
                nmat1="constcorr",
                nmat2="constcorr",
                snr1=5,
                snr2=4,
                comps="TQU",
                dtype=np.float32,
                apod=15 * utils.arcmin,
                verbose=False,
                sim_cat=None,
                sim_noise=False,
                mask=None):
    """Search the maps given by ifiles for objects, returning a bunch containing a catalog
	of objects, etc.

	Arguments:
	* ifiles: A list of paths to mapdata files. These should be in µK units.
	* mode:   What operation to do. Either "find" or "fit". Defaults to "find"
	  * "find":  Do a blind object search in the maps.
	  * "fit":   Do forced photometry on the positions provided in the input catalog icat.
	* sel, pixbox, box: These let you work with a subset of the maps. Same meaning as
	  in enmap.read_map. Default to None.
	* templates: What spectral and spatial shapes to look for. This is a list of tuples
	  that will be passed to build_cases. Defaults to default_templates.
	* cl_cmb: The CMB angular power spectrum. Ideally [TQU,TQU,nl] (note: not TEB).
	  Used to build the blind noise model.
	* freq0: The reference frequency in GHz. This is used when reporting the overall flux
	  of multifrequency templates.
	* nmat1: The noise model to use for the first pass of the search. These are
	  built from simple analytic models, not measured from the data.
	  "constcov": A constant covariance noise model. This is fast, but
	    purely harmonic, so it can't handle variations in hitcount.
	  "constcorr": A constant correlation noise model, where a constant
	    noise spectrum is modulated by the hitcount. Handles both correlations
	    and spatial variations, in a limited way. [default]
	* nmat2: The noise model to use for the second pass of the search. These
	  are built from maps cleaned using the first pass. Defaults to "constcorr".
	  "none": Disables the second pass, returning the catalog found in the first pass
	  "constcov", "constcorr": As nmat1, but measured from the data. The power spectra
	  are smoothed isotropically for now.
	* snr1: The S/N threshold used for the first pass in "find" mode. Defaults to 5.
	* snr2: The S/N threshold used for the second pass in "find" mode. Defaults to 4.
	* comps: Controls the Stokes parameters considered. Can be "T" or "TQU". Defaults to "TQU".
	* dtype: The maps will be cast to this data type after reading in. Defaults to np.float32.
	* apod: How much apodization is used at the edges (including edges of unhit areas),
	  in radians. This is necessary to avoid ringing artifacts in the fourier transforms.
	  Defaults to 15 arcmin.
	* verbose: Whether to print what it's doing. Defaults to False.
	* sim_noise: Whether to replace the raw data with noise. Currently a cmb-less constcorr
	  realization. Should fix this.
	* sim_cat: A catalog to inject into the maps before the search. Defaults to None.
	* mask: Path to a mask enmap that's True in bad regions and False in good regions.
	  Bad regions will be given zero weight and apodized.
	
	Returns a bunch with the following members:
	* cat: A catalog with the data type [("ra", "d"), ("dec", "d"), ("snr", "d", (ncomp,)),
	  ("flux_tot", "d", (ncomp,)), ("dflux_tot", "d", (ncomp,)), ("flux", "d", (nfield,ncomp)),
	  ("dflux", "d", (nfield,ncomp)), ("case", "i"), ("contam", "d", (nfield,))]
	* maps: The maps that were searched [nfreq,ncomp,ny,nx]
	* model: The best-fit model
	* snr:  The S/N ratio for the input maps [ny,nx]
	* resid_snr: The S/N ratio after subtracting the best-fit model
	* freqs: The frequencies, in GHz
	* fconvs: The µK -> mJy/sr flux conversion factor for each frequency
	* inds: The index of each entry in the output catalog in the input catalog.
	  Only relevant when mode == "fit".
"""

    # Read in the total intensity data
    if verbose: print("Reading T from %s" % str(ifiles))
    data = read_data(ifiles,
                     sel=sel,
                     pixbox=pixbox,
                     box=box,
                     dtype=dtype,
                     apod=apod,
                     mask=mask)
    data.freq0 = freq0
    ncomp = len(comps)
    nfield = len(data.maps)
    cat_dtype = [("ra", "d"), ("dec", "d"), ("snr", "d", (ncomp, )),
                 ("flux_tot", "d", (ncomp, )), ("dflux_tot", "d", (ncomp, )),
                 ("flux", "d", (nfield, ncomp)),
                 ("dflux", "d", (nfield, ncomp)), ("case", "i"),
                 ("contam", "d", (nfield, ))]
    cases = build_cases(data, templates)

    # Get the part of the catalog inside our area
    if mode == "fit":
        inds = np.where(
            np.any(data.ivars.at([icat.dec, icat.ra], order=0) > 0, 0))[0]
        subicat = icat[inds]
    else:
        inds = None

    # Abort if we have no data to process
    if np.all(data.ivars == 0):
        map_tot = enmap.zeros((nfield, ncomp) + data.maps.shape[-2:],
                              data.maps.wcs, dtype)
        cat = np.zeros(0, cat_dtype)
        return bunch.Bunch(cat=cat,
                           maps=map_tot,
                           model=map_tot,
                           snr=map_tot[0, 0],
                           resid_snr=map_tot[0, 0],
                           hits=map_tot[0, 0],
                           fconvs=data.fconvs,
                           freqs=data.freqs,
                           inds=inds)

    # Build our noise model, based on a 1/l spectrum + cmb + a foreground penalty
    cmb = build_cmb_2d(*data.maps.geometry, cl_cmb,
                       dtype=data.maps.dtype) if cl_cmb is not None else None
    fg_var = build_foreground_var(data.maps)
    nmat = build_nmat_prior(data,
                            type=nmat1,
                            fg_var=fg_var,
                            cmb=cmb[0, 0] if cmb is not None else None)

    # Optionally inject signal. FIXME: This should be moved after nmat is defined,
    # so we can let nmat handle the noise simulation
    if sim_noise: data.maps = nmat.simulate() * data.apod
    if sim_cat is not None:
        inject_objects(data, cases, slice_cat_comp(sim_cat, 0))

    # Total intensity
    if mode == "find":
        if verbose: print("1st pass T find")
        res_t = find_objects(data,
                             cases,
                             nmat,
                             snmin=snr1,
                             resid=nmat2 == "none",
                             verbose=verbose)
    elif mode == "fit":
        if verbose: print("1st pass T measure")
        res_t = measure_objects(data,
                                cases,
                                nmat,
                                slice_cat_comp(subicat, 0),
                                resid=nmat2 == "none",
                                verbose=verbose)
    else:
        raise ValueError("Unrecognized mode '%s'" % (mode))
    if nmat2 != "none":
        noise = data.maps - res_t.model
        if "bad_mask" in res_t:
            noise_apod = enmap.apod_mask(1 - res_t.bad_mask,
                                         10 * utils.arcmin,
                                         edge=False)
            noise *= noise_apod
            noise /= np.mean(noise_apod**2)
            #enmap.write_map("noise.fits", noise)
            del noise_apod
        nmat = build_nmat_empirical(data, noise, fg_var=fg_var, type=nmat2)
        if mode == "find":
            if verbose: print("2nd pass T find")
            res_t = find_objects(data,
                                 cases,
                                 nmat,
                                 snmin=snr2,
                                 resid=True,
                                 verbose=verbose)
        elif mode == "fit":
            if verbose: print("2nd pass T measure")
            res_t = measure_objects(data,
                                    cases,
                                    nmat,
                                    slice_cat_comp(subicat, 0),
                                    resid=True,
                                    verbose=verbose)
        else:
            raise ValueError("Unrecognized mode '%s'" % (mode))

    res = [res_t]
    # Polarization is always "fit", since anything that would be found in polarization
    # would definitely be found in total intensity
    if comps == "T":
        pass
    elif comps == "TQU":
        # Measure polarization too
        for comp in [1, 2]:
            if verbose:
                print("Reading %s from %s" % (comps[comp], str(ifiles)))
            data = read_data(ifiles,
                             sel=sel,
                             pixbox=pixbox,
                             box=box,
                             comp=comp,
                             apod=apod)
            data.freq0 = freq0
            # Optionally inject signal
            if sim_cat is not None:
                inject_objects(data, cases, slice_cat_comp(sim_cat, comp))
            if verbose: print("1st pass %s measure" % comps[comp])
            # Note: the condition here was inverted ("if cmb is None"), which
            # always passed cmb=None; match the first-pass call above
            nmat = build_nmat_prior(data,
                                    type=nmat1,
                                    pol=True,
                                    cmb=cmb[comp, comp] if cmb is not None else None)
            res_p = measure_objects(data,
                                    cases,
                                    nmat,
                                    res_t.cat,
                                    verbose=verbose)
            if nmat2 != "none":
                if verbose: print("2nd pass %s measure" % comps[comp])
                nmat = build_nmat_empirical(data,
                                            noise_map=data.maps - res_p.model,
                                            type=nmat2)
                res_p = measure_objects(data,
                                        cases,
                                        nmat,
                                        res_t.cat,
                                        verbose=verbose)
            res.append(res_p)
    # First the catalog
    cat = np.zeros(len(res_t.cat), cat_dtype).view(np.recarray)
    cat.ra = res_t.cat.ra
    cat.dec = res_t.cat.dec
    cat.case = res_t.cat.case
    cat.contam = res_t.cat.contam
    for i in range(len(res)):
        cat.snr[:, i] = res[i].cat.snr
        cat.flux_tot[:, i] = res[i].cat.flux_tot
        cat.dflux_tot[:, i] = res[i].cat.dflux_tot
        cat.flux[:, :, i] = res[i].cat.flux
        cat.dflux[:, :, i] = res[i].cat.dflux
    # Then the maps
    map_tot = enmap.samewcs(np.concatenate([r.maps[:, None] for r in res], 1),
                            data.maps)
    model_tot = enmap.samewcs(
        np.concatenate([r.model[:, None] for r in res], 1), data.maps)
    result = bunch.Bunch(cat=cat,
                         maps=map_tot,
                         model=model_tot,
                         fconvs=data.fconvs,
                         freqs=data.freqs,
                         inds=inds)
    # These only exist in "find" mode
    for key in ["snr", "resid_snr", "hits"]:
        result[key] = res_t[key] if key in res_t else None
    return result
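
A minimal find-mode call matching the docstring; the file names are hypothetical and cl_cmb is assumed to already hold a [TQU,TQU,nl] CMB spectrum:

res = search_maps(["act_f090.zip", "act_f150.zip"], mode="find",
                  cl_cmb=cl_cmb, snr1=5, snr2=4, verbose=True)
print(len(res.cat), "objects found")
enmap.write_map("snr.fits", res.snr)  # total-intensity S/N map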
Example #15
def build_case_ptsrc(data, scaling=None):
    if scaling is None: scaling = np.full(data.n, 1.0)
    scaling = np.asarray(scaling).astype(data.maps.dtype)
    modeller = analysis.ModellerPerfreq(data.maps.shape, data.maps.wcs,
                                        data.beam_profiles)
    return bunch.Bunch(profile=data.beams, scaling=scaling, modeller=modeller)
Example #16
def read_data(fnames,
              sel=None,
              pixbox=None,
              box=None,
              geometry=None,
              comp=0,
              split=0,
              unit="flux",
              dtype=np.float32,
              beam_rmax=5 * utils.degree,
              beam_res=2 * utils.arcsec,
              deconv_pixwin=True,
              apod=15 * utils.arcmin,
              mask=None,
              ivscale=[1, 0.5, 0.5]):
    """Read multi-frequency data for a single split of a single component, preparing it for
	analysis."""
    # Read in our data files and harmonize
    br = np.arange(0, beam_rmax, beam_res)
    data = bunch.Bunch(maps=[],
                       ivars=[],
                       beams=[],
                       freqs=[],
                       l=None,
                       bls=[],
                       names=[],
                       beam_profiles=[])
    for ifile in fnames:
        d = mapdata.read(ifile,
                         sel=sel,
                         pixbox=pixbox,
                         box=box,
                         geometry=geometry)
        # Select the requested split (default: the first). Splits are not combined here.
        data.maps.append(d.maps[split].astype(dtype)[comp])
        data.ivars.append(d.ivars[split].astype(dtype) * ivscale[comp])
        data.freqs.append(d.freq)
        if data.l is None: data.l = d.maps[0].modlmap()
        data.beams.append(
            enmap.ndmap(
                np.interp(data.l, np.arange(len(d.beam)),
                          d.beam / np.max(d.beam)),
                d.maps[0].wcs).astype(dtype))
        data.names.append(".".join(os.path.basename(ifile).split(".")[:-1]))
        data.bls.append(d.beam)
        data.beam_profiles.append(
            np.array([br, curvedsky.harm2profile(d.beam, br)]).astype(dtype))

    data.maps = enmap.enmap(data.maps)
    data.ivars = enmap.enmap(data.ivars)
    data.beams = enmap.enmap(data.beams)
    data.freqs = np.array(data.freqs)
    if unit == "uK":
        data.fconvs = np.full(len(data.freqs), 1.0, dtype)
    elif unit == "flux":
        data.fconvs = (utils.dplanck(data.freqs * 1e9, utils.T_cmb) /
                       1e3).astype(dtype)  # uK -> mJy/sr
    else:
        raise ValueError("Unrecognized unit '%s'" % str(unit))
    data.n = len(data.freqs)

    # Apply the unit
    data.maps *= data.fconvs[:, None, None]
    data.ivars /= data.fconvs[:, None, None]**2

    if mask is not None:
        mask_map = 1 - enmap.read_map(mask, sel=sel, pixbox=pixbox, box=box)
        data.ivars *= mask_map
        del mask_map

    # Should generalize this to handle internal map edges and frequency differences
    mask = enmap.shrink_mask(enmap.grow_mask(data.ivars > 0, 1 * utils.arcmin),
                             1 * utils.arcmin)
    apod_map = enmap.apod_mask(mask, apod)
    data.apod = apod_map
    data.fapod = np.mean(apod_map**2)
    data.maps *= apod_map
    data.ivars *= apod_map**2

    # Get the pixel window and optionally deconvolve it
    data.wy, data.wx = [
        w.astype(dtype) for w in enmap.calc_window(data.maps.shape)
    ]
    if deconv_pixwin:
        data.maps = enmap.ifft(
            enmap.fft(data.maps) / data.wy[:, None] / data.wx[None, :]).real

    return data
Example #17
def build_obs(self, id, obs, noise_model=None):
     # Signal must have the right dtype, or the pmat we build will break later
     t1 = time.time()
     tod = obs.signal.astype(self.dtype_tod, copy=False)
     ctime = obs.timestamps
     # Set up cuts handling
     pcut = PmatCut(obs.glitch_flags)
     # Build the local geometry and pointing matrix for this observation
     if self.recenter:
         rot = recentering_to_quat_lonlat(
             *evaluate_recentering(self.recenter,
                                   ctime=ctime[len(ctime) // 2],
                                   geom=(self.shape, self.wcs),
                                   site=SITE))
     else:
         rot = None
     # Ideally we would include cuts in the pmat. It would slightly simplify PmatCut, which
     # would skip the "clear" step, and it would make the map_div calculation simpler.
     # However, doing so changes the result, which should be investigated.
     pmat = coords.pmat.P.for_tod(obs,
                                  comps=self.comps,
                                  geom=(self.shape, self.wcs),
                                  rot=rot)
     t2 = time.time()
     # Build the noise model
     if noise_model is None: noise_model = self.noise_model
     srate = (len(ctime) - 1) / (ctime[-1] - ctime[0])
     nmat = noise_model.build(tod, srate=srate)  # honor the per-call override
     t3 = time.time()
     tod = nmat.apply(tod)
     t4 = time.time()
     map_rhs = enmap.zeros((self.ncomp, ) + self.shape, self.wcs,
                           self.dtype_map)
     junk_rhs = np.zeros(pcut.njunk, self.dtype_tod)
     ## FIXME
     #so3g.test_cuts(pcut.cuts.ranges)
     pcut.backward(tod, junk_rhs)
     pmat.to_map(dest=map_rhs, signal=tod)
     t5 = time.time()
     # After this we don't need the tod values any more, so we are free to mess with them.
     map_div = enmap.zeros((self.ncomp, self.ncomp) + self.shape, self.wcs,
                           self.dtype_map)
     junk_div = np.ones(pcut.njunk, self.dtype_tod)
     tod[:] = 0
     pcut.forward(tod, junk_div)
     tod *= nmat.ivar[:, None]
     pcut.backward(tod, junk_div)
     #pmat.to_weights(dest=map_div, det_weights=nmat.ivar.astype(self.dtype_tod))
     # Full manual build of map_div
     for i in range(self.ncomp):
         map_div[i] = 0
         map_div[i, i] = 1
         tod[:] = 0
         pmat.from_map(map_div[i], dest=tod)
         pcut.clear(tod)
         tod *= nmat.ivar[:, None]
         map_div[i] = 0
         pmat.to_map(signal=tod, dest=map_div[i])
     t6 = time.time()
     if np.any(map_div[0, 0, 0, :] != 0) or np.any(
             map_div[0, 0, -1, :] != 0) or np.any(
                 map_div[0, 0, :, 0] != 0) or np.any(
                     map_div[0, 0, :, -1] != 0):
         warnings.warn("Local work space was too small - data truncated")
     # And return the ML data for this observation
     data = bunch.Bunch(id=id,
                        ndet=obs.dets.count,
                        nsamp=len(ctime),
                        dets=obs.dets.vals,
                        shape=self.shape,
                        wcs=self.wcs,
                        pmat=pmat,
                        pcut=pcut,
                        nmat=nmat,
                        map_rhs=map_rhs,
                        map_div=map_div,
                        junk_rhs=junk_rhs,
                        junk_div=junk_div)
     #L.debug("build %-70s : Pbuild %8.3f Nbuild %8.3f Pw' %8.3f N %8.3f Pm' %8.3f  %3d %6d" % (id, t2-t1, t3-t2, t6-t5, t4-t3, t5-t4, data.ndet, data.nsamp))
     return data
Example #18
    c11 = [hp.alm2cl(alms[0][i], alms[0][i]) for i in range(3)]
    c22 = [hp.alm2cl(alms[1][i], alms[1][i]) for i in range(3)]
    cross = hp.alm2cl(alms[0][0], alms[1][0])
    if white or tube[0] == "S":
        zeros.append(cross)
        c12 = []
    else:
        c12 = [cross]
    ls = np.arange(c11[0].size)
    return ls, c11, c22, c12, zeros


for key in config.keys():
    print(f"Test {key}")
    c = bunch.Bunch(config[key])
    nside, shape, wcs, res = get_geom(c)
    nsim = noise.SONoiseSimulator(
        nside=nside,
        shape=shape,
        wcs=wcs,
        homogenous=c.homogenous,
        sky_fraction=c.fsky if c.homogenous else None,
    )
    ells, nlt, nlp = nsim.get_noise_spectra(c.tube, ncurve_sky_fraction=c.fsky)

    if not c.homogenous:
        try:
            if nside is not None:
                mask = hp.read_map(c.mask)
            else: