class Test_z_chi(BaseVectorTest):
    # Build a z(chi) interpolator: sample redshifts log-uniformly, compute
    # their comoving distances, then invert the relation with a cubic spline.
    x = np.linspace(100, 1000)
    z_int = np.logspace(-8, np.log10(1500), 10000)
    chis = Planck15.comoving_distance(z_int)  # Mpc/h
    z_chi_int = scipy.interpolate.interp1d(
        chis, z_int, kind=3, bounds_error=False, fill_value=0.)
    y = z_chi_int(x)

    def model(self, x):
        y = lightcone.z_chi(x, Planck15, self.z_chi_int)
        return y
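# lightcone.z_chi is exercised above but not defined in this file. A minimal
# sketch of what the test implies it does; the name and body below are an
# illustrative assumption, not the actual implementation:

def _z_chi_sketch(chi, cosmology, z_chi_int):
    # Invert chi(z) by evaluating the precomputed interpolator; distances
    # outside the tabulated range map to the fill value (0) set above.
    return z_chi_int(chi)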
def main(ns):
    if ns.zlmax is None:
        ns.zlmax = max(ns.zs)

    zs_list = ns.zs

    zlmin = ns.zlmin
    zlmax = zs_list[-1]  # override ns.zlmax with the last source redshift

    # no need to be accurate here
    ds_list = Planck15.comoving_distance(zs_list)

    path = ns.source

    cat = BigFileCatalog(path, dataset=ns.dataset)

    kappa = 0
    Nm = 0
    kappabar = 0

    npix = healpix.nside2npix(ns.nside)

    # contiguous, near-equal split of the healpix pixels across MPI ranks
    localsize = (npix * (cat.comm.rank + 1) // cat.comm.size
                 - npix * cat.comm.rank // cat.comm.size)

    nbar = (cat.attrs['NC'] ** 3 / cat.attrs['BoxSize'] ** 3
            * cat.attrs['ParticleFraction'])[0]

    Nsteps = int(numpy.round((zlmax - zlmin) / ns.zstep))
    if Nsteps < 2:
        Nsteps = 2

    # Nsteps + 1 edges define Nsteps lens bins, ordered from high to low z.
    z = numpy.linspace(zlmax, zlmin, Nsteps + 1, endpoint=True)

    if cat.comm.rank == 0:
        cat.logger.info("Splitting data redshift bins %s" % str(z))

    kappa_all = numpy.zeros((Nsteps, len(zs_list), localsize))

    for i, (z1, z2) in enumerate(zip(z[:-1], z[1:])):
        import gc
        gc.collect()

        if cat.comm.rank == 0:
            cat.logger.info("nbar = %g, zlmin = %g, zlmax = %g, zs = %s"
                            % (nbar, z2, z1, zs_list))

        zslice = read_range(cat, 1 / (1 + z1), 1 / (1 + z2))

        if zslice.csize == 0:
            continue

        if cat.comm.rank == 0:
            cat.logger.info("read %d particles" % zslice.csize)

        kappa1, kappa1bar, Nm1 = make_kappa_maps(
            zslice, ns.nside, zs_list, ds_list, localsize, nbar)

        kappa = kappa + kappa1
        kappa_all[i] = kappa1
        Nm = Nm + Nm1
        kappabar = kappabar + kappa1bar

    cat.comm.barrier()

    if cat.comm.rank == 0:
        # use bigfile because it allows concurrent write to different datasets.
        cat.logger.info("writing to %s", ns.output)
        # buffer to collect all lens-plane slices of a source-plane map
        kappa1_all = numpy.zeros((Nsteps, int(12 * ns.nside ** 2)))

    for i, (zs, ds) in enumerate(zip(zs_list, ds_list)):
        std = numpy.std(cat.comm.allgather(len(kappa[i])))
        mean = numpy.mean(cat.comm.allgather(len(kappa[i])))

        if cat.comm.rank == 0:
            cat.logger.info(
                "started gathering source plane %s, size-var = %g, size-bar = %g"
                % (zs, std, mean))

        kappa1 = GatherArray(kappa[i], cat.comm)
        Nm1 = GatherArray(Nm[i], cat.comm)

        # gather each lens-plane slice of this source-plane map onto the root
        for j in range(Nsteps):
            kappa1_allj = GatherArray(kappa_all[j, i], cat.comm)
            if cat.comm.rank == 0:
                kappa1_all[j] = kappa1_allj

        if cat.comm.rank == 0:
            cat.logger.info("done gathering source plane %s" % zs)

            fname = ns.output + "/WL-%02.2f-N%04d" % (zs, ns.nside)
            cat.logger.info("started writing source plane %s" % zs)

            with bigfile.File(fname, create=True) as ff:
                ds1 = ff.create_from_array("kappa", kappa1, Nfile=1)
                ds2 = ff.create_from_array("Nm", Nm1, Nfile=1)
                # writing the per-plane slices is currently disabled:
                # ds3 = ff.create_from_array("kappa_all", kappa1_all.T, Nfile=1)

                for d in ds1, ds2:
                    d.attrs['kappabar'] = kappabar[i]
                    d.attrs['nside'] = ns.nside
                    d.attrs['zlmin'] = zlmin
                    d.attrs['zlmax'] = zlmax
                    d.attrs['zstep'] = ns.zstep
                    d.attrs['zs'] = zs
                    d.attrs['ds'] = ds
                    d.attrs['nbar'] = nbar

        cat.comm.barrier()

        if cat.comm.rank == 0:
            cat.logger.info("source plane at %g written." % zs)
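# A minimal sketch of the argparse namespace main() consumes. The flag names
# mirror the ns attributes used above, but the defaults and help strings are
# illustrative assumptions, not values taken from this repo:

def _parse_args_sketch(argv=None):
    import argparse
    ap = argparse.ArgumentParser(
        description="project a lightcone particle catalog into healpix kappa maps")
    ap.add_argument('source', help='path of the input BigFileCatalog')
    ap.add_argument('--dataset', default='1', help='dataset name inside the catalog')
    ap.add_argument('--output', required=True, help='output directory for the WL-* files')
    ap.add_argument('--zs', nargs='+', type=float, default=[1.0],
                    help='source plane redshifts')
    ap.add_argument('--zlmin', type=float, default=0.0, help='minimum lens redshift')
    ap.add_argument('--zlmax', type=float, default=None,
                    help='maximum lens redshift (defaults to max(zs))')
    ap.add_argument('--zstep', type=float, default=0.1,
                    help='lens slab thickness in redshift')
    ap.add_argument('--nside', type=int, default=256,
                    help='healpix resolution of the output maps')
    return ap.parse_args(argv)

# e.g. main(_parse_args_sketch(['lightcone/usmesh', '--output', 'maps',
#                               '--zs', '0.5', '1.0']))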
def main(ns):
    if ns.zlmax is None:
        ns.zlmax = max(ns.zs)

    zs_list = ns.zs

    zlmin = ns.zlmin
    zlmax = ns.zlmax

    # no need to be accurate here
    ds_list = Planck15.comoving_distance(zs_list)

    path = ns.source
    # e.g. '/global/cscratch1/sd/yfeng1/m3127/desi/1536-9201-40eae2464/lightcone/usmesh/'

    cat = BigFileCatalog(path, dataset=ns.dataset)

    kappa = 0
    Nm = 0
    kappabar = 0

    npix = healpix.nside2npix(ns.nside)

    # contiguous, near-equal split of the healpix pixels across MPI ranks
    localsize = (npix * (cat.comm.rank + 1) // cat.comm.size
                 - npix * cat.comm.rank // cat.comm.size)

    nbar = (cat.attrs['NC'] ** 3 / cat.attrs['BoxSize'] ** 3
            * cat.attrs['ParticleFraction'])[0]

    Nsteps = int(numpy.round((zlmax - zlmin) / ns.zstep))
    if Nsteps < 2:
        Nsteps = 2

    # Nsteps + 1 edges define Nsteps lens bins, ordered from high to low z.
    z = numpy.linspace(zlmax, zlmin, Nsteps + 1, endpoint=True)

    if cat.comm.rank == 0:
        cat.logger.info("Splitting data redshift bins %s" % str(z))

    for z1, z2 in zip(z[:-1], z[1:]):
        import gc
        gc.collect()

        if cat.comm.rank == 0:
            cat.logger.info("nbar = %g, zlmin = %g, zlmax = %g, zs = %s"
                            % (nbar, z2, z1, zs_list))

        zslice = read_range(cat, 1 / (1 + z1), 1 / (1 + z2))

        if zslice.csize == 0:
            continue

        if cat.comm.rank == 0:
            cat.logger.info("read %d particles" % zslice.csize)

        kappa1, kappa1bar, Nm1 = make_kappa_maps(
            zslice, ns.nside, zs_list, ds_list, localsize, nbar)

        kappa = kappa + kappa1
        Nm = Nm + Nm1
        kappabar = kappabar + kappa1bar

    cat.comm.barrier()

    if cat.comm.rank == 0:
        # use bigfile because it allows concurrent write to different datasets.
        cat.logger.info("writing to %s", ns.output)

    for i, (zs, ds) in enumerate(zip(zs_list, ds_list)):
        std = numpy.std(cat.comm.allgather(len(kappa[i])))
        mean = numpy.mean(cat.comm.allgather(len(kappa[i])))

        if cat.comm.rank == 0:
            cat.logger.info(
                "started gathering source plane %s, size-var = %g, size-bar = %g"
                % (zs, std, mean))

        kappa1 = GatherArray(kappa[i], cat.comm)
        Nm1 = GatherArray(Nm[i], cat.comm)

        if cat.comm.rank == 0:
            cat.logger.info("done gathering source plane %s" % zs)

            fname = ns.output + "/WL-%02.2f-N%04d" % (zs, ns.nside)
            cat.logger.info("started writing source plane %s" % zs)

            with bigfile.File(fname, create=True) as ff:
                ds1 = ff.create_from_array("kappa", kappa1, Nfile=1)
                ds2 = ff.create_from_array("Nm", Nm1, Nfile=1)

                for d in ds1, ds2:
                    d.attrs['kappabar'] = kappabar[i]
                    d.attrs['nside'] = ns.nside
                    d.attrs['zlmin'] = zlmin
                    d.attrs['zlmax'] = zlmax
                    d.attrs['zs'] = zs
                    d.attrs['ds'] = ds
                    d.attrs['nbar'] = nbar

        cat.comm.barrier()

        if cat.comm.rank == 0:
            cat.logger.info("source plane at %g written." % zs)
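# The localsize expression above splits the npix healpix pixels across MPI
# ranks in contiguous, near-equal chunks. A standalone sketch (pure Python,
# no MPI; the function name is hypothetical) checking that the chunks tile
# the map exactly:

def _check_pixel_partition(npix, size):
    # same integer arithmetic as in main(): rank r owns the pixel range
    # [npix * r // size, npix * (r + 1) // size)
    sizes = [npix * (r + 1) // size - npix * r // size for r in range(size)]
    assert sum(sizes) == npix              # every pixel is owned exactly once
    assert max(sizes) - min(sizes) <= 1    # balanced to within one pixel
    return sizes

# e.g. _check_pixel_partition(12 * 256 ** 2, 64)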