Example #1
def test_nbody():
    """ Checking end to end nbody """
    a0 = 0.1

    pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc], dtype='f4')
    grid = pm.generate_uniform_particle_grid(shift=0).astype(np.float32)
    solver = Solver(pm, Planck15, B=1)
    stages = np.linspace(0.1, 1.0, 10, endpoint=True)

    # Generate initial state with fastpm
    whitec = pm.generate_whitenoise(100, mode='complex', unitary=False)
    lineark = whitec.apply(lambda k, v: Planck15.get_pklin(
        sum(ki**2 for ki in k)**0.5, 0)**0.5 * v / v.BoxSize.prod()**0.5)
    statelpt = solver.lpt(lineark, grid, a0, order=1)
    finalstate = solver.nbody(statelpt, leapfrog(stages))
    final_cube = pm.paint(finalstate.X)

    # Same thing with flowpm
    tlinear = tf.expand_dims(np.array(lineark.c2r()), 0)
    state = tfpm.lpt_init(tlinear, a0, order=1)
    state = tfpm.nbody(state, stages, nc)
    tfread = pmutils.cic_paint(tf.zeros_like(tlinear), state[0]).numpy()

    assert_allclose(final_cube, tfread[0], atol=1.2)
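The test snippets in this listing (Examples #1, #4, #6, #7 and #9) rely on module-level scaffolding that the excerpts omit. A minimal sketch of that setup follows; the flowpm/fastpm module aliases and the bs/nc values are assumptions chosen for illustration, not the original module's definitions:

# Hypothetical module-level setup assumed by the test snippets (values illustrative).
import numpy as np
import tensorflow as tf
from numpy.testing import assert_allclose
from pmesh.pm import ParticleMesh
from nbodykit.cosmology import Planck15
from fastpm.core import Solver, leapfrog
import fastpm.force.lpt as fpmops    # assumed to provide lpt1 / lpt2source
from flowpm import tfpm              # assumed TensorFlow N-body ops
from flowpm import utils as pmutils  # assumed to provide cic_paint, r2c3d, c2r3d

bs = 50.0  # box size in Mpc/h (illustrative)
nc = 16    # mesh cells per side (illustrative)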
Example #2
def make_data(bs, nc, seed, nsteps, path='', z=0):
    #initiate
    pm = ParticleMesh(BoxSize=bs, Nmesh=(nc, nc, nc), dtype='f8')

    ofolder = path + 'z%02d/L%04d_N%04d_S%04d_%02dstep/' % (z*10, bs, nc, seed, nsteps)
    
    if pm.comm.rank == 0:
        print('Make data for seed = %d'%seed)
        print('Data to be saved at path - %s'%ofolder)

    klin, plin = numpy.loadtxt('ics_matterpow_0.dat', unpack=True)
    pk = interpolate(klin, plin)
    cosmo = Planck15.clone(Omega_cdm=0.2685, h=0.6711, Omega_b=0.049)
    #pk = EHPower(Planck15, redshift=0)
    #cosmo = Planck15

    s_truth = pm.generate_whitenoise(seed, type='complex')\
            .apply(lambda k, v: v * (pk(sum(ki **2 for ki in k) **0.5) / v.BoxSize.prod()) ** 0.5)\
            .c2r()

    #dynamics
    aa = 1.0/(1+z)
    if pm.comm.rank == 0:
        print('Evolve to redshift = %0.1f, scale factor = %0.2f'%(z, aa))
    stages = numpy.linspace(0.1, aa, nsteps, endpoint=True)

    start = time()
    dynamic_model = NBodyModel(cosmo, pm, B=2, steps=stages)
    #dynamic_model = LPTModel(cosmo, pm, B=2, steps=stages)

    #Save data

    X, V, final = dynamic_model.get_code().compute(['X', 'V', 'final'], init={'parameters':s_truth})
    end = time()
    print('Time taken = ', end-start)

    save_map(s_truth, ofolder + 'mesh', 's')
    save_map(final, ofolder + 'mesh', 'd')

    if pm.comm.rank == 0:
        print('X, V computed')
    cat = ArrayCatalog({'Position': X, 'Velocity' : V}, BoxSize=pm.BoxSize, Nmesh=pm.Nmesh)
    kdd = KDDensity(cat).density
    cat['KDDensity'] = kdd
    cat['InitPosition'] = dynamic_model.get_code().engine.q
    cat.save(ofolder + 'dynamic/1', ('InitPosition', 'Position', 'Velocity', 'KDDensity'))
    if pm.comm.rank == 0:
        print('dynamic model created')

    #FOF
    fof = FOF(cat, linking_length=0.2, nmin=12)
    fofcat = fof.find_features(peakcolumn='KDDensity')
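    # Halo mass = particle count * Om0 * rho_crit * cell volume, where
    # rho_crit = 27.7455 in units of 1e10 h^2 Msun/Mpc^3, so Mass is in 1e10 Msun/h.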
    fofcat['Mass'] = fofcat['Length'] * cosmo.Om0 * 27.7455 * pm.BoxSize.prod() / pm.Nmesh.prod()
    fofcat.save(ofolder+'FOF', ('CMPosition', 'CMVelocity', 'PeakPosition', 'PeakVelocity', 'Length', 'Mass'))
Example #3
class Test_z_chi(BaseVectorTest):

    x = np.linspace(100, 1000)
    z_int = np.logspace(-8, np.log10(1500), 10000)
    chis = Planck15.comoving_distance(z_int)  #Mpc/h
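    # Invert chi(z): interpolate redshift as a function of comoving distance.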
    z_chi_int = scipy.interpolate.interp1d(chis,
                                           z_int,
                                           kind=3,
                                           bounds_error=False,
                                           fill_value=0.)
    y = z_chi_int(x)

    def model(self, x):
        y = lightcone.z_chi(x, Planck15, self.z_chi_int)
        return y
Example #4
def test_lpt2():
    """ Checking lpt2_source, this also checks the laplace and gradient kernels """
    pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc], dtype='f4')
    grid = pm.generate_uniform_particle_grid(shift=0).astype(np.float32)

    whitec = pm.generate_whitenoise(100, mode='complex', unitary=False)
    lineark = whitec.apply(lambda k, v: Planck15.get_pklin(
        sum(ki**2 for ki in k)**0.5, 0)**0.5 * v / v.BoxSize.prod()**0.5)

    # Compute lpt2 source from fastpm with matching kernel order
    source = fpmops.lpt2source(lineark).c2r()

    # Same thing from tensorflow
    tfsource = tfpm.lpt2_source(
        pmutils.r2c3d(tf.expand_dims(np.array(lineark.c2r()), axis=0)))
    tfread = pmutils.c2r3d(tfsource).numpy()

    assert_allclose(source, tfread[0], atol=1e-5)
Example #5
def main():
    ap = ArgumentParser()
    ap.add_argument('fpm', help='e.g. /scratch/fpm_0.1000/')
    ap.add_argument('ll', type=float, help='e.g. 0.2 or 0.168') 
    ap.add_argument('--with-peak', help='Find Peaks KDDensity estimation (slow)', default=True)
    ap.add_argument('fof', help='e.g. /scratch/fpm_0.1000/fof . Will write to {fof}/{ll}')
    ap.add_argument('--nmin', type=int, default=20, help='min number of particles to be in the catalogue')

    ns = ap.parse_args()

    cat = BigFileCatalog(ns.fpm, header='Header', dataset='1/')

    cat.attrs['BoxSize'] = numpy.ones(3) * cat.attrs['BoxSize'][0]
    cat.attrs['Nmesh'] = numpy.ones(3) * cat.attrs['NC'][0]

    cosmo = Planck15.match(Omega0_m=cat.attrs['OmegaM'])

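    # Particle mass in Msun/h: Omega_m * rho_crit (27.75e10 h^2 Msun/Mpc^3)
    # * box volume / total particle count.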
    M0 = cat.attrs['OmegaM'][0] * 27.75 * 1e10 * cat.attrs['BoxSize'].prod() / cat.csize

    if cat.comm.rank == 0:
        print('BoxSize', cat.attrs['BoxSize'])
        print('Nmesh', cat.attrs['Nmesh'])
        print('Mass of a particle', M0)
        print('OmegaM', cosmo.Om0)


    if ns.with_peak:
        cat['Density'] = KDDensity(cat).density

    fof = FOF(cat, linking_length=ns.ll, nmin=ns.nmin)

    if ns.with_peak:
        features = fof.find_features(peakcolumn='Density')
    else:
        features = fof.find_features(peakcolumn=None)

    features['Mass'] = M0 * features['Length']
    if fof.comm.rank == 0:
        print('Total number of features found', features.csize)
        print('Saving columns', features.columns)

    features.save(ns.fof + '/%0.3f' % ns.ll, features.columns)
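A hypothetical invocation of this script (script name and paths purely illustrative):

    python fof.py /scratch/fpm_0.1000/ 0.2 /scratch/fpm_0.1000/fof --nmin 20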
Example #6
def test_lpt_init():
    """ Checking lpt init """
    a0 = 0.1

    pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc], dtype='f4')
    grid = pm.generate_uniform_particle_grid(shift=0).astype(np.float32)
    solver = Solver(pm, Planck15, B=1)

    # Generate initial state with fastpm
    whitec = pm.generate_whitenoise(100, mode='complex', unitary=False)
    lineark = whitec.apply(lambda k, v: Planck15.get_pklin(
        sum(ki**2 for ki in k)**0.5, 0)**0.5 * v / v.BoxSize.prod()**0.5)
    statelpt = solver.lpt(lineark, grid, a0, order=1)

    # Same thing with flowpm
    tlinear = tf.expand_dims(np.array(lineark.c2r()), 0)
    tfread = tfpm.lpt_init(tlinear, a0, order=1).numpy()

    assert_allclose(statelpt.X, tfread[0, 0] * bs / nc, rtol=1e-2)
Example #7
def test_lpt1_64():
    """ Checking lpt1, this also checks the laplace and gradient kernels.
    This variant of the test checks that it works for cubes of size 64. """
    nc = 64
    pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc], dtype='f4')
    grid = pm.generate_uniform_particle_grid(shift=0).astype(np.float32)

    whitec = pm.generate_whitenoise(100, mode='complex', unitary=False)
    lineark = whitec.apply(lambda k, v: Planck15.get_pklin(
        sum(ki**2 for ki in k)**0.5, 0)**0.5 * v / v.BoxSize.prod()**0.5)

    # Compute lpt1 from fastpm with matching kernel order
    lpt = fpmops.lpt1(lineark, grid)

    # Same thing from tensorflow
    tfread = tfpm.lpt1(
        pmutils.r2c3d(tf.expand_dims(np.array(lineark.c2r()), axis=0)),
        grid.reshape((1, -1, 3)) * nc / bs).numpy()

    assert_allclose(lpt, tfread[0] * bs / nc, atol=5e-5)
Example #8
def test_matter_dominated():
    C = Planck15.clone(T_cmb=0.1,
                       m_ncdm=[])  # suppress cmb and remove neutrino
    a = numpy.logspace(-2, 0, 11, endpoint=True)
    z = 1 / a - 1

    pt = MatterDominated(C.Omega0_m, a=a)

    # linear growth function
    D1 = pt.D1(a)
    D_CC = C.scale_independent_growth_factor(z)
    assert_allclose(D1, D_CC, rtol=1e-3)

    # linear growth rate
    f1 = pt.f1(a)
    f_CC = C.scale_independent_growth_rate(z)
    assert_allclose(f1, f_CC, rtol=1e-3)

    # second order quantities
    D2 = pt.D2(a)
    f2 = pt.f2(a)
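    # No reference values are asserted for D2/f2; computing them at least
    # exercises the second-order integration (in the EdS limit one expects
    # D2 ~ -3/7 D1^2, up to sign convention).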
Example #9
def test_lpt1():
    """ Checking lpt1, this also checks the laplace and gradient kernels """
    pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc], dtype='f4')
    grid = pm.generate_uniform_particle_grid(shift=0).astype(np.float32)

    whitec = pm.generate_whitenoise(100, mode='complex', unitary=False)
    lineark = whitec.apply(lambda k, v: Planck15.get_pklin(
        sum(ki**2 for ki in k)**0.5, 0)**0.5 * v / v.BoxSize.prod()**0.5)

    # Compute lpt1 from fastpm with matching kernel order
    lpt = fpmops.lpt1(lineark, grid)

    # Same thing from tensorflow
    with tf.Session() as sess:
        state = tfpm.lpt1(
            pmutils.r2c3d(tf.expand_dims(tf.constant(lineark.c2r()), axis=0)),
            grid.reshape((1, -1, 3)) * nc / bs)
        tfread = sess.run(state)

    assert_allclose(lpt, tfread[0] * bs / nc, atol=1e-5)
Example #10
def set_up():
    params = get_params()
    pm = ParticleMesh(Nmesh=params['Nmesh'],
                      BoxSize=params['BoxSize'],
                      comm=MPI.COMM_WORLD,
                      resampler='cic')
    # generate initial conditions
    cosmo = Planck15.clone(P_k_max=30)
    x = pm.generate_uniform_particle_grid(shift=0.1)
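    # Convert the 2D box side lengths from degrees to radians.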
    BoxSize2D = [deg / 180. * np.pi for deg in params['BoxSize2D']]
    logger = None
    sim = lightcone.WLSimulation(stages=numpy.linspace(0.1,
                                                       1.0,
                                                       params['N_steps'],
                                                       endpoint=True),
                                 cosmology=cosmo,
                                 pm=pm,
                                 boxsize2D=BoxSize2D,
                                 params=params,
                                 logger=None)
    kmaps = [sim.mappm.create('real', value=0.) for ii in range(1)]

    return pm, cosmo, x, kmaps, sim.DriftFactor, sim.mappm, sim
Example #11
def test_solver(comm):
    pm = ParticleMesh(BoxSize=512., Nmesh=[8, 8, 8], comm=comm)
    solver = Solver(pm, Planck15, B=1)

    P_prm = Planck15.Primordial.get_pkprim

    tf = get_species_transfer_function_from_class(Planck15, 9)

    Q = pm.generate_uniform_particle_grid(shift=0)

    wn = solver.whitenoise(1234)
    prm = solver.primordial(wn, P_prm)
    ic = solver.lpt(prm, {
                '0': (Baryon, tf['d_b'], tf['dd_b']),
                '1': (CDM, tf['d_cdm'], tf['dd_cdm']),
                '4': (NCDM, tf['d_ncdm[0]'], tf['dd_ncdm[0]']),
            }, Q, a=0.1)

    print('0', ic.species['0'].S[0], ic.species['0'].P[0], ic.species['0'].Q[0])
    print('1', ic.species['1'].S[0], ic.species['1'].P[0], ic.species['1'].Q[0])
    print('4', ic.species['4'].S[0], ic.species['4'].P[0], ic.species['4'].Q[0])

    c2 = CoreSolver(pm, Planck15, B=1)
    Pk = lambda k: Planck15.get_pk(k, z=0)
    dlin = c2.linear(wn, Pk)
    ic2 = c2.lpt(dlin, Q, 0.1, order=1)
    print(ic2.S[0], ic2.P[0], ic2.Q[0])
    final2 = c2.nbody(ic2, leapfrog([0.1, 1.0]))

    final = solver.nbody(ic, leapfrog([0.1, 1.0]))
    print('0', final.species['0'].F[0])
    print('1', final.species['1'].F[0])
    print('4', final.species['4'].F[0])
    print(final2.F[0])

    final.to_catalog()
Example #12
def main(argv):
    del argv
    """ -------------- setting parameters ------------------------"""
    params = FLAGS.flag_values_dict()
    params['Nmesh'] = [FLAGS.Nmesh] * 3
    params['BoxSize'] = [FLAGS.boxsize] * 3
    params['Nmesh2D'] = [FLAGS.Nmesh2D] * 2
    params['BoxSize2D'] = [FLAGS.boxsize2D] * 2
    params['zs_source'] = [float(zs) for zs in FLAGS.zs_source]

    if params['custom_cosmo']:
        cosmo = Planck15.match(Omega0_m=FLAGS.Omega_m)
        cosmo = cosmo.match(sigma8=FLAGS.sigma_8)
    else:
        if rank == 0:
            print('custom_cosmo is set to False. Using default cosmology.')
        cosmo = Planck15

    if params['save3D'] or params['save3Dpower']:
        if params['interpolate']:
            raise ValueError(
                'interpolate must be set to False if requesting 3D outputs')
    """------- setting output dirs and saving parameters-----------"""
    dirs = {}
    if rank == 0:
        # with shell=True the command must be passed as a string, not a list
        cmd = "git log --pretty=format:'%h' -n 1"
        githash = subprocess.run(cmd, stdout=subprocess.PIPE,
                                 shell=True).stdout.decode('utf-8')
        print('dumping under githash %s' % githash)
        output_path = os.path.join(FLAGS.output_path, githash)
        params_path = os.path.join(os.path.join(os.getcwd()), 'runs', githash)
        params['output_path'] = output_path
        print(params_path)
        if not os.path.isdir(params_path):
            os.makedirs(params_path)

        # make sure parameter file name is unique and we are not repeating a run
        num_run = 0
        found = True
        while found:
            path_name = os.path.join(output_path,
                                     params['label'] + '%d/' % num_run)
            params_file = os.path.join(params_path,
                                       params['label'] + '%d.json' % num_run)
            if not os.path.isdir(path_name):
                os.makedirs(path_name)
                found = False
            if not os.path.isfile(params_file):
                found = False
            else:
                with open(params_file, 'r') as f:
                    old_params = json.load(f)
                    if old_params == params and not params['debug']:
                        raise ValueError(
                            'run with same settings already exists: %s' %
                            params_file)
                    elif params['debug']:
                        found = False
                    else:
                        num_run += 1

        for result in ['cls', 'maps', 'snapshots']:
            dirs[result] = os.path.join(path_name, result)
            if not os.path.isdir(dirs[result]):
                os.makedirs(dirs[result])

        fjson = json.dumps(params)
        f = open(params_file, "w")
        f.write(fjson)
        f.close()

    dirs = comm.bcast(dirs, root=0)
    params['snapshot_dir'] = dirs['snapshots']
    """---------------------------run actual simulations-----------------------------"""
    sims_start = time.time()

    for ii in range(FLAGS.N_maps):
        if rank == 0:
            print('progress in percent:', ii / params['N_maps'] * 100)
        kmaps, kmaps_deriv, pm = run_wl_sim(params, cosmo=cosmo, num=ii)

        for jj, z_source in enumerate(params['zs_source']):
            kmap = kmaps[jj]
            mapfile = os.path.join(
                dirs['maps'], 'map_decon_zsource%d_map%d_of%d' %
                (z_source * 10, ii, params['N_maps']) + '.npy')
            save_2Dmap(kmap, mapfile)
            if rank == 0:
                print('2D map #%d at z_s=%.1f dumped to %s' %
                      (ii, z_source, mapfile))

    end = time.time()
    if rank == 0:
        print('time taken per sim in sec %d' %
              ((end - sims_start) /
               (params['N_maps'] * len(params['zs_source']))))
        print('time taken before sims in sec %d' % (sims_start - start))
Example #13
def main(ns):
    if ns.zlmax is None:
        ns.zlmax = max(ns.zs)

    zs_list = ns.zs

    zlmin = ns.zlmin
    zlmax = zs_list[-1]  # overrides ns.zlmax

    # no need to be accurate here
    ds_list = Planck15.comoving_distance(zs_list)

    path = ns.source

    cat = BigFileCatalog(path, dataset=ns.dataset)

    kappa = 0
    Nm = 0
    kappabar = 0

    npix = healpix.nside2npix(ns.nside)
    localsize = npix * (cat.comm.rank + 1) // cat.comm.size - npix * (cat.comm.rank) // cat.comm.size
    nbar = (cat.attrs['NC'] ** 3  / cat.attrs['BoxSize'] ** 3 * cat.attrs['ParticleFraction'])[0]
    Nsteps = int(numpy.round((zlmax - zlmin) / ns.zstep))
    if Nsteps < 2: Nsteps = 2

    z = numpy.linspace(zlmax, zlmin, Nsteps+1, endpoint=True)

    if cat.comm.rank == 0:
        cat.logger.info("Splitting data redshift bins %s" % str(z))

    kappa_all = numpy.zeros((Nsteps, len(zs_list), localsize))
    for i, (z1, z2) in enumerate(zip(z[:-1], z[1:])):
        import gc
        gc.collect()
        if cat.comm.rank == 0:
            cat.logger.info("nbar = %g, zlmin = %g, zlmax = %g zs = %s" % (nbar, z2, z1, zs_list))

        slice = read_range(cat, 1/(1 + z1), 1 / (1 + z2))

        if slice.csize == 0: continue
        if cat.comm.rank == 0:
            cat.logger.info("read %d particles" % slice.csize)

        kappa1, kappa1bar, Nm1  = make_kappa_maps(slice, ns.nside, zs_list, ds_list, localsize, nbar)

        kappa = kappa + kappa1

        kappa_all[i] = kappa1
        
        Nm = Nm + Nm1
        kappabar = kappabar + kappa1bar

    cat.comm.barrier()

    if cat.comm.rank == 0:
        # use bigfile because it allows concurrent write to different datasets.
        cat.logger.info("writing to %s", ns.output)


    # array to get all map slices
    if cat.comm.rank == 0:
        kappa1_all = numpy.zeros((Nsteps, int(12 * ns.nside**2)))
    for i, (zs, ds) in enumerate(zip(zs_list, ds_list)):
        std = numpy.std(cat.comm.allgather(len(kappa[i])))
        mean = numpy.mean(cat.comm.allgather(len(kappa[i])))
        if cat.comm.rank == 0:
            cat.logger.info("started gathering source plane %s, size-var = %g, size-bar = %g" % (zs, std, mean))

        kappa1 = GatherArray(kappa[i], cat.comm)
        Nm1 = GatherArray(Nm[i], cat.comm)

        # get slices of kappa map
        for j in range(Nsteps):
            kappa1_allj = GatherArray(kappa_all[j,i], cat.comm)
            if cat.comm.rank == 0:
                kappa1_all[j] = kappa1_allj
                
        if cat.comm.rank == 0:
            cat.logger.info("done gathering source plane %s" % zs)

        if cat.comm.rank == 0:
            fname = ns.output + "/WL-%02.2f-N%04d" % (zs, ns.nside)
            cat.logger.info("started writing source plane %s" % zs)

            with bigfile.File(fname, create=True) as ff:
                print('DEBUG', kappa1_all.shape, len(kappa1_all), numpy.dtype((kappa1_all.dtype, kappa1_all.shape[1:])))
                ds1 = ff.create_from_array("kappa", kappa1, Nfile=1)
                ds2 = ff.create_from_array("Nm", Nm1, Nfile=1)
                #ds3 = ff.create_from_array("kappa_all", kappa1_all.T, Nfile=1)#, memorylimit=1024*1024*1024)

                for d in ds1, ds2:#, ds3:
                    d.attrs['kappabar'] = kappabar[i]
                    d.attrs['nside'] = ns.nside
                    d.attrs['zlmin'] = zlmin
                    d.attrs['zlmax'] = zlmax
                    d.attrs['zstep'] = ns.zstep
                    d.attrs['zs'] = zs
                    d.attrs['ds'] = ds
                    d.attrs['nbar'] = nbar

        cat.comm.barrier()
        if cat.comm.rank == 0:
            # use bigfile because it allows concurrent write to different datasets.
            cat.logger.info("source plane at %g written. " % zs)
Example #14
def main():
    """ 
    Script to compute FOF halos from treepm DM catalog. Compute virial mass
    which is needed for HOD models. 

    Note: Before March 2020, used mass given by number of particles in halo,
    see psiRec/psirec/main_ms_gadget_fof_halofinder_nbkit0.3.py.
    """

    ap = ArgumentParser()
    ap.add_argument('treepm', 
        help='Directory of TreePM matter field, e.g. /scratch/treepm_0.1000/')
    ap.add_argument('ll', type=float, 
        help='Linking length of finding halos, e.g. 0.2 or 0.168', 
        default=0.2)
    ap.add_argument('fof', 
        help=('Output directory of halo catalogs, e.g. '
              '/scratch/treepm_0.1000/fof . Will write to {fof}/{ll_nmin_mvir}'))
    ap.add_argument('--nmin', type=int, default=20, 
        help='min number of particles to be in the catalogue')
    ap.add_argument('--with-peak', help='Find Peaks KDDensity estimation (slow)', 
        default=False)


    ns = ap.parse_args()

    cat = BigFileCatalog(ns.treepm, header='Header', dataset='1/')

    cat.attrs['BoxSize'] = np.ones(3) * cat.attrs['BoxSize'][0]
    cat.attrs['Nmesh'] = np.ones(3) * 512.0  # in TreePM catalog, there is no 'NC' attribute
    
    cosmo = Planck15.match(Omega0_m=cat.attrs['Omega0'])
    # In TreePM, we need to use 'Omega0' instead of 'OmegaM' in FastPM.
    # csize is the total number of particles
    M0 = (cat.attrs['Omega0'][0] * 27.75 * 1e10 * cat.attrs['BoxSize'].prod() 
            / cat.csize)

    redshift = 1.0/cat.attrs['Time'][0]-1.0

    if cat.comm.rank == 0:
        print('BoxSize', cat.attrs['BoxSize'])
        print('Mass of a particle', M0)
        print('OmegaM', cosmo.Om0)
        print('attrs', cat.attrs.keys())
        print('Redshift', redshift)


    if ns.with_peak:
        posdef = 'peak'
    else:
        posdef = 'cm'

    # Halos which have more than nmin particles are selected.
    fof = FOF(cat, linking_length=ns.ll, nmin=ns.nmin)  

    # Compute halo catalog. Mass column contains virial mass, which is needed
    # to get concentration needed for hod.
    halos = fof.to_halos(
        cosmo=cosmo,
        redshift=redshift,
        particle_mass=M0,
        mdef='vir',
        posdef=posdef,
        peakcolumn='Density')

    halos['log10M'] = np.log10(halos['Mass'])

    # print info
    if fof.comm.rank == 0:
        print('Total number of halos found', halos.csize)
        print('Saving columns', halos.columns)
        if not os.path.exists(ns.fof):
            os.makedirs(ns.fof)

    # Save the halo catalog to disk so can easily load it later to populate
    # galaxies with hod.
    out_fname = ns.fof + '/ll_{0:.3f}_nmin{1}_mvir'.format(ns.ll, ns.nmin+1)

    if ns.with_peak:
        out_fname += '_peakpos'

    # MS: Somehow crashes b/c some ranks don't see header file. running
    # a second time works though. maybe write header first with 
    # single rank?
    halos.save(out_fname, halos.columns)

    if fof.comm.rank == 0:
        print('Saved HaloCatalog to %s' % out_fname)
Example #15
def run_hod(cat,
            HOD_model_name=None,
            hod_seed=42,
            add_RSD=False,
            RSD_LOS=None):
    """
    Run HOD to get galaxy catalog from input halo catalog.

    Parameters
    ----------
    cat : nbodykit Catalog object
        Input halo catalog, should use virial mass as 'Mass' column.
    """
    if cat.comm.rank == 0:
        print('HOD model: %s' % HOD_model_name)
    cat.attrs['BoxSize'] = np.ones(3) * cat.attrs['BoxSize'][0]
    cat.attrs['Nmesh'] = np.ones(
        3) * 512.0  # in TreePM catalog, there is no 'NC' attribute

    cosmo = Planck15.match(Omega0_m=cat.attrs['Omega0'])
    # In TreePM, we need to use 'Omega0' instead of 'OmegaM' in FastPM.
    # csize is the total number of particles
    M0 = (cat.attrs['Omega0'][0] * 27.75 * 1e10 * cat.attrs['BoxSize'].prod() /
          cat.csize)
    redshift = 1.0 / cat.attrs['Time'][0] - 1.0

    # convert to HaloCatalog
    halos = HaloCatalog(cat, cosmo, redshift)

    if cat.comm.rank == 0:
        print('BoxSize', halos.attrs['BoxSize'])
        print('attrs', halos.attrs.keys())
        print('RSDFactor', halos.attrs['RSDFactor'])
        print('Columns', halos.columns)

    # Define HOD
    if HOD_model_name in [
            'Zheng07_HandSeljak17_v2', 'Zheng07_HandSeljak17_centrals_v2',
            'Zheng07_HandSeljak17_sats_v2',
            'Zheng07_HandSeljak17_parent_halos_v2'
    ]:

        # (1) Hand & Seljak 1706.02362:
        # Uses {log10 Mmin, sigma log10 M, log10 M1, alpha, log10 Mcut} = {12.99, 0.308, 14.08, 0.824, 13.20}.
        # See Reid et al https://arxiv.org/pdf/1404.3742.pdf eq 17-19

        # (2) halotools docs on zheng07 model:
        #  See https://halotools.readthedocs.io/en/stable/quickstart_and_tutorials/tutorials/model_building/preloaded_models/zheng07_composite_model.html#zheng07-parameters):
        # logMmin - Minimum mass required for a halo to host a central galaxy.
        # sigma_logM - Rate of transition from <Ncen>=0 -> <Ncen=1>.
        # alpha - Power law slope of the relation between halo mass and <Nsat>.
        # logM0 - Low-mass cutoff in <Nsat>.
        # logM1 - Characteristic halo mass where <Nsat> begins to assume a power law form.

        # 11 June 2020: Zheng07_HandSeljak17_v2 uses fixed RSDFactor, which was wrong by factor of 1/a before.

        hodmodel = Zheng07Model.to_halotools(cosmo=cosmo,
                                             redshift=redshift,
                                             mdef='vir')

        # HOD parameters from Hand & Seljak 1706.02362
        hodmodel.param_dict['logMmin'] = 12.99
        hodmodel.param_dict['sigma_logM'] = 0.308
        hodmodel.param_dict['logM1'] = 14.08
        hodmodel.param_dict['alpha'] = 1.06
        hodmodel.param_dict[
            'logM0'] = 13.20  # this is called Mcut in Hand et al and Reid et al.

        if cat.comm.rank == 0:
            print('Use zheng07model with:', hodmodel.param_dict)

        # Run HOD
        galcat = halos.populate(hodmodel, seed=hod_seed)

        # select which galaxies to keep
        if HOD_model_name == 'Zheng07_HandSeljak17_v2':
            # keep all
            pass

        elif HOD_model_name == 'Zheng07_HandSeljak17_centrals_v2':
            # select only centrals
            ww = galcat['gal_type'] == 0  # 0: central, 1: satellite
            galcat = galcat[ww]

        elif HOD_model_name == 'Zheng07_HandSeljak17_sats_v2':
            # select only satellites
            ww = galcat['gal_type'] == 1  # 0: central, 1: satellite
            galcat = galcat[ww]

        elif HOD_model_name == 'Zheng07_HandSeljak17_parent_halos_v2':
            # select centrals
            ww = galcat['gal_type'] == 0  # 0: central, 1: satellite
            galcat = galcat[ww]

            # set position to that of parent halo (in Mpc/h)
            halo_pos = galcat['Position'].compute() + np.nan
            halo_pos[:, 0] = galcat['halo_x'].compute()
            halo_pos[:, 1] = galcat['halo_y'].compute()
            halo_pos[:, 2] = galcat['halo_z'].compute()
            galcat['Position'] = halo_pos
            del halo_pos

            # set velocity to that of parent halo (in km/s)
            halo_vel = galcat['Velocity'].compute() + np.nan
            halo_vel[:, 0] = galcat['halo_vx'].compute()
            halo_vel[:, 1] = galcat['halo_vy'].compute()
            halo_vel[:, 2] = galcat['halo_vz'].compute()
            galcat['Velocity'] = halo_vel
            del halo_vel

            # Get RSD displacement = v_z/(aH(a)), where v_z is halo velocity.
            # Compute rsd_factor = 1/(aH(a)) = (1+z)/H(z)
            # see https://nbodykit.readthedocs.io/en/latest/catalogs/common-operations.html#Adding-Redshift-space-Distortions
            rsd_factor = (1. + redshift) / (100. * cosmo.efunc(redshift))
            raise Exception(
                'this is not correct for ms_gadget which has a^2 dx/dt for velocity.'
            )
            galcat['VelocityOffset'] = rsd_factor * galcat['Velocity']

            # columns: ['Position', 'Selection', 'Value', 'Velocity', 'VelocityOffset', 'Weight', 'conc_NFWmodel', 'gal_type', 'halo_hostid', 'halo_id', 'halo_mvir', 'halo_num_centrals', 'halo_num_satellites', 'halo_rvir', 'halo_upid', 'halo_vx', 'halo_vy', 'halo_vz', 'halo_x', 'halo_y', 'halo_z', 'host_centric_distance', 'vx', 'vy', 'vz', 'x', 'y', 'z']

    else:
        raise Exception('Unknown hod_model %s' % HOD_model_name)

    if add_RSD:
        assert isinstance(RSD_LOS, np.ndarray)
        assert RSD_LOS.shape == (3, )
        print('cat attrs:', galcat.attrs)

        # It seems like halos.populate gives satellite velocity in km/s by drawing from NFW profile, and sets central velocity equal to halo velocity.
        # But not sure what units are assumed for halo velocity. Note we have a different velocity prefactor in ms_gadget and the new MP-Gadget format.
        # Also, should probably use peak velocity instead of bulk velocity of halos for the centrals velocity.
        # So HOD just seems screwed up.
        raise Exception(
            'todo: use RSDFactor of the catalog! VelocityOffset can be wrong by factor of a if catalog has a^2 dx/dt (ms_gadget) instead of a dx/dt.'
        )

        galcat['Position'] = (galcat['Position'] +
                              galcat['VelocityOffset'] * RSD_LOS)

    if cat.comm.rank == 0:
        print('galcat', galcat)
        print('attrs', galcat.attrs)
        print('columns', galcat.columns)
        print('fsat', galcat.attrs['fsat'])

    return galcat
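A hypothetical call, assuming halos_cat is an nbodykit halo catalog whose 'Mass' column holds virial masses:

    galcat = run_hod(halos_cat, HOD_model_name='Zheng07_HandSeljak17_v2',
                     hod_seed=42, add_RSD=False)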
Example #16
from __future__ import print_function
from numpy.testing import assert_raises, assert_array_equal, assert_allclose
from numpy.testing.decorators import skipif

import numpy

from pmesh.abopt import ParticleMesh, RealField, ComplexField, check_grad

from nbodykit.cosmology import Planck15, EHPower
cosmo = Planck15.clone(Tcmb0=0)

pm = ParticleMesh(BoxSize=1.0, Nmesh=(4, 4, 4), dtype='f8')
pk = EHPower(Planck15, redshift=0)

mask2d = pm.resize([4, 4, 1]).create(mode='real')
mask2d[...] = 1.0

from cosmo4d import map2d
from cosmo4d.nbody import NBodyModel
from cosmo4d.options import UseComplexSpaceOptimizer, UseRealSpaceOptimizer


def test_map2d():
    dynamic_model = NBodyModel(cosmo, pm, 1, [1.0])
    mock_model = map2d.MockModel(dynamic_model)
    noise_model = map2d.NoiseModel(pm, mask2d, 1.0, 1234)

    initial = pm.generate_whitenoise(1234, mode='real')

    obs = mock_model.make_observable(initial)
    assert_array_equal(obs.map2d.Nmesh, (4, 4, 1))
Example #17
from runtests.mpi import MPITest
from fastpm.core import leapfrog, autostages, Solver as CoreSolver

from fastpm.state import StateVector, Matter, Baryon, CDM, NCDM
from fastpm.multi import Solver

from pmesh.pm import ParticleMesh
from nbodykit.cosmology import Planck15, EHPower
import numpy
from numpy.testing import assert_allclose

from fastpm.multi import get_species_transfer_function_from_class

Planck15 = Planck15.clone(gauge='newtonian')
@MPITest([1, 4])
def test_solver(comm):
    pm = ParticleMesh(BoxSize=512., Nmesh=[8, 8, 8], comm=comm)
    solver = Solver(pm, Planck15, B=1)

    P_prm = Planck15.Primordial.get_pkprim

    tf = get_species_transfer_function_from_class(Planck15, 9)

    Q = pm.generate_uniform_particle_grid(shift=0)

    wn = solver.whitenoise(1234)
    prm = solver.primordial(wn, P_prm)
    ic = solver.lpt(prm, {
                '0': (Baryon, tf['d_b'], tf['dd_b']),
                '1': (CDM, tf['d_cdm'], tf['dd_cdm']),
                '4': (NCDM, tf['d_ncdm[0]'], tf['dd_ncdm[0]']),
            }, Q, a=0.1)
Example #18
from mpi4py import MPI
import numpy

from argparse import ArgumentParser

from nbodykit.cosmology import Planck15
from nbodykit.cosmology import EHPower
from nbodykit.cosmology.perturbation import PerturbationGrowth
from scipy.integrate import quad
PowerSpectrum = EHPower(Planck15, redshift=0.0)
pt = PerturbationGrowth(Planck15.clone(Tcmb0=0))


class FastPM:
    def K(ai, af, ar):
        return 1 / (ar**2 * pt.E(ar)) * (pt.Gf(af) - pt.Gf(ai)) / pt.gf(ar)

    def D(ai, af, ar):
        return 1 / (ar**3 * pt.E(ar)) * (pt.Gp(af) - pt.Gp(ai)) / pt.gp(ar)


class FastPM1:
    def K(ai, af, ar):
        def func(a):
            return 1.0 / (a * a * pt.E(a))

        return quad(func, ai, af)[0]

    def D(ai, af, ar):
        return 1 / (ar**3 * pt.E(ar)) * (pt.Gp(af) - pt.Gp(ai)) / pt.gp(ar)
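FastPM.K folds the growth-factor helpers Gf/gf into a closed-form kick factor, while FastPM1.K integrates 1/(a^2 E(a)) directly; a small sketch comparing the two (the step endpoints are illustrative assumptions):

# Illustrative comparison of closed-form vs quadrature kick factors.
if __name__ == '__main__':
    ai, af = 0.5, 0.6
    ar = 0.5 * (ai + af)
    print('closed-form K:', FastPM.K(ai, af, ar))
    print('quadrature  K:', FastPM1.K(ai, af, ar))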
Example #19
from mpi4py import MPI
import numpy

from argparse import ArgumentParser

from nbodykit.cosmology import Planck15
from nbodykit.cosmology import EHPower
from nbodykit.cosmology.perturbation import PerturbationGrowth
from scipy.integrate import quad
PowerSpectrum = EHPower(Planck15, redshift=0.0)
pt = PerturbationGrowth(Planck15.clone(Tcmb0=0))

class FastPM:
    def K(ai, af, ar):
        return 1 / (ar ** 2 * pt.E(ar)) * (pt.Gf(af) - pt.Gf(ai)) / pt.gf(ar)
    def D(ai, af, ar):
        return 1 / (ar ** 3 * pt.E(ar)) * (pt.Gp(af) - pt.Gp(ai)) / pt.gp(ar)

class FastPM1:
    def K(ai, af, ar):
        def func(a):
            return 1.0 / (a * a * pt.E(a))
        return quad(func, ai, af)[0]
    def D(ai, af, ar):
        return 1 / (ar ** 3 * pt.E(ar)) * (pt.Gp(af) - pt.Gp(ai)) / pt.gp(ar)

class FastPM2:
    def K(ai, af, ar):
        return 1 / (ar ** 2 * pt.E(ar)) * (pt.Gf(af) - pt.Gf(ai)) / pt.gf(ar)
    def D(ai, af, ar):
        def func(a):
            # integrand inferred by symmetry with FastPM1.K (assumption)
            return 1.0 / (a ** 3 * pt.E(a))
        return quad(func, ai, af)[0]
Example #20
def main(ns):
    if ns.zlmax is None:
        ns.zlmax = max(ns.zs)

    zs_list = ns.zs

    zlmin = ns.zlmin
    zlmax = ns.zlmax

    # no need to be accurate here
    ds_list = Planck15.comoving_distance(zs_list)

    path = ns.source
    #'/global/cscratch1/sd/yfeng1/m3127/desi/1536-9201-40eae2464/lightcone/usmesh/'

    cat = BigFileCatalog(path, dataset=ns.dataset)

    kappa = 0
    Nm = 0
    kappabar = 0

    npix = healpix.nside2npix(ns.nside)
    localsize = npix * (cat.comm.rank + 1) // cat.comm.size - npix * (
        cat.comm.rank) // cat.comm.size
    nbar = (cat.attrs['NC']**3 / cat.attrs['BoxSize']**3 *
            cat.attrs['ParticleFraction'])[0]

    Nsteps = int(numpy.round((zlmax - zlmin) / ns.zstep))
    if Nsteps < 2: Nsteps = 2
    z = numpy.linspace(zlmax, zlmin, Nsteps, endpoint=True)

    if cat.comm.rank == 0:
        cat.logger.info("Splitting data redshift bins %s" % str(z))

    for z1, z2 in zip(z[:-1], z[1:]):
        import gc
        gc.collect()
        if cat.comm.rank == 0:
            cat.logger.info("nbar = %g, zlmin = %g, zlmax = %g zs = %s" %
                            (nbar, z2, z1, zs_list))

        slice = read_range(cat, 1 / (1 + z1), 1 / (1 + z2))

        if slice.csize == 0: continue
        if cat.comm.rank == 0:
            cat.logger.info("read %d particles" % slice.csize)

        kappa1, kappa1bar, Nm1 = make_kappa_maps(slice, ns.nside, zs_list,
                                                 ds_list, localsize, nbar)

        kappa = kappa + kappa1
        Nm = Nm + Nm1
        kappabar = kappabar + kappa1bar

    cat.comm.barrier()

    if cat.comm.rank == 0:
        # use bigfile because it allows concurrent write to different datasets.
        cat.logger.info("writing to %s", ns.output)

    for i, (zs, ds) in enumerate(zip(zs_list, ds_list)):
        std = numpy.std(cat.comm.allgather(len(kappa[i])))
        mean = numpy.mean(cat.comm.allgather(len(kappa[i])))
        if cat.comm.rank == 0:
            cat.logger.info(
                "started gathering source plane %s, size-var = %g, size-bar = %g"
                % (zs, std, mean))

        kappa1 = GatherArray(kappa[i], cat.comm)
        Nm1 = GatherArray(Nm[i], cat.comm)

        if cat.comm.rank == 0:
            cat.logger.info("done gathering source plane %s" % zs)

        if cat.comm.rank == 0:
            fname = ns.output + "/WL-%02.2f-N%04d" % (zs, ns.nside)
            cat.logger.info("started writing source plane %s" % zs)

            with bigfile.File(fname, create=True) as ff:

                ds1 = ff.create_from_array("kappa", kappa1, Nfile=1)
                ds2 = ff.create_from_array("Nm", Nm1, Nfile=1)

                for d in ds1, ds2:
                    d.attrs['kappabar'] = kappabar[i]
                    d.attrs['nside'] = ns.nside
                    d.attrs['zlmin'] = zlmin
                    d.attrs['zlmax'] = zlmax
                    d.attrs['zs'] = zs
                    d.attrs['ds'] = ds
                    d.attrs['nbar'] = nbar

        cat.comm.barrier()
        if cat.comm.rank == 0:
            # use bigfile because it allows concurrent write to different datasets.
            cat.logger.info("source plane at %g written. " % zs)
Example #21
    with hp.File(infile, 'r') as indata, hp.File(outfile_0, 'w') as outdata:
        outdata.create_group('distill')
        try:
            indata.copy('extract/log_prob', outdata['distill'])
        except KeyError:
            pass
        outdata.create_dataset('distill/chain', data=distilled_chains[0])

    with hp.File(infile, 'r') as indata, hp.File(outfile_1, 'w') as outdata:
        outdata.create_group('distill')
        try:
            indata.copy('extract/log_prob', outdata['distill'])
        except KeyError:
            pass
        outdata.create_dataset('distill/chain', data=distilled_chains[1])

    logger.info("Distilled chains saved to %s and %s.\n", outfile_0, outfile_1)


SAVE = True
SAVEFIG = True
if __name__ == '__main__':

    progrc = initialise()

    b_1 = 1.2 / Planck15.scale_independent_growth_factor(progrc.redshift)

    input_chain = load_samples()

    with Pool() as mpool:
        distilled_chains = distill_corrections(input_chain, pool=mpool)

    save_distilled()
Example #22
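# This snippet assumes earlier definitions of seed, ofolder, truth_pm, aa,
# nsteps, B and noisevar, plus the cosmo4d NBodyModel and map modules.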
prefix = 'test'
fname = 's%d_%s' % (seed, prefix)
optfolder = ofolder + 'opt_%s/' % fname
if truth_pm.comm.rank == 0: print('Output Folder is %s' % optfolder)

for folder in [ofolder, optfolder]:
    os.makedirs(folder, exist_ok=True)

#initiate

klin, plin = numpy.loadtxt('../data/pklin_1.0000.txt', unpack=True)
ipk = interpolate(klin, plin)
cosmo = Planck15.clone(Omega_cdm=0.2685, h=0.6711, Omega_b=0.049)

##################################################################################
##setup the model
##
stages = numpy.linspace(0.1, aa, nsteps, endpoint=True)
dynamic_model = NBodyModel(cosmo, truth_pm, B=B, steps=stages)
#dynamic_model = ZAModel(cosmo, truth_pm, B=B, steps=stages)

#noise
#Artifically low noise since the data is constructed from the model
truth_noise_model = map.NoiseModel(
    truth_pm, None,
    noisevar * (truth_pm.BoxSize / truth_pm.Nmesh).prod(), 1234)
mock_model = map.MockModel(dynamic_model, params=[2, 1, 0.5])
Example #23
    def __init__(self, githash, label, rnum, local_path, alter_path=None):
        """
        loads the parameter file of the run
        githash: string, abridged githash of commit under which the run was performed
        label  : string, label of the run
        rnum   : int, number of run under this label and githash
        local_path: string, path under which parameter files have been stored 
        """

        #-------------------------------------------------------------#
        params_path = os.path.join(local_path, 'runs', githash)
        params_file = os.path.join(params_path, label + '%d.json' % rnum)
        with open(params_file, 'r') as f:
            self.params = json.load(f)
        if alter_path is None:
            path_name = os.path.join(self.params['output_path'],
                                     self.params['label'] + '%d/' % rnum)
        else:
            path_name = os.path.join(alter_path,
                                     self.params['label'] + '%d/' % rnum)

        self.dirs = {}
        for result in ['cls', 'maps', 'snapshots']:
            self.dirs[result] = os.path.join(path_name, result)
        #-------------------------------------------------------------#

        cosmo = Planck15.match(Omega0_m=self.params['Omega_m'])
        self.cosmo = cosmo.match(sigma8=self.params['sigma_8'])

        self.pm = ParticleMesh(Nmesh=self.params['Nmesh'],
                               BoxSize=self.params['BoxSize'],
                               resampler='cic')

        BoxSize2D = [deg / 180. * np.pi for deg in self.params['BoxSize2D']]
        self.pm2D = ParticleMesh(BoxSize=BoxSize2D,
                                 Nmesh=self.params['Nmesh2D'],
                                 resampler='cic')

        z_int = np.logspace(-8, np.log10(1500), 10000)
        chis = cosmo.comoving_distance(z_int)  #Mpc/h
        self.z_chi_int = scipy.interpolate.interp1d(chis,
                                                    z_int,
                                                    kind=3,
                                                    bounds_error=False,
                                                    fill_value=0.)

        self.theory_cls = {}
        self.measured_cls = {}
        print(
            'Loading run with BoxSize %d, Nmesh %d, SourceRedshift %.2f, PGD %s and interpolation %s.'
            % (self.params['BoxSize'][0], self.params['Nmesh'][0],
               self.params['zs_source'][0], str(
                   self.params['PGD']), str(self.params['interpolate'])))

        # count how many maps have been dumped
        NN = len(os.listdir(self.dirs['maps']))
        if NN < self.params['N_maps']:
            print(
                'fewer maps produced than requested. Requested: %d Produced: %d'
                % (self.params['N_maps'], NN))
        self.N_maps = NN
        print('%d maps were produced in this run' % self.N_maps)

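        # Nyquist wavenumber of each mesh: k_Nyq = pi * Nmesh / BoxSize (h/Mpc).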
        self.Nyquist_3D = np.pi * self.pm.Nmesh[0] / self.pm.BoxSize[0]
        self.Nyquist_2D = np.pi * self.pm2D.Nmesh[0] / self.pm2D.BoxSize[0]